{
"source": "Jososke/Driving_Clone",
"score": 3
} |
#### File: Jososke/Driving_Clone/model.py
```python
import os
import csv
import warnings
warnings.filterwarnings("ignore")
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Lambda, Cropping2D, Dense, Activation, Flatten, Dropout, Conv2D
import sklearn
from sklearn.utils import shuffle
samples1 = []
samples2 = []
samples = []
# Loading in the data from the csv file
with open('data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples1.append(line)
with open('data/data2/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples2.append(line)
samples = samples1[1:] + samples2
# create adjusted steering measurements for the side camera images
correction = 0.2 # this is a parameter to tune
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
measurements = []
for line in batch_samples:
for i in range(3):
source_path = line[i]
filename = source_path.split('/')[-1]
imageBGR = cv2.imread('data/IMG/' + filename)
if imageBGR is None:
imageBGR = cv2.imread('data/data2/IMG/' + filename)
# Images in drive.py are read in as RGB
image = cv2.cvtColor(imageBGR, cv2.COLOR_BGR2RGB)
images.append(image)
if i == 0: # Center
measurement = float(line[3])
elif i == 1: # Left
measurement = float(line[3]) + correction
elif i == 2: # Right
measurement = float(line[3]) - correction
measurements.append(measurement)
# Augmenting data to avoid left bias on the track
augmented_images, augmented_measurements = [], []
for image, measurement in zip(images, measurements):
augmented_images.append(image)
augmented_measurements.append(measurement)
augmented_images.append(cv2.flip(image,1))
augmented_measurements.append(measurement*-1.0)
# trim image to only see section with road
X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)
yield sklearn.utils.shuffle(X_train, y_train)
# Set our batch size
batch_size=32
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
model = Sequential()
# set up lambda layer for normalization
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))
# cropping 70 pixels from top of image (trees) and 25 pixels from bottom of image (hood of car)
model.add(Cropping2D(cropping=((70,25),(0,0))))
# NVIDIA architecture and including a dropout layer for redundancy
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation="relu"))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation="relu"))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator, steps_per_epoch=np.ceil(len(train_samples)/batch_size),
validation_data=validation_generator,
validation_steps=np.ceil(len(validation_samples)/batch_size),
epochs=5, verbose=1)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
# Save the model
model.save('model.h5')
``` |
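The script above only trains and saves model.h5; in the original project a separate drive.py feeds simulator frames to the network. Below is a minimal sketch, assuming a hypothetical example frame path, of how the saved model could be queried for a single steering prediction, mirroring the BGR-to-RGB conversion used during training:

```python
# Sketch only (not part of the repository): load the trained network and
# predict a steering angle for one camera frame.
import cv2
from keras.models import load_model

model = load_model('model.h5')

# Hypothetical example frame; training images were read with cv2 (BGR) and
# converted to RGB because drive.py supplies RGB frames.
frame_bgr = cv2.imread('data/IMG/center_example.jpg')
frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)

# The Lambda and Cropping2D layers inside the model handle normalization and
# cropping, so the raw 160x320x3 frame can be passed in directly.
steering_angle = float(model.predict(frame_rgb[None, ...], batch_size=1)[0][0])
print('predicted steering angle:', steering_angle)
```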
{
"source": "jospdeleon/lambda-kinesis",
"score": 2
} |
#### File: lambda-kinesis/myWebServerFunction/app.py
```python
import json
import boto3
import newrelic.agent
import os
GET_RESPONSE = """
<html>
<head>
<title>NRDT Demo App</title>
</head>
<body>
<form id="post_me" name="post_me" method="POST" action="">
<label for="message">Message</label>
<input id="message" name="message" type="text" value="Hello world" />
<select id="stream" name="stream">
<option value="go">Go</option>
<option value="node">Node</option>
</select>
<button type="submit" name="submit">Submit</button>
</form>
<div id="output" style="white-space: pre-wrap; font-family: monospace;">
</div>
<script>
const formElem = document.getElementById("post_me");
const messageElem = document.getElementById("message");
const streamElem = document.getElementById("stream");
formElem.addEventListener("submit", (ev) => {
let data = {
message: messageElem.value,
stream: streamElem.value
}
fetch(location.href, {
"method": "POST",
headers: {
'Content-Type': 'application/json'
},
"body": JSON.stringify(data)
})
.then(resp => resp.text())
.then(body => {
document.getElementById("output").innerText = body;
});
ev.preventDefault();
});
</script>
</body>
</html>
"""
def nr_trace_context_json():
"""Generate a distributed trace context as a JSON document"""
# The Python agent expects a list as an out-param
dt_headers = []
newrelic.agent.insert_distributed_trace_headers(headers=dt_headers)
# At this point, dt_headers is a list of tuples. We first convert it to a dict, then serialize as a JSON object.
# The resulting string can be used as a kinesis record attribute string value.
return json.dumps(dict(dt_headers))
def send_kinesis_message(message, stream):
nrcontext = newrelic.agent.get_linking_metadata()
# Get the Kinesis client
kinesis = boto3.client("kinesis")
# a Python object (dict):
nrData = {
"message": message,
"nrDt": nr_trace_context_json()
}
# Logs in context example using the agent API
log_message = {"message": 'RECORD: ' + nrData['message']}
log_message.update(nrcontext)
print(json.dumps(log_message))
streamArn = ''
if stream == 'go':
streamArn = os.environ.get('GO_STREAM')
elif stream == 'node':
streamArn = os.environ.get('NODE_STREAM')
return kinesis.put_record(
StreamName = streamArn,
Data=json.dumps(nrData),
PartitionKey='1'
)
def lambda_handler(event, context):
nrcontext = newrelic.agent.get_linking_metadata()
if event['httpMethod'] == 'GET':
print('inside GET')
# For our example, we return a static HTML page in response to GET requests
return {
"statusCode": 200,
"headers": {
"Content-Type": "text/html"
},
"isBase64Encoded": False,
"body": GET_RESPONSE
}
elif event['httpMethod'] == 'POST':
# Logs in context example using the agent API
log_message = {"message": "inside POST"}
log_message.update(nrcontext)
print(json.dumps(log_message))
data = json.loads(event['body'])
message = data['message']
stream = data['stream']
newrelic.agent.add_custom_parameter('myMessage', message)
# Handle POST requests by sending the message into a kinesis stream
send_status = send_kinesis_message(message, stream)
# Returns the raw batch status. A real application would want to process the API response.
return {
"statusCode": 200,
"headers": {
"Content-Type": "application/json"
},
"isBase64Encoded": False,
"body": json.dumps(send_status),
}
``` |
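The handler above serializes the New Relic trace context into the nrDt field of each Kinesis record. Below is a minimal sketch of what a downstream consumer Lambda might do with that payload; the handler name and the logging-only behavior are illustrative assumptions, not part of this repository:

```python
# Hypothetical consumer sketch (not part of this repository).
import base64
import json

def consumer_handler(event, context):
    processed = 0
    for record in event.get('Records', []):
        # Kinesis delivers each record payload base64-encoded.
        payload = json.loads(base64.b64decode(record['kinesis']['data']))
        message = payload.get('message')
        dt_headers = json.loads(payload.get('nrDt', '{}'))
        # An instrumented consumer would typically hand dt_headers to the
        # agent's accept-distributed-trace API; here we only log them.
        print(json.dumps({'message': message, 'nrDt': dt_headers}))
        processed += 1
    return {'processed': processed}
```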
{
"source": "JospehCeh/Delight",
"score": 2
} |
#### File: Delight/scripts/mcmcTemplatePriors.py
```python
import sys
from mpi4py import MPI
import numpy as np
from scipy.interpolate import interp1d
sys.path.append('/Users/bl/Dropbox/repos/Delight/')
from delight.io import *
from delight.utils import *
from delight.photoz_gp import PhotozGP
from delight.photoz_kernels import Photoz_mean_function, Photoz_kernel
import scipy.stats
import matplotlib.pyplot as plt
import emcee
import corner
comm = MPI.COMM_WORLD
threadNum = comm.Get_rank()
numThreads = comm.Get_size()
# Parse parameters file
if len(sys.argv) < 2:
raise Exception('Please provide a parameter file')
paramFileName = sys.argv[1]
params = parseParamFile(paramFileName, verbose=False)
DL = approx_DL()
redshiftDistGrid, redshiftGrid, redshiftGridGP = createGrids(params)
numZ = redshiftGrid.size
# Locate which columns of the catalog correspond to which bands.
bandIndices, bandNames, bandColumns, bandVarColumns, redshiftColumn,\
refBandColumn = readColumnPositions(params, prefix="training_")
dir_seds = params['templates_directory']
dir_filters = params['bands_directory']
lambdaRef = params['lambdaRef']
sed_names = params['templates_names']
numBands = bandIndices.size
nt = len(sed_names)
f_mod = np.zeros((numZ, nt, len(params['bandNames'])))
for t, sed_name in enumerate(sed_names):
f_mod[:, t, :] = np.loadtxt(dir_seds + '/' + sed_name +
'_fluxredshiftmod.txt')
numObjectsTraining = sum(1 for line in open(params['training_catFile']))
print('Number of Training Objects', numObjectsTraining)
numMetrics = 7 + len(params['confidenceLevels'])
allFluxes = np.zeros((numObjectsTraining, numBands))
allFluxesVar = np.zeros((numObjectsTraining, numBands))
redshifts = np.zeros((numObjectsTraining, 1))
fmod_atZ = np.zeros((numObjectsTraining, nt, numBands))
# Now loop over training set to compute likelihood function
loc = - 1
trainingDataIter = getDataFromFile(params, 0, numObjectsTraining,
prefix="training_", getXY=False)
for z, ell, bands, fluxes, fluxesVar, bCV, fCV, fvCV in trainingDataIter:
loc += 1
allFluxes[loc, :] = fluxes
allFluxesVar[loc, :] = fluxesVar
redshifts[loc, 0] = z
for t, sed_name in enumerate(sed_names):
for ib, b in enumerate(bands):
fmod_atZ[loc, t, ib] = ell * np.interp(z, redshiftGrid,
f_mod[:, t, b])
def lnprob(params, nt, allFluxes, allFluxesVar, fmod_atZ, pmin, pmax):
if np.any(params > pmax) or np.any(params < pmin):
return - np.inf
alphas = params[0:nt]
betas = params[nt:2*nt][None, :]
sigma_ell = 1e16
like_grid = approx_flux_likelihood_multiobj(
allFluxes, allFluxesVar, fmod_atZ, 1, sigma_ell**2.) # no, nt
p_t = dirichlet(alphas)
p_z = redshifts * np.exp(-0.5 * redshifts**2 / betas) / betas # p(z|t)
p_z_t = p_z * p_t # no, nt
like_lt = (like_grid * p_z_t).sum(axis=1)
eps = 1e-305
ind = like_lt > eps
theprob = np.log(like_lt[ind]).sum()
return theprob
def plot_params(params):
alphas = params[0:nt]
betas = params[nt:2*nt]
fig, axs = plt.subplots(4, 4, figsize=(16, 8))
axs = axs.ravel()
alpha0 = np.sum(alphas)
dirsamples = dirichlet(alphas, 1000)
for i in range(nt):
mean = alphas[i]/alpha0
std = np.sqrt(alphas[i] * (alpha0-alphas[i]) / alpha0**2 / (alpha0+1))
axs[i].axvspan(mean-std, mean+std, color='gray', alpha=0.5)
axs[i].axvline(mean, c='k', lw=2)
axs[i].axvline(1/nt, c='k', lw=1, ls='dashed')
axs[i].set_title('alpha = '+str(alphas[i]))
axs[i].set_xlim([0, 1])
axs[i].hist(dirsamples[:, i], 50, color="k", histtype="step")
for i in range(nt):
pz = redshiftGrid*np.exp(-0.5*redshiftGrid**2/betas[i])/betas[i]
axs[nt+i].plot(redshiftGrid, pz, c='k', lw=2)
axs[nt+i].axvline(betas[i], lw=2, c='k', ls='dashed')
axs[nt+i].set_title('beta = '+str(betas[i]))
fig.tight_layout()
return fig
pmin = np.concatenate((np.repeat(0., nt), np.repeat(0., nt)))
pmax = np.concatenate((np.repeat(200., nt), np.repeat(redshiftGrid[-1], nt)))
ndim, nwalkers = 2*nt, 100
p0 = [pmin + (pmax-pmin)*np.random.uniform(0, 1, size=ndim)
for i in range(nwalkers)]
for i in range(10):
print(lnprob(p0[i], nt, allFluxes, allFluxesVar, fmod_atZ, pmin, pmax))
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
threads=4,
args=[nt, allFluxes, allFluxesVar, fmod_atZ,
pmin, pmax])
pos, prob, state = sampler.run_mcmc(p0, 2)
sampler.reset()
sampler.run_mcmc(pos, 10)
print("Mean acceptance fraction: {0:.3f}"
.format(np.mean(sampler.acceptance_fraction)))
samples = sampler.chain.reshape((-1, ndim))
lnprob = sampler.lnprobability.reshape((-1, 1))
params_mean = samples.mean(axis=0)
params_std = samples.std(axis=0)
fig, axs = plt.subplots(4, 5, figsize=(16, 8))
axs = axs.ravel()
for i in range(ndim):
axs[i].hist(samples[:, i], 50, color="k", histtype="step")
axs[i].axvspan(params_mean[i]-params_std[i],
params_mean[i]+params_std[i], color='gray', alpha=0.5)
axs[i].axvline(params_mean[i], c='k', lw=2)
fig.tight_layout()
fig.savefig('prior_parameters.pdf')
fig = plot_params(params_mean)
fig.savefig('prior_meanparameters.pdf')
print("params_mean", params_mean)
print("params_std", params_std)
alphas = params_mean[0:nt]
betas = params_mean[nt:2*nt]
alpha0 = np.sum(alphas)
print("p_t:", ' '.join(['%.2g' % x for x in alphas / alpha0]))
print("p_t err:", ' '.join(['%.2g' % x
for x in np.sqrt(alphas*(alpha0-alphas)/alpha0**2/(alpha0+1))]))
print("p_z_t:", ' '.join(['%.2g' % x for x in betas]))
fig = corner.corner(samples)
fig.savefig("triangle.pdf")
```
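For reference, the prior being fitted above factorizes as p(z, t) = p(t) p(z | t): p(t) is a Dirichlet distribution with concentrations alpha_t, and p(z | t) = z exp(-z^2 / (2 beta_t)) / beta_t is a Rayleigh-type density in redshift, exactly as coded in lnprob and plot_params. A small sketch (not in the original script, with hypothetical parameter values) of evaluating the fitted prior on a grid:

```python
# Sketch: evaluate the fitted prior p(z, t) on a redshift grid.
import numpy as np

alphas = np.array([2.0, 5.0, 1.5])   # hypothetical fitted Dirichlet concentrations
betas = np.array([0.3, 0.5, 0.8])    # hypothetical fitted beta_t values
zgrid = np.linspace(0.01, 3.0, 300)

p_t = alphas / alphas.sum()          # mean of the Dirichlet, i.e. p(t)
p_z_t = zgrid[:, None] * np.exp(-0.5 * zgrid[:, None]**2 / betas[None, :]) / betas[None, :]
p_z_t /= np.trapz(p_z_t, zgrid, axis=0)   # normalize each p(z | t) over the grid
prior = p_z_t * p_t[None, :]              # joint p(z, t), shape (n_z, n_t)
```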
#### File: Delight/tests/profiling.py
```python
import time
import numpy as np
from delight.photoz_kernels_cy import kernelparts
from delight.photoz_gp import PhotozGP
from delight.utils import *
from delight.photoz_kernels import Photoz_mean_function, Photoz_kernel
NREPEAT = 2
nObj = 10
nObjGP = 4
nInducing = 5
numBands = 5
size = numBands * nObj
redshiftGrid = np.linspace(0, 3, num=30)
use_interpolators = True
extranoise = 1e-8
numLines = 3
numCoefs = 10
bandsUsed = range(numBands)
np.set_printoptions(suppress=True, precision=3)
relerr1 = np.zeros((size, size))
relerr2 = np.zeros((size, size))
relerr3 = np.zeros((size, size))
relerr4 = np.zeros((size, size))
t_constr = 0
t_interp = 0
t_raw = 0
for i in range(NREPEAT):
X = random_X_bzl(size, numBands=numBands)
X2 = random_X_bzl(size, numBands=numBands)
fcoefs_amp, fcoefs_mu, fcoefs_sig \
= random_filtercoefs(numBands, numCoefs)
lines_mu, lines_sig = random_linecoefs(numLines)
var_C, var_L, alpha_C, alpha_L, alpha_T = random_hyperparams()
norms = np.sqrt(2*np.pi) * np.sum(fcoefs_amp * fcoefs_sig, axis=1)
kern = Photoz_kernel(fcoefs_amp, fcoefs_mu, fcoefs_sig,
lines_mu, lines_sig, var_C, var_L,
alpha_C, alpha_L, alpha_T)
b1 = X[:, 0].astype(int)
b2 = X2[:, 0].astype(int)
fz1 = (1. + X[:, 1])
fz2 = (1. + X2[:, 1])
t1 = time.time()
kern.construct_interpolators()
t2 = time.time()
t_constr += (t2 - t1)
t1 = time.time()
kern.update_kernelparts(X, X2)
t2 = time.time()
t_interp += (t2 - t1)
t1 = time.time()
assert X.shape[0] == size
ts = (size, size)
KC, KL = np.zeros(ts), np.zeros(ts)
D_alpha_C, D_alpha_L, D_alpha_z\
= np.zeros(ts), np.zeros(ts), np.zeros(ts)
kernelparts(size, size, numCoefs, numLines,
alpha_C, alpha_L,
fcoefs_amp, fcoefs_mu, fcoefs_sig,
lines_mu, lines_sig,
norms, b1, fz1, b2, fz2,
True, KL, KC, D_alpha_C, D_alpha_L, D_alpha_z)
t2 = time.time()
t_raw += (t2 - t1)
relerr1 += np.abs(kern.KC/KC - 1.) / NREPEAT
relerr2 += np.abs(kern.KL/KL - 1.) / NREPEAT
relerr3 += np.abs(kern.D_alpha_C/D_alpha_C - 1.) / NREPEAT
relerr4 += np.abs(kern.D_alpha_L/D_alpha_L - 1.) / NREPEAT
print('Relative error on KC:', relerr1.mean(), relerr1.std())
print('Relative error on KL:', relerr2.mean(), relerr2.std())
print('Relative error on D_alpha_C:', relerr3.mean(), relerr3.std())
print('Relative error on D_alpha_L:', relerr4.mean(), relerr4.std())
print("=> kernelparts (raw): %s s" % (t_raw / NREPEAT))
print("=> kernelparts (constr): %s s" % (t_constr / NREPEAT))
print("=> kernelparts (interp): %s s" % (t_interp / NREPEAT))
class Timer(object):
def __init__(self, verbose=False):
self.verbose = verbose
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # millisecs
if self.verbose:
print('elapsed time: %f ms' % self.msecs)
X = random_X_bzl(size, numBands=numBands)
if nInducing > 0:
X_inducing = random_X_bzl(nInducing, numBands=numBands)
else:
X_inducing = None
fcoefs_amp, fcoefs_mu, fcoefs_sig \
= random_filtercoefs(numBands, numCoefs)
lines_mu, lines_sig = random_linecoefs(numLines)
var_C, var_L, alpha_C, alpha_L, alpha_T = random_hyperparams()
kern = Photoz_kernel(fcoefs_amp, fcoefs_mu, fcoefs_sig,
lines_mu, lines_sig, var_C, var_L,
alpha_C, alpha_L, alpha_T)
kern.construct_interpolators()
dL_dm = np.ones((size, 1))
dL_dK = 1
norms = np.sqrt(2*np.pi) * np.sum(fcoefs_amp * fcoefs_sig, axis=1)
print('--------')
b1 = X[:, 0].astype(int)
b2 = X[:, 0].astype(int)
fz1 = 1 + X[:, 1]
fz2 = 1 + X[:, 1]
with Timer() as t:
for i in range(NREPEAT):
KC, KL = np.zeros((size, size)), np.zeros((size, size))
D_alpha_C, D_alpha_L, D_alpha_z \
= np.zeros((size, size)), np.zeros((size, size)),\
np.zeros((size, size))
kernelparts(size, size, numCoefs, numLines,
alpha_C, alpha_L,
fcoefs_amp, fcoefs_mu, fcoefs_sig,
lines_mu, lines_sig, norms,
b1, fz1, b2, fz2, True,
KL, KC,
D_alpha_C, D_alpha_L, D_alpha_z)
print("=> kernelparts (raw): %s s" % (t.secs / NREPEAT))
print('--------')
with Timer() as t:
for i in range(NREPEAT):
X = random_X_bzl(size, numBands=numBands)
print("=> Random X: %s s" % (t.secs / NREPEAT))
tX = (t.secs / NREPEAT)
with Timer() as t:
for i in range(NREPEAT):
X = random_X_bzl(size, numBands=numBands)
v = kern.K(X)
print("=> K (X varying): %s s" % (t.secs / NREPEAT))
with Timer() as t:
for i in range(NREPEAT):
v = kern.K(X)
print("=> K (X fixed): %s s" % (t.secs / NREPEAT))
print('-------')
mf = Photoz_mean_function(0.0, fcoefs_amp, fcoefs_mu, fcoefs_sig)
with Timer() as t:
for i in range(NREPEAT):
X = random_X_bzl(size, numBands=numBands)
v = mf.f(X)
print("=> f (X varying): %s s" % (t.secs / NREPEAT))
with Timer() as t:
for i in range(NREPEAT):
v = mf.f(X)
print("=> f (X fixed): %s s" % (t.secs / NREPEAT))
print('--------')
```
#### File: Delight/tests/test_photoz_kernels_cy.py
```python
import numpy as np
from delight.utils import *
from delight.photoz_kernels_cy import \
kernelparts, kernelparts_diag, kernel_parts_interp
from delight.utils_cy import find_positions
size = 50
nz = 150
numBands = 2
numLines = 5
numCoefs = 10
relative_accuracy = 0.1
def test_diagonalOfKernels():
"""
Test that diagonal of kernels and derivatives are correct across functions.
"""
X = random_X_bzl(size, numBands=numBands)
X2 = X
fcoefs_amp, fcoefs_mu, fcoefs_sig = random_filtercoefs(numBands, numCoefs)
lines_mu, lines_sig = random_linecoefs(numLines)
var_C, var_L, alpha_C, alpha_L, alpha_T = random_hyperparams()
norms = np.sqrt(2*np.pi) * np.sum(fcoefs_amp * fcoefs_sig, axis=1)
NO1, NO2 = X.shape[0], X2.shape[0]
b1 = X[:, 0].astype(int)
b2 = X2[:, 0].astype(int)
fz1 = 1 + X[:, 1]
fz2 = 1 + X2[:, 1]
KC, KL \
= np.zeros((NO1, NO2)), np.zeros((NO1, NO2))
D_alpha_C, D_alpha_L, D_alpha_z \
= np.zeros((NO1, NO2)), np.zeros((NO1, NO2)), np.zeros((NO1, NO2))
kernelparts(NO1, NO2, numCoefs, numLines,
alpha_C, alpha_L,
fcoefs_amp, fcoefs_mu, fcoefs_sig,
lines_mu[:numLines], lines_sig[:numLines], norms,
b1, fz1, b2, fz2, True,
KL, KC,
D_alpha_C, D_alpha_L, D_alpha_z)
KC_diag, KL_diag\
= np.zeros((NO1,)), np.zeros((NO1,))
D_alpha_C_diag, D_alpha_L_diag = np.zeros((NO1,)), np.zeros((NO1,))
kernelparts_diag(NO1, numCoefs, numLines,
alpha_C, alpha_L,
fcoefs_amp, fcoefs_mu, fcoefs_sig,
lines_mu[:numLines], lines_sig[:numLines], norms,
b1, fz1, True, KL_diag, KC_diag,
D_alpha_C_diag, D_alpha_L_diag)
np.testing.assert_almost_equal(KL_diag, np.diag(KL))
np.testing.assert_almost_equal(KC_diag, np.diag(KC))
np.testing.assert_almost_equal(D_alpha_C_diag, np.diag(D_alpha_C))
np.testing.assert_almost_equal(D_alpha_L_diag, np.diag(D_alpha_L))
def test_find_positions():
a = np.array([0., 1., 2., 3., 4.])
b = np.array([0.5, 2.5, 3.0, 3.1, 4.0])
    pos = np.zeros(b.size, dtype=int)
find_positions(b.size, a.size, b, pos, a)
np.testing.assert_almost_equal(pos, [0, 2, 2, 3, 3])
def test_kernel_parts_interp():
fcoefs_amp, fcoefs_mu, fcoefs_sig = random_filtercoefs(numBands, numCoefs)
lines_mu, lines_sig = random_linecoefs(numLines)
var_C, var_L, alpha_C, alpha_L, alpha_T = random_hyperparams()
norms = np.sqrt(2*np.pi) * np.sum(fcoefs_amp * fcoefs_sig, axis=1)
zgrid = np.linspace(0, 3, num=nz)
opzgrid = 1 + zgrid
KC_grid, KL_grid =\
np.zeros((numBands, numBands, nz, nz)),\
np.zeros((numBands, numBands, nz, nz))
D_alpha_C_grid, D_alpha_L_grid, D_alpha_z_grid =\
np.zeros((numBands, numBands, nz, nz)),\
np.zeros((numBands, numBands, nz, nz)),\
np.zeros((numBands, numBands, nz, nz))
for ib1 in range(numBands):
for ib2 in range(numBands):
b1 = np.repeat(ib1, nz)
b2 = np.repeat(ib2, nz)
fz1 = 1 + zgrid
fz2 = 1 + zgrid
kernelparts(nz, nz, numCoefs, numLines,
alpha_C, alpha_L,
fcoefs_amp, fcoefs_mu, fcoefs_sig,
lines_mu[:numLines], lines_sig[:numLines], norms,
b1, fz1, b2, fz2, True,
KL_grid[ib1, ib2, :, :], KC_grid[ib1, ib2, :, :],
D_alpha_C_grid[ib1, ib2, :, :],
D_alpha_L_grid[ib1, ib2, :, :],
D_alpha_z_grid[ib1, ib2, :, :])
Xrand = random_X_bzl(size, numBands=numBands)
X2rand = random_X_bzl(size, numBands=numBands)
NO1, NO2 = Xrand.shape[0], X2rand.shape[0]
b1 = Xrand[:, 0].astype(int)
b2 = X2rand[:, 0].astype(int)
fz1 = 1 + Xrand[:, 1]
fz2 = 1 + X2rand[:, 1]
KC_rand, KL_rand =\
np.zeros((NO1, NO2)),\
np.zeros((NO1, NO2))
D_alpha_C_rand, D_alpha_L_rand, D_alpha_z_rand =\
np.zeros((NO1, NO2)),\
np.zeros((NO1, NO2)),\
np.zeros((NO1, NO2))
kernelparts(NO1, NO2, numCoefs, numLines,
alpha_C, alpha_L,
fcoefs_amp, fcoefs_mu, fcoefs_sig,
lines_mu[:numLines], lines_sig[:numLines], norms,
b1, fz1, b2, fz2, True,
KL_rand, KC_rand,
D_alpha_C_rand, D_alpha_L_rand, D_alpha_z_rand)
p1s = np.zeros(size, dtype=int)
p2s = np.zeros(size, dtype=int)
find_positions(size, nz, fz1, p1s, opzgrid)
find_positions(size, nz, fz2, p2s, opzgrid)
KC_interp, KL_interp =\
np.zeros((NO1, NO2)),\
np.zeros((NO1, NO2))
KC_diag_interp, KL_diag_interp =\
np.zeros((NO1, )),\
np.zeros((NO1, ))
D_alpha_C_interp, D_alpha_L_interp, D_alpha_z_interp =\
np.zeros((NO1, NO2)),\
np.zeros((NO1, NO2)),\
np.zeros((NO1, NO2))
kernel_parts_interp(size, size,
KC_interp,
b1, fz1, p1s,
b2, fz2, p2s,
opzgrid, KC_grid)
print(np.abs(KC_interp/KC_rand - 1))
assert np.mean(np.abs(KC_interp/KC_rand - 1)) < relative_accuracy
assert np.max(np.abs(KC_interp/KC_rand - 1)) < relative_accuracy
kernel_parts_interp(size, size,
D_alpha_C_interp,
b1, fz1, p1s,
b2, fz2, p2s,
opzgrid, D_alpha_C_grid)
print(np.abs(D_alpha_C_interp/D_alpha_C_rand - 1))
assert np.mean(np.abs(D_alpha_C_interp/D_alpha_C_rand - 1))\
< relative_accuracy
assert np.max(np.abs(D_alpha_C_interp/D_alpha_C_rand - 1))\
< relative_accuracy
```
#### File: Delight/tests/test_sedmixture.py
```python
import numpy as np
from delight.sedmixture import *
from scipy.misc import derivative
relative_accuracy = 0.01
def test_PhotometricFilter():
def f(x):
return np.exp(-0.5*((x-3e3)/1e2)**2)
x = np.linspace(2e3, 4e3, 1000)
y = f(x)
aFilter = PhotometricFilter('I', x, y)
xb = np.random.uniform(low=2e3, high=4e3, size=10)
res1 = f(xb)
res2 = aFilter(xb)
assert np.allclose(res2, res1, rtol=relative_accuracy)
def test_PhotometricFluxPolynomialInterpolation():
def f(x):
return np.exp(-0.5*((x-3e3)/1e2)**2)
x = np.linspace(2e3, 4e3, 1000)
y = f(x)
bandName = 'I'
photometricBands = [PhotometricFilter(bandName, x, y)]
x = np.linspace(2e1, 4e5, 1000)
y = f(x)
aTemplate = SpectralTemplate_z(x, y, photometricBands,
redshiftGrid=np.linspace(1e-2, 1.0, 10))
redshifts = np.random.uniform(1e-2, 1.0, 10)
f1 = aTemplate.photometricFlux(redshifts, bandName)
f2 = aTemplate.photometricFlux_bis(redshifts, bandName)
f1 = aTemplate.photometricFlux_gradz(redshifts, bandName)
f2 = aTemplate.photometricFlux_gradz_bis(redshifts, bandName)
```
#### File: Delight/tests/test_utils.py
```python
from scipy.interpolate import interp2d
from delight.utils import *
from astropy.cosmology import FlatLambdaCDM
from delight.utils import approx_flux_likelihood
from delight.posteriors import gaussian, gaussian2d
from delight.utils_cy import approx_flux_likelihood_cy
from delight.utils_cy import find_positions, bilininterp_precomputedbins
from time import time
relative_accuracy = 0.05
def test_approx_DL():
for z in np.linspace(0.01, 4, num=10):
z = 2.
v1 = approx_DL()(z)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=None)
v2 = cosmo.luminosity_distance(z).value
assert abs(v1/v2 - 1) < 0.01
def test_random_X():
size = 10
X = random_X_bzl(size, numBands=5, redshiftMax=3.0)
assert X.shape == (size, 3)
def test_full_fluxlikelihood():
nz, nt, nf = 100, 100, 5
for i in range(3):
f_obs = np.random.uniform(low=1, high=2, size=nf)
f_obs_var = np.random.uniform(low=.1, high=.2, size=nf)
f_mod = np.random.uniform(low=1, high=2, size=nz*nt*nf)\
.reshape((nz, nt, nf))
f_mod_covar = np.random.uniform(low=.1, high=.2, size=nz*nt*nf)\
.reshape((nz, nt, nf))
ell_hat, ell_var = np.ones((nz, )), 0.01*np.ones((nz, ))
t1 = time()
res1 = approx_flux_likelihood(
f_obs, f_obs_var, f_mod, f_mod_covar=f_mod_covar,
ell_hat=ell_hat, ell_var=ell_var)
t2 = time()
res2 = np.zeros_like(res1)
approx_flux_likelihood_cy(
res2, nz, nt, nf,
f_obs, f_obs_var, f_mod, f_mod_covar,
ell_hat, ell_var)
t3 = time()
print(t2-t1, t3-t2)
        assert np.allclose(res1, res2, rtol=relative_accuracy)
def test_flux_likelihood_approxscalemarg():
nz, nt, nf = 3, 2, 5
fluxes = np.random.uniform(low=1, high=2, size=nf)
fluxesVar = np.random.uniform(low=.1, high=.2, size=nf)
model_mean = np.random.uniform(low=1, high=2, size=nz*nt*nf)\
.reshape((nz, nt, nf))
model_var = np.random.uniform(low=.1, high=.2, size=nz*nt*nf)\
.reshape((nz, nt, nf))
model_covar = np.zeros((nz, nt, nf, nf))
for i in range(nz):
for j in range(nt):
model_covar[i, j, :, :] = np.diag(model_var[i, j, :])
ell, ell_var = 0, 0
like_grid1 = approx_flux_likelihood(
fluxes, fluxesVar,
model_mean,
f_mod_covar=0*model_var,
ell_hat=ell,
ell_var=ell_var,
normalized=False, marginalizeEll=True, renormalize=False
)
like_grid2, ells = scalefree_flux_likelihood(
fluxes, fluxesVar,
model_mean
)
relative_accuracy = 1e-2
    assert np.allclose(like_grid1, like_grid2, rtol=relative_accuracy)
def test_interp():
numBands, nobj = 3, 10
nz1, nz2 = 40, 50
grid1, grid2 = np.logspace(0., 1., nz1), np.linspace(1., 10., nz2)
v1s, v2s = np.random.uniform(1, 10, nobj), np.random.uniform(1, 10, nobj)
p1s = np.zeros((nobj, ), dtype=int)
find_positions(nobj, nz1, v1s, p1s, grid1)
p2s = np.zeros((nobj, ), dtype=int)
find_positions(nobj, nz2, v2s, p2s, grid2)
Kgrid = np.zeros((numBands, nz1, nz2))
for b in range(numBands):
Kgrid[b, :, :] = (grid1[:, None] * grid2[None, :])**(b+1.)
Kinterp = np.zeros((numBands, nobj))
bilininterp_precomputedbins(numBands, nobj, Kinterp, v1s, v2s, p1s, p2s,
grid1, grid2, Kgrid)
Kinterp2 = np.zeros((numBands, nobj))
for b in range(numBands):
interp = interp2d(grid2, grid1, Kgrid[b, :, :])
for o in range(nobj):
Kinterp2[b, o] = interp(v1s[o], v2s[o])
    assert np.allclose(Kinterp, Kinterp2, rtol=relative_accuracy)
def test_correlatedgaussianfactorization():
mu_ell, mu_lnz, var_ell, var_lnz, rho = np.random.uniform(0, 1, 5)
rho *= np.sqrt(var_ell*var_lnz)
for i in range(10):
lnz, ell = np.random.uniform(-1, 2, 2)
mu_ell_prime = mu_ell + rho * (lnz - mu_lnz) / var_lnz
var_ell_prime = var_ell - rho**2 / var_lnz
val1 = gaussian(mu_ell_prime, ell, var_ell_prime**0.5)
val1 *= gaussian(mu_lnz, lnz, var_lnz**0.5)
val2 = gaussian2d(ell, lnz, mu_ell, mu_lnz, var_ell, var_lnz, rho)
        assert np.abs(val1 / val2 - 1) < 1e-12
        rho = 0
        val2 = gaussian2d(ell, lnz, mu_ell, mu_lnz, var_ell, var_lnz, rho)
        val3 = gaussian(ell, mu_ell, var_ell**0.5) *\
            gaussian(lnz, mu_lnz, var_lnz**0.5)
        assert np.abs(val2 / val3 - 1) < 1e-12
``` |
{
"source": "josperdom1/AII",
"score": 3
} |
#### File: AII/BSoup/BeautifulSoup_2.py
```python
import sqlite3
from tkinter import *
from tkinter import messagebox
import urllib.request
from bs4 import BeautifulSoup
def extract():
html_doc = urllib.request.urlopen("https://www.ulabox.com/campaign/productos-sin-gluten#gref").read()
soup = BeautifulSoup(html_doc, 'html.parser')
articles = soup.find_all('article')
products = []
for a in articles:
product = []
hgroup = a.find('hgroup')
product_brand = hgroup.find('h4').find('a').string
product_name = hgroup.find('h3').find('a').string
product_link = hgroup.find('h3').find('a').get('href')
product_price = a.find('span', class_='delta').string + a.find('span', class_='milli').string[0:3]
product_sale = a.find('del',
class_='product-item__price product-item__price--old product-grid-footer__price--old nano | flush--bottom')
product.append(product_brand.strip())
product.append(product_name.strip())
product.append(product_link)
product.append(float(product_price.replace(',', '.')))
if product_sale is not None:
product.append(float(product_sale.string[0:4].replace(',', '.')))
products.append(product)
return products
def create_db():
products = extract()
    db = sqlite3.connect('products.db')  # connect to the database
    cursor = db.cursor()
    cursor.execute("""DROP TABLE if exists producto""")  # drop the 'producto' table if it already exists
    # Create the 'producto' table: brand, name, link to the product description and price(s)
    # (a product on sale has more than one price).
cursor.execute(
"""CREATE TABLE producto (marca text not null, nombre text not null, link text not null, precio real not null, oferta real)""")
for product in products:
marca = product[0]
nombre = product[1]
link = product[2]
precio = product[3]
oferta = None
if len(product) == 5:
oferta = product[4]
cursor.execute("""INSERT INTO producto (marca, nombre, link , precio, oferta) values(?,?,?,?,?)"""
, (marca, nombre, link, precio, oferta))
    db.commit()  # commit the inserts to the database
    cursor = db.execute("SELECT COUNT(*) FROM PRODUCTO")  # number of rows stored
messagebox.showinfo("Terminado", "Base de datos creada correctamente. Se han guardado " + str(
cursor.fetchone()[0]) + " elementos")
db.close()
def show_list(elements, tk):
# Scrollbar
scrollbar = Scrollbar(tk)
scrollbar.pack(side=RIGHT, fill=Y)
# Listbox widget
mylist = Listbox(tk, yscrollcommand=scrollbar.set)
mylist.pack(fill=BOTH, expand=1)
scrollbar.config(command=mylist.yview)
# Add elements to listbox
for item in elements:
mylist.insert(END, "Marca: " + item[0])
mylist.insert(END, "Nombre: " + item[1])
mylist.insert(END, "Link: " + item[2])
mylist.insert(END, "Precio normal: " + str(item[3]))
        if item[4] is not None:
mylist.insert(END, "Precio de oferta: " + str(item[4]))
mylist.insert(END, "")
def search_brand(query, tk):
db = sqlite3.connect('products.db')
cursor = db.cursor()
cursor.execute("SELECT * FROM PRODUCTO WHERE MARCA = '" + query + "'")
show_list(cursor.fetchall(), tk)
def clear_window(tk):
ls = tk.pack_slaves()
for l in ls:
l.destroy()
def show_main_buttons():
store_products_btn = Button(root, text="Almacenar productos", command=create_db)
store_products_btn.pack()
show_brand_btn = Button(root, text="Mostrar marca", command=show_brand)
show_brand_btn.pack()
search_deals_btn = Button(root, text="Buscar ofertas", command=search_deals)
search_deals_btn.pack()
def get_brands():
db = sqlite3.connect('products.db')
db.text_factory = str
cursor = db.cursor()
brands_list = cursor.execute("SELECT MARCA FROM PRODUCTO")
ls = []
for m in brands_list:
ls.append(m[0])
return ls
def show_brand():
new_window = Toplevel()
new_window.title("Buscar por marca")
new_window.geometry("800x600")
frame = Frame(new_window)
frame.pack()
brands = get_brands()
spinbox = Spinbox(frame, values=brands)
spinbox.pack(side="left")
results_frame = Frame(new_window)
results_frame.pack(fill=BOTH, expand=1)
def search_brand_caller():
clear_window(results_frame)
search_brand(spinbox.get(), results_frame)
b = Button(frame, text="Buscar", command=search_brand_caller)
b.pack(side="right")
def search_deals():
new_window = Toplevel()
new_window.title("Ofertas")
new_window.geometry("800x600")
db = sqlite3.connect('products.db')
cursor = db.cursor()
cursor.execute("SELECT * FROM PRODUCTO WHERE OFERTA NOT NULL")
prods_oferta = cursor.fetchall()
show_list(prods_oferta, new_window)
if __name__ == '__main__':
root = Tk()
root.title("Ulabox Scrapper")
root.geometry("300x150")
show_main_buttons()
root.mainloop()
```
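Once create_db has populated products.db, the table can also be queried outside the Tkinter UI. A small standalone sketch (not part of the original script) that lists every product currently on sale, following the column meaning used by show_list above:

```python
# Standalone sketch: list the products on sale stored by create_db().
import sqlite3

db = sqlite3.connect('products.db')
cursor = db.cursor()
cursor.execute("SELECT marca, nombre, precio, oferta FROM producto WHERE oferta IS NOT NULL")
for marca, nombre, precio, oferta in cursor.fetchall():
    # 'precio' is the regular price and 'oferta' the sale price, as labelled in show_list().
    print(f"{marca} - {nombre}: {precio:.2f} (sale: {oferta:.2f})")
db.close()
```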
#### File: films/migrations/0001_initial.py
```python
from django.conf import settings
import django.core.validators
from django.db import migrations, models
from django.apps import apps
import django.db.models.deletion
def load_categories(apps, schema_editor):
    # RunPython passes in the historical app registry, which is what should be
    # used to look up models inside a data migration.
    Category = apps.get_model('films', 'Category')
    cats = ['unknown', 'Action', 'Adventure',
            'Animation', 'Children\'s', 'Comedy', 'Crime', 'Documental', 'Drama', 'Fantasy', 'Film-Noir',
            'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
    for cat in cats:
        Category.objects.create(name=cat)
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=500)),
],
),
migrations.CreateModel(
name='Director',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(blank=True, max_length=500)),
('surname', models.TextField(max_length=500)),
('biography', models.TextField(blank=True, max_length=500)),
(
'user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='StandardUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=500)),
('surname', models.TextField(max_length=500)),
('birth_date', models.DateField(blank=True, null=True)),
('categories', models.ManyToManyField(to='films.Category')),
(
'user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Film',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique=True)),
('year', models.PositiveSmallIntegerField(validators=[django.core.validators.MinValueValidator(1900),
django.core.validators.MaxValueValidator(2019)],
verbose_name='Year')),
('summary', models.TextField(verbose_name='Summary')),
('categories', models.ManyToManyField(to='films.Category')),
('director', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='films.Director')),
],
),
migrations.RunPython(load_categories),
]
```
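RunPython calls the data-migration function with the historical app registry and a schema editor, and a second callable can be supplied to make the migration reversible. A short illustration of that common Django pattern (not present in the original file):

```python
# Illustrative reverse step for the data migration above (hypothetical, not in
# the original migration): delete the rows that load_categories created.
def unload_categories(apps, schema_editor):
    Category = apps.get_model('films', 'Category')
    Category.objects.all().delete()

# ... and in Migration.operations:
#     migrations.RunPython(load_categories, unload_categories)
```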
#### File: practica1/films/models.py
```python
from django.db import models
# Create your models here.
class User(models.Model):
uid = models.IntegerField(primary_key=True)
age = models.PositiveSmallIntegerField()
sex = models.CharField(max_length=5, choices=(("M", "Man"), ("F", "Femme")))
postal_code = models.CharField(max_length=500)
occupation = models.ForeignKey('Occupation', on_delete=models.CASCADE)
def __str__(self):
return str(self.uid)
class Occupation(models.Model):
name = models.CharField(max_length=500)
def __str__(self):
return self.name
class Category(models.Model):
cid = models.IntegerField(primary_key=True)
name = models.TextField(max_length=500)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "categories"
class Film(models.Model):
fid = models.IntegerField(primary_key=True)
title = models.CharField(max_length=100)
year = models.DateField(null=True, blank=True)
url = models.URLField()
rating = models.ManyToManyField(User, through='Rate')
categories = models.ManyToManyField(Category)
def __str__(self):
return self.title
class Rate(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
film = models.ForeignKey(Film, on_delete=models.CASCADE)
number = models.SmallIntegerField()
def __str__(self):
return str(self.number)
```
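With the Rate through model in place, ratings can be aggregated directly through the ORM. A short sketch, assuming Django's default reverse accessor names for the models above (e.g. run from a Django shell):

```python
# Illustrative query sketch: average rating and vote count per film.
from django.db.models import Avg, Count
from films.models import Film  # assumed app label/path

top_films = (Film.objects
             .annotate(avg_rating=Avg('rate__number'), votes=Count('rate'))
             .order_by('-avg_rating'))
for film in top_films[:10]:
    print(film.title, film.avg_rating, film.votes)
```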
#### File: AII/Whoosh/examen_whoosh.py
```python
__author__ = '<NAME>'
import urllib.request
import locale
import os
import urllib
import dateutil.parser
from tkinter import *
from tkinter import messagebox
from bs4 import BeautifulSoup
from whoosh.fields import *
from whoosh.index import create_in, open_dir
from whoosh.qparser import QueryParser
from datetime import datetime as dt
locale.setlocale(locale.LC_ALL, 'es_ES.UTF-8')
def extract_events():
html_doc = urllib.request.urlopen(
"https://www.sevilla.org/ayuntamiento/alcaldia/comunicacion/calendario/agenda-actividades").read()
soup = BeautifulSoup(html_doc, 'html.parser')
events = soup.find_all("div", "cal_info")
saved_events = []
for e in events:
event = []
title = e.find("span", "summary").get_text()
event.append(str(title))
document_by_line = e.find("div", "documentByLine")
try:
start_date = document_by_line.find("abbr", "dtstart").get("title")
event.append(dateutil.parser.parse(start_date))
except:
try:
event.append(dt.strptime(document_by_line.contents[0].strip(), "%d/%m/%Y"))
except:
event.append(None)
try:
end_date = document_by_line.find("abbr", "dtend").get("title")
event.append(dateutil.parser.parse(end_date))
except:
event.append(None)
try:
description = e.find("p", "description").string
event.append(str(description))
except:
event.append("")
categories = []
li_category = e.find("li", "category")
if li_category is not None:
for cat in li_category.find_all('span'):
categories.append(cat.string)
event.append(", ".join(categories))
saved_events.append(event)
return saved_events
def get_events_schema():
return Schema(title=TEXT(stored=True), start_date=DATETIME(stored=True),
end_date=DATETIME(stored=True), description=TEXT(stored=True),
categories=TEXT(stored=True))
def create_events_index(dir_index, events):
if not os.path.exists(dir_index):
os.mkdir(dir_index)
ind = create_in(dir_index, schema=get_events_schema())
writer = ind.writer()
for event in events:
title = event[0]
start_date = event[1]
end_date = event[2]
description = event[3]
categories = event[4]
writer.add_document(title=title, start_date=start_date,
end_date=end_date, description=description,
categories=categories)
writer.commit()
messagebox.showinfo("Terminado",
"Base de datos creada correctamente. Se han guardado " + str(len(events)) + " elementos")
def search_events_a(text):
ix = open_dir("events_index")
words = text.split(" ")
for word in words:
text += " description:" + word
with ix.searcher() as searcher:
my_query = QueryParser("title", ix.schema).parse(text)
results = searcher.search(my_query, limit=None)
# events = [[r["title"], r["start_date"], r["end_date"]] for r in results]
events = []
for r in results:
event = []
event.append(r["title"])
try:
event.append(r["start_date"])
except:
event.append("")
try:
event.append(r["end_date"])
except:
event.append("")
events.append(event)
return events
def search_events_b(date):
ix = open_dir("events_index")
with ix.searcher() as searcher:
my_query = QueryParser("start_date", ix.schema).parse(f"start_date:[to {date}]")
results = searcher.search(my_query, limit=None)
# events = [[r["title"], r["start_date"], r["end_date"]] for r in results]
events = []
for r in results:
event = [r["title"]]
try:
event.append(r["start_date"])
except:
event.append("")
try:
event.append(r["end_date"])
except:
event.append("")
events.append(event)
return events
def search_events_c(category):
ix = open_dir("events_index")
with ix.searcher() as searcher:
my_query = QueryParser("categories", ix.schema).parse(f'"{category}"')
results = searcher.search(my_query, limit=None)
# events = [[r["title"], r["start_date"], r["end_date"]] for r in results]
events = []
for r in results:
event = []
event.append(r["title"])
try:
event.append(r["start_date"])
except:
event.append("")
try:
event.append(r["end_date"])
except:
event.append("")
events.append(event)
return events
def get_categories():
html_doc = urllib.request.urlopen(
"https://www.sevilla.org/ayuntamiento/alcaldia/comunicacion/calendario/agenda-actividades").read()
soup = BeautifulSoup(html_doc, 'html.parser')
events = soup.find_all("div", "cal_info")
all_categories = []
for e in events:
categories = e.find('li', 'category')
if categories is not None:
for cat in categories.find_all('span'):
if not all_categories.__contains__(cat.string):
all_categories.append(cat.string)
return all_categories
def show_events_a(events, frame):
# Scrollbar
scrollbar = Scrollbar(frame)
scrollbar.pack(side=RIGHT, fill=Y)
# Listbox widget
my_list = Listbox(frame, yscrollcommand=scrollbar.set)
my_list.pack(fill=BOTH, expand=1)
scrollbar.config(command=my_list.yview)
# Add elements to listbox
if events is not None:
for item in events:
my_list.insert(END, "Title " + item[0])
my_list.insert(END, "Start date: " + str(item[1]))
my_list.insert(END, "End date: " + str(item[2]))
my_list.insert(END, "")
def search_window(option):
new_window = Toplevel()
new_window.title("Search")
new_window.geometry("800x600")
main_frame = Frame(new_window)
results_frame = Frame(new_window)
main_frame.pack()
results_frame.pack(fill=BOTH, expand=1)
if option == 1:
entry = Entry(main_frame)
entry.pack(side="left")
def search_caller():
clear_window(results_frame)
events = search_events_a(entry.get())
show_events_a(events, results_frame)
elif option == 2:
entry = Entry(main_frame)
entry.pack(side="left")
def search_caller():
clear_window(results_frame)
convert = dt.strptime(entry.get(), '%d de %B de %Y')
d = convert.strftime("%Y%m%d")
events = search_events_b(d)
show_events_a(events, results_frame)
else:
categories = get_categories()
spinbox = Spinbox(main_frame, values=categories)
spinbox.pack(side="left")
def search_caller():
clear_window(results_frame)
events = search_events_c(spinbox.get())
show_events_a(events, results_frame)
b = Button(main_frame, text="Search", command=search_caller)
b.pack(side="right")
def clear_window(tk):
ls = tk.pack_slaves()
for l in ls:
l.destroy()
def main():
root = Tk()
def close_window():
root.destroy()
root.title("Sevilla")
root.geometry("800x600")
menu_bar = Menu(root)
root.config(menu=menu_bar)
# Home
home_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="Home", menu=home_menu)
home_menu.add_command(label="Create index", command=lambda: create_events_index('events_index', extract_events()))
home_menu.add_separator()
home_menu.add_command(label="Close", command=close_window)
search_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="Search", menu=search_menu)
search_menu.add_command(label="By title and description", command=lambda: search_window(1))
search_menu.add_command(label="By date", command=lambda: search_window(2))
search_menu.add_command(label="By category", command=lambda: search_window(3))
root.mainloop()
if __name__ == '__main__':
main()
```
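The index and search helpers above can also be exercised without the Tkinter menu. A minimal driver sketch (assuming this module is importable as examen_whoosh and that the agenda page is reachable; note that create_events_index pops a Tk info dialog when it finishes):

```python
# Illustrative driver, not part of the original script.
from examen_whoosh import create_events_index, extract_events, search_events_a

# Build (or rebuild) the Whoosh index from the scraped agenda ...
create_events_index('events_index', extract_events())

# ... and run a title/description search against it.
for title, start_date, end_date in search_events_a('concierto'):
    print(title, start_date, end_date)
```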
#### File: AII/Whoosh/pruebas.py
```python
from whoosh.index import create_in, open_dir
from whoosh.fields import *
from whoosh.qparser import QueryParser
from tkinter import messagebox
from tkinter import *
from bs4 import BeautifulSoup
from datetime import datetime
import urllib.request
import locale
import os
locale.setlocale(locale.LC_ALL, 'es_ES.UTF-8')
def extract_events():
print(extract_events())
``` |
{
"source": "josphat-mwangi/Blog-",
"score": 3
} |
#### File: app/main/views.py
```python
from flask import render_template, request, redirect, url_for, abort,flash
from . import main
from ..models import User, Post
from .. import db
from .forms import UpdateProfile,PostForm
from flask_login import login_required, current_user
import datetime
from ..requests import get_quote
# Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
post = Post.get_posts()
quote = get_quote()
title = 'Home - Welcome to Perfect Blog app'
return render_template('index.html', title=title,posts=post,quote=quote)
@main.route("/about")
def about():
return render_template('about.html', title='About')
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username=uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile', uname=user.username))
return render_template('profile/update.html', form=form)
@main.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data,content=form.content.data,author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post has been created!', 'success')
return redirect(url_for('main.index'))
return render_template('create_post.html', title='New Post',
form=form, legend='New Post')
@main.route("/post/<int:post_id>", methods=['GET','POST'])
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html', title=post.title, post=post)
@main.route("/post/<int:id>/update", methods=['GET', 'POST'])
@login_required
def update_post(id):
post = Post.query.get_or_404(id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('main.post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post',
form=form, legend='Update Post')
@main.route("/post/<int:id>/delete", methods=['POST'])
@login_required
def delete_post(id):
post = Post.query.get_or_404(id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.index'))
```
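The views above import UpdateProfile and PostForm from .forms, which is not included in this snippet. A hypothetical sketch of what those flask_wtf forms might look like, based only on the fields the views access (form.bio, form.title, form.content):

```python
# Hypothetical forms.py sketch (the real file is not included here).
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import DataRequired

class UpdateProfile(FlaskForm):
    bio = TextAreaField('Tell us about yourself.', validators=[DataRequired()])
    submit = SubmitField('Submit')

class PostForm(FlaskForm):
    title = StringField('Title', validators=[DataRequired()])
    content = TextAreaField('Content', validators=[DataRequired()])
    submit = SubmitField('Post')
```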
#### File: psycopg2cffi/_impl/_build_libpq.py
```python
from __future__ import print_function
from distutils import sysconfig
import os.path
import re
import sys
import subprocess
from cffi import FFI
PLATFORM_IS_WINDOWS = sys.platform.lower().startswith('win')
LIBRARY_NAME = 'pq' if not PLATFORM_IS_WINDOWS else 'libpq'
class PostgresConfig:
def __init__(self):
try:
from psycopg2cffi import _config
except ImportError:
self.pg_config_exe = None
if not self.pg_config_exe:
self.pg_config_exe = self.autodetect_pg_config_path()
if self.pg_config_exe is None:
# FIXME - do we need some way to set it?
sys.stderr.write("""\
Error: pg_config executable not found.
Please add the directory containing pg_config to the PATH.
""")
sys.exit(1)
self.libpq_include_dir = self.query('includedir') or None
self.libpq_lib_dir = self.query('libdir') or None
self.libpq_version = self.find_version()
else:
self.libpq_include_dir = _config.PG_INCLUDE_DIR
self.libpq_lib_dir = _config.PG_LIB_DIR
self.libpq_version = _config.PG_VERSION
def query(self, attr_name):
"""Spawn the pg_config executable, querying for the given config
name, and return the printed value, sanitized. """
try:
pg_config_process = subprocess.Popen(
[self.pg_config_exe, "--" + attr_name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
raise Warning("Unable to find 'pg_config' file in '%s'" %
self.pg_config_exe)
pg_config_process.stdin.close()
result = pg_config_process.stdout.readline().strip()
if not result:
raise Warning(pg_config_process.stderr.readline())
if not isinstance(result, str):
result = result.decode('ascii')
return result
def find_on_path(self, exename, path_directories=None):
if not path_directories:
path_directories = os.environ['PATH'].split(os.pathsep)
for dir_name in path_directories:
fullpath = os.path.join(dir_name, exename)
if os.path.isfile(fullpath):
return fullpath
return None
def autodetect_pg_config_path(self):
"""Find and return the path to the pg_config executable."""
if PLATFORM_IS_WINDOWS:
return self.autodetect_pg_config_path_windows()
else:
return self.find_on_path('pg_config')
def autodetect_pg_config_path_windows(self):
"""Attempt several different ways of finding the pg_config
executable on Windows, and return its full path, if found."""
# This code only runs if they have not specified a pg_config option
# in the config file or via the commandline.
# First, check for pg_config.exe on the PATH, and use that if found.
pg_config_exe = self.find_on_path('pg_config.exe')
if pg_config_exe:
return pg_config_exe
# Now, try looking in the Windows Registry to find a PostgreSQL
# installation, and infer the path from that.
pg_config_exe = self._get_pg_config_from_registry()
if pg_config_exe:
return pg_config_exe
return None
def _get_pg_config_from_registry(self):
try:
import winreg
except ImportError:
import _winreg as winreg
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
pg_inst_list_key = winreg.OpenKey(reg,
'SOFTWARE\\PostgreSQL\\Installations')
except EnvironmentError:
# No PostgreSQL installation, as best as we can tell.
return None
try:
# Determine the name of the first subkey, if any:
try:
first_sub_key_name = winreg.EnumKey(pg_inst_list_key, 0)
except EnvironmentError:
return None
pg_first_inst_key = winreg.OpenKey(reg,
'SOFTWARE\\PostgreSQL\\Installations\\'
+ first_sub_key_name)
try:
pg_inst_base_dir = winreg.QueryValueEx(
pg_first_inst_key, 'Base Directory')[0]
finally:
winreg.CloseKey(pg_first_inst_key)
finally:
winreg.CloseKey(pg_inst_list_key)
pg_config_path = os.path.join(
pg_inst_base_dir, 'bin', 'pg_config.exe')
if not os.path.exists(pg_config_path):
return None
# Support unicode paths, if this version of Python provides the
# necessary infrastructure:
if sys.version_info[0] < 3 \
and hasattr(sys, 'getfilesystemencoding'):
pg_config_path = pg_config_path.encode(
sys.getfilesystemencoding())
return pg_config_path
def find_version(self):
try:
# Here we take a conservative approach: we suppose that
# *at least* PostgreSQL 7.4 is available (this is the only
# 7.x series supported by psycopg 2)
pgversion = self.query('version').split()[1]
except:
pgversion = '7.4.0'
verre = re.compile(
r'(\d+)\.(\d+)(?:(?:\.(\d+))|(devel|(alpha|beta|rc)\d+)?)')
m = verre.match(pgversion)
if m:
pgmajor, pgminor, pgpatch = m.group(1, 2, 3)
if pgpatch is None or not pgpatch.isdigit():
pgpatch = 0
else:
sys.stderr.write(
"Error: could not determine PostgreSQL version from '%s'"
% pgversion)
sys.exit(1)
return int(
'%02X%02X%02X' % (int(pgmajor), int(pgminor), int(pgpatch)), 16)
_config = PostgresConfig()
ffi = FFI()
# order and comments taken from libpq (ctypes impl)
ffi.cdef('''
static int const _PG_VERSION;
// postgres_ext.h
typedef unsigned int Oid;
// See comment below.
static int const LIBPQ_DIAG_SEVERITY;
static int const LIBPQ_DIAG_SQLSTATE;
static int const LIBPQ_DIAG_MESSAGE_PRIMARY;
static int const LIBPQ_DIAG_MESSAGE_DETAIL;
static int const LIBPQ_DIAG_MESSAGE_HINT;
static int const LIBPQ_DIAG_STATEMENT_POSITION;
static int const LIBPQ_DIAG_INTERNAL_POSITION;
static int const LIBPQ_DIAG_INTERNAL_QUERY;
static int const LIBPQ_DIAG_CONTEXT;
static int const LIBPQ_DIAG_SOURCE_FILE;
static int const LIBPQ_DIAG_SCHEMA_NAME;
static int const LIBPQ_DIAG_TABLE_NAME;
static int const LIBPQ_DIAG_COLUMN_NAME;
static int const LIBPQ_DIAG_DATATYPE_NAME ;
static int const LIBPQ_DIAG_CONSTRAINT_NAME;
static int const LIBPQ_DIAG_SOURCE_LINE;
static int const LIBPQ_DIAG_SOURCE_FUNCTION;
// libpq-fe.h
typedef enum
{
/*
* Although it is okay to add to this list, values which become unused
* should never be removed, nor should constants be redefined - that would
* break compatibility with existing code.
*/
CONNECTION_OK,
CONNECTION_BAD,
/* Non-blocking mode only below here */
/*
* The existence of these should never be relied upon - they should only
* be used for user feedback or similar purposes.
*/
CONNECTION_STARTED, /* Waiting for connection to be made. */
CONNECTION_MADE, /* Connection OK; waiting to send. */
CONNECTION_AWAITING_RESPONSE, /* Waiting for a response from the
* postmaster. */
CONNECTION_AUTH_OK, /* Received authentication; waiting for
* backend startup. */
CONNECTION_SETENV, /* Negotiating environment. */
CONNECTION_SSL_STARTUP, /* Negotiating SSL. */
CONNECTION_NEEDED /* Internal state: connect() needed */
} ConnStatusType;
typedef enum
{
PGRES_POLLING_FAILED = 0,
PGRES_POLLING_READING, /* These two indicate that one may */
PGRES_POLLING_WRITING, /* use select before polling again. */
PGRES_POLLING_OK,
PGRES_POLLING_ACTIVE /* unused; keep for awhile for backwards
* compatibility */
} PostgresPollingStatusType;
typedef enum
{
PGRES_EMPTY_QUERY = 0, /* empty query string was executed */
PGRES_COMMAND_OK, /* a query command that doesn't return
* anything was executed properly by the
* backend */
PGRES_TUPLES_OK, /* a query command that returns tuples was
* executed properly by the backend, PGresult
* contains the result tuples */
PGRES_COPY_OUT, /* Copy Out data transfer in progress */
PGRES_COPY_IN, /* Copy In data transfer in progress */
PGRES_BAD_RESPONSE, /* an unexpected response was recv'd from the
* backend */
PGRES_NONFATAL_ERROR, /* notice or warning message */
PGRES_FATAL_ERROR, /* query failed */
} ExecStatusType;
typedef enum
{
PQTRANS_IDLE, /* connection idle */
PQTRANS_ACTIVE, /* command in progress */
PQTRANS_INTRANS, /* idle, within transaction block */
PQTRANS_INERROR, /* idle, within failed transaction */
PQTRANS_UNKNOWN /* cannot determine status */
} PGTransactionStatusType;
typedef ... PGconn;
typedef ... PGresult;
typedef ... PGcancel;
typedef struct pgNotify
{
char *relname; /* notification condition name */
int be_pid; /* process ID of notifying server process */
char *extra; /* notification parameter */
...;
} PGnotify;
// Database connection control functions
extern PGconn *PQconnectdb(const char *conninfo);
extern PGconn *PQconnectStart(const char *conninfo);
extern /*PostgresPollingStatusType*/ int PQconnectPoll(PGconn *conn);
extern void PQfinish(PGconn *conn);
// Connection status functions
extern /*ConnStatusType*/ int PQstatus(const PGconn *conn);
extern /*PGTransactionStatusType*/ int PQtransactionStatus(const PGconn *conn);
extern const char *PQparameterStatus(const PGconn *conn, const char *paramName);
extern int PQprotocolVersion(const PGconn *conn);
extern int PQserverVersion(const PGconn *conn);
extern char *PQerrorMessage(const PGconn *conn);
extern int PQsocket(const PGconn *conn);
extern int PQbackendPID(const PGconn *conn);
// Command execution functions
extern PGresult *PQexec(PGconn *conn, const char *query);
extern /*ExecStatusType*/ int PQresultStatus(const PGresult *res);
extern char *PQresultErrorMessage(const PGresult *res);
extern char *PQresultErrorField(const PGresult *res, int fieldcode);
extern void PQclear(PGresult *res);
// Retrieving query result information
extern int PQntuples(const PGresult *res);
extern int PQnfields(const PGresult *res);
extern char *PQfname(const PGresult *res, int field_num);
extern Oid PQftype(const PGresult *res, int field_num);
extern int PQfsize(const PGresult *res, int field_num);
extern int PQfmod(const PGresult *res, int field_num);
extern int PQgetisnull(const PGresult *res, int tup_num, int field_num);
extern int PQgetlength(const PGresult *res, int tup_num, int field_num);
extern char *PQgetvalue(const PGresult *res, int tup_num, int field_num);
// direct parsers - not part of libpq
int PQEgetlong(int64_t *val, const PGresult *res, int tup_num, int field_num);
int PQEgetint(int32_t *val, const PGresult *res, int tup_num, int field_num);
int PQEgetfloat(float *val, const PGresult *res, int tup_num, int field_num);
int PQEgetdouble(double *val, const PGresult *res, int tup_num, int field_num);
// Retrieving other result information
extern char *PQcmdStatus(PGresult *res);
extern char *PQcmdTuples(PGresult *res);
extern Oid PQoidValue(const PGresult *res); /* new and improved */
''')
if _config.libpq_version >= 0x090000:
ffi.cdef('''
// Escaping string for inclusion in sql commands
extern char *PQescapeLiteral(PGconn *conn, const char *str, size_t len);
''')
ffi.cdef('''
// Escaping string for inclusion in sql commands
extern size_t PQescapeStringConn(PGconn *conn,
char *to, const char *from, size_t length,
int *error);
extern size_t PQescapeString(char *to, const char *from, size_t length);
extern unsigned char *PQescapeByteaConn(PGconn *conn,
const unsigned char *from, size_t from_length,
size_t *to_length);
extern unsigned char *PQescapeBytea(const unsigned char *from, size_t from_length,
size_t *to_length);
extern unsigned char *PQunescapeBytea(const unsigned char *strtext,
size_t *retbuflen);
// Asynchronous Command Processing
extern int PQsendQuery(PGconn *conn, const char *query);
extern PGresult *PQgetResult(PGconn *conn);
extern int PQconsumeInput(PGconn *conn);
extern int PQisBusy(PGconn *conn);
extern int PQsetnonblocking(PGconn *conn, int arg);
extern int PQflush(PGconn *conn);
// Cancelling queries in progress
extern PGcancel *PQgetCancel(PGconn *conn);
extern void PQfreeCancel(PGcancel *cancel);
extern int PQcancel(PGcancel *cancel, char *errbuf, int errbufsize);
extern int PQrequestCancel(PGconn *conn);
// Functions Associated with the COPY Command
extern int PQgetCopyData(PGconn *conn, char **buffer, int async);
extern int PQputCopyEnd(PGconn *conn, const char *errormsg);
extern int PQputCopyData(PGconn *conn, const char *buffer, int nbytes);
// Miscellaneous functions
extern void PQfreemem(void *ptr);
// Notice processing
typedef void (*PQnoticeProcessor) (void *arg, const char *message);
extern PQnoticeProcessor PQsetNoticeProcessor(PGconn *conn,
PQnoticeProcessor proc,
void *arg);
extern PGnotify *PQnotifies(PGconn *conn);
// Large object
extern int lo_open(PGconn *conn, Oid lobjId, int mode);
extern Oid lo_create(PGconn *conn, Oid lobjId);
extern Oid lo_import(PGconn *conn, const char *filename);
extern int lo_read(PGconn *conn, int fd, char *buf, size_t len);
extern int lo_write(PGconn *conn, int fd, const char *buf, size_t len);
extern int lo_tell(PGconn *conn, int fd);
extern int lo_lseek(PGconn *conn, int fd, int offset, int whence);
extern int lo_close(PGconn *conn, int fd);
extern int lo_unlink(PGconn *conn, Oid lobjId);
extern int lo_export(PGconn *conn, Oid lobjId, const char *filename);
extern int lo_truncate(PGconn *conn, int fd, size_t len);
''')
C_SOURCE = '''
#if (defined(_MSC_VER) && _MSC_VER < 1600)
typedef __int32 int32_t;
typedef __int64 int64_t;
#else
#include <stdint.h>
#endif
#include <postgres_ext.h>
#include <libpq-fe.h>
int PQEgetlong(int64_t *raw_res, const PGresult *res, int tup_num, int field_num) {
char *val = PQgetvalue(res, tup_num, field_num);
if (!val) return -1;
sscanf(val, "%ld", (long *)raw_res);
return 0;
}
int PQEgetint(int32_t *raw_res, const PGresult *res, int tup_num, int field_num) {
char *val = PQgetvalue(res, tup_num, field_num);
if (!val) return -1;
sscanf(val, "%d", (int *)raw_res);
return 0;
}
int PQEgetfloat(float *raw_res, const PGresult *res, int tup_num, int field_num) {
char *val = PQgetvalue(res, tup_num, field_num);
if (!val) return -1;
sscanf(val, "%f", raw_res);
return 0;
}
int PQEgetdouble(double *raw_res, const PGresult *res, int tup_num, int field_num) {
char *val = PQgetvalue(res, tup_num, field_num);
if (!val) return -1;
sscanf(val, "%lf", raw_res);
return 0;
}
// Real names start with PG_DIAG_, but here we define our prefixes,
// because some are defined and some are not depending on pg version.
static int const LIBPQ_DIAG_SEVERITY = 'S';
static int const LIBPQ_DIAG_SQLSTATE = 'C';
static int const LIBPQ_DIAG_MESSAGE_PRIMARY = 'M';
static int const LIBPQ_DIAG_MESSAGE_DETAIL = 'D';
static int const LIBPQ_DIAG_MESSAGE_HINT = 'H';
static int const LIBPQ_DIAG_STATEMENT_POSITION = 'P';
static int const LIBPQ_DIAG_INTERNAL_POSITION = 'p';
static int const LIBPQ_DIAG_INTERNAL_QUERY = 'q';
static int const LIBPQ_DIAG_CONTEXT = 'W';
static int const LIBPQ_DIAG_SCHEMA_NAME = 's';
static int const LIBPQ_DIAG_TABLE_NAME = 't';
static int const LIBPQ_DIAG_COLUMN_NAME = 'c';
static int const LIBPQ_DIAG_DATATYPE_NAME = 'd';
static int const LIBPQ_DIAG_CONSTRAINT_NAME = 'n';
static int const LIBPQ_DIAG_SOURCE_FILE = 'F';
static int const LIBPQ_DIAG_SOURCE_LINE = 'L';
static int const LIBPQ_DIAG_SOURCE_FUNCTION = 'R';
''' + '''
static int const _PG_VERSION = {libpq_version};
'''.format(libpq_version=_config.libpq_version)
_or_empty = lambda x: [x] if x else []
C_SOURCE_KWARGS = dict(
libraries=[LIBRARY_NAME],
library_dirs=(
_or_empty(sysconfig.get_config_var('LIBDIR')) +
_or_empty(_config.libpq_lib_dir)
),
include_dirs=(
_or_empty(sysconfig.get_python_inc()) +
_or_empty(_config.libpq_include_dir)
)
)
if hasattr(ffi, 'set_source'):
ffi.set_source('psycopg2cffi._impl._libpq', C_SOURCE, **C_SOURCE_KWARGS)
if __name__ == '__main__':
ffi.compile()
``` |
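A minimal usage sketch of the binding built above, assuming cffi's API mode exposes the compiled module as `psycopg2cffi._impl._libpq` with the usual `ffi`/`lib` pair; the DSN and query are made-up examples, and the enum values are written as integers that match the declarations above.
```python
# Hypothetical usage of the compiled binding; module path, DSN and query are assumptions.
from psycopg2cffi._impl._libpq import ffi, lib

conn = lib.PQconnectdb(b"dbname=test")
if lib.PQstatus(conn) != 0:                       # 0 == CONNECTION_OK in the enum above
    raise RuntimeError(ffi.string(lib.PQerrorMessage(conn)))

res = lib.PQexec(conn, b"SELECT 42")
if lib.PQresultStatus(res) == 2:                  # 2 == PGRES_TUPLES_OK in the enum above
    out = ffi.new("int32_t *")
    if lib.PQEgetint(out, res, 0, 0) == 0:        # direct parser defined in C_SOURCE
        print(out[0])                             # -> 42
lib.PQclear(res)
lib.PQfinish(conn)
```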
{
"source": "josphat-mwangi/Django1",
"score": 2
} |
#### File: Django1/photos/models.py
```python
from django.db import models
# Create your models here.
class Location(models.Model):
name=models.CharField(max_length=200)
def save_location(self):
self.save()
def delete_location(self):
self.delete()
    def update_location(self, update):
        self.name = update
        self.save()
def __str__(self):
return self.name
class Category(models.Model):
name=models.CharField(max_length=200)
def save_category(self):
self.save()
def delete_category(self):
self.delete()
    def update_category(self, update):
        self.name = update
        self.save()
def __str__(self):
return self.name
class Image(models.Model):
image_name=models.CharField(max_length=100)
image_description=models.TextField()
location=models.ForeignKey('Location',on_delete=models.CASCADE)
category=models.ManyToManyField(Category)
image = models.ImageField(upload_to = 'images/', blank=True)
pub_date = models.DateTimeField(auto_now_add=True)
def save_image(self):
self.save()
def delete_image(self):
self.delete()
@classmethod
def photo_display(cls):
photos = cls.objects.filter()
return photos
@classmethod
def search_by_category(cls, search_term):
display = cls.objects.filter(category__name__icontains=search_term)
return display
@classmethod
def filter_by_location(cls, id):
images = Image.objects.filter(location_id=id)
return images
def __str__(self):
return self.image_name
``` |
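A short shell-style sketch of how the models above might be exercised; the sample names and data are made up.
```python
# Hypothetical Django shell session; sample data is made up.
from photos.models import Location, Category, Image

nairobi = Location(name="Nairobi")
nairobi.save_location()
travel = Category(name="travel")
travel.save_category()

photo = Image(image_name="sunset", image_description="Sunset over the city", location=nairobi)
photo.save_image()
photo.category.add(travel)                        # M2M links are added after the first save

print(Image.search_by_category("travel"))         # queryset of matching images
print(Image.filter_by_location(nairobi.id))
```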
{
"source": "josphat-mwangi/Instagram_clone",
"score": 2
} |
#### File: Instagram_clone/core/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils import timezone
# Create your models here.
class Image(models.Model):
image_name = models.CharField(max_length=60)
image_caption=models.TextField()
likes=models.IntegerField(default=0)
# comments=models.TextField()
image = models.ImageField(upload_to='images/',blank=True)
pub_date = models.DateTimeField(auto_now_add=True)
profile=models.ForeignKey('Profile',on_delete=models.CASCADE)
@classmethod
def photo_display(cls):
photos = cls.objects.filter()
return photos
@classmethod
def get_single_photo(cls,id):
image = cls.objects.get(pk=id)
return image
def __str__(self):
return f'{self.image_name }'
class Profile(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE)
profile_photo = models.ImageField(default='default.jpeg', upload_to='images/')
bio = models.CharField(max_length=500)
def save_profile(self):
self.save()
@classmethod
def search_profile(cls, name):
profile = Profile.objects.filter(user__username__icontains = name)
return profile
@classmethod
def get_by_id(cls, id):
profile = Profile.objects.get(user = id)
return profile
@classmethod
def filter_by_id(cls, id):
profile = Profile.objects.filter(user = id).first()
return profile
def __str__(self):
return f'{self.user.username} Profile'
class Comments(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
image = models.ForeignKey('Post',on_delete=models.CASCADE)
comment = models.CharField(max_length=150, blank=True)
date_commented = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-date_commented']
def save_comment(self):
self.save()
@classmethod
def get_comments(cls,id):
comments = cls.objects.filter(image__id=id)
return comments
class Post(models.Model):
title = models.CharField(max_length=100)
image = models.ImageField(upload_to='images/')
content = models.TextField()
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
@classmethod
def get_single_photo(cls,id):
image = cls.objects.get(pk=id)
return image
``` |
{
"source": "josphat-njoroge/greatNews",
"score": 3
} |
#### File: greatNews/tests/test_source.py
```python
import unittest
# from .models import NewsSource
from models import Source
# Source = NewsSource.Source
class NewsTest(unittest.TestCase):
"""
Test the behaviour of News class
"""
def setUp(self):
"""
Set up method that will run before every Test
"""
self.new_source = Source("abc-news","ABC_News","Your trusted source for breaking news","https://abcnews.go.com", "general","us")
def test_instance(self):
self.assertTrue(isinstance(self.new_source,Source))
def test_save_source(self):
self.new_source.save_source() # saving the new source
self.assertEqual(len(Source.source_results),1)
``` |
{
"source": "josphat-otieno/blog-post",
"score": 3
} |
#### File: blog-post/tests/test_post.py
```python
import unittest
from app.models import Post, User
class PitchTest(unittest.TestCase):
def setUp(self):
self.user_jose=User(username='oti', password_hash='<PASSWORD>', email='<EMAIL>')
self.new_post=Post(title='jose', content='race with time', user=self.user_jose)
def tearDown(self):
Post.query.delete()
User.query.delete()
def test_check_instance_variables(self):
self.assertEqual(self.new_post.title, 'jose')
self.assertEqual(self.new_post.content, 'race with time')
self.assertEqual(self.new_post.user, self.user_jose)
def test_save_post(self):
self.new_post.save_post()
self.assertTrue(len(Post.query.all())==1)
def test_get_posts(self):
self.new_post.save_post()
got_posts=Post.get_all_posts()
self.assertTrue(len(got_posts)>0)
def test_delete_post(self):
self.new_post.delete_post()
        self.assertTrue(len(Post.query.all()) == 0)
```
#### File: blog-post/tests/test_subscriber.py
```python
from app.models import Subscriber
import unittest
class SubscriberTest(unittest.TestCase):
def setUp(self):
self.new_subscriber=Subscriber(email='<EMAIL>')
def tearDown(self):
Subscriber.query.delete()
def test_check_instance_variables(self):
self.assertEqual(self.new_subscriber.email, '<EMAIL>')
def test_save_subscriber(self):
self.new_subscriber.save_subscriber()
self.assertTrue(len(Subscriber.query.all())==1)
``` |
{
"source": "josphat-otieno/Django-Gallery",
"score": 2
} |
#### File: Django-Gallery/gallery/views.py
```python
from django.shortcuts import render,redirect
from django.http import HttpResponse, Http404
from .models import Category, Location, Images
# Create your views here.
def index(request):
images = Images.objects.all()
categories = Category.objects.all()
locations = Location.objects.all()
return render(request, 'all-gallery/index.html', {"images": images, "categories":categories, "locations":locations})
def image(request,image_id):
try:
image = Images.objects.get(id = image_id)
except Images.DoesNotExist:
raise Http404()
return render(request,"all-gallery/image.html", {"image":image})
def copy_to_clipboard(request):
pass
def search_results(request):
if 'images' in request.GET and request.GET["images"]:
search_term = request.GET.get("images")
searched_images = Images.search_image_by_category(search_term)
message = f"{search_term}"
return render(request, 'all-gallery/search.html',{"message":message,"all_images": searched_images})
else:
message = "You haven't searched for any category"
return render(request, 'all-gallery/search.html',{"message":message})
def get_category_images(request,category):
image_categories = Images.get_images_by_category(category)
return render(request,'all-gallery/category.html',{'image-categories':image_categories})
def get_images_by_location(request,location):
location_images = Images.get_images_by_location(location)
return render(request,'all-gallery/location.html',{'location_images':location_images})
``` |
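A possible `gallery/urls.py` wiring for the views above; the URL patterns and names are assumptions, not taken from the repository.
```python
# Hypothetical gallery/urls.py; paths and names are assumptions.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('image/<int:image_id>/', views.image, name='image'),
    path('search/', views.search_results, name='search_results'),
    path('category/<str:category>/', views.get_category_images, name='category_images'),
    path('location/<str:location>/', views.get_images_by_location, name='location_images'),
]
```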
{
"source": "josphat-otieno/Neighbour-Spy",
"score": 2
} |
#### File: Neighbour-Spy/spypyapp/email.py
```python
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
def send_welcome_email(name,receiver):
#creating message subject and sender
subject = "Thanks for signing up to spy on your neighbours"
sender = '<EMAIL>'
#passing in the context variables
text_context = render_to_string('email/email.txt',{"name":name})
html_content = render_to_string('email/email.html',{"name":name})
msg = EmailMultiAlternatives(subject, text_context,sender,[receiver])
msg.attach_alternative(html_content, 'text/html')
msg.send()
``` |
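A minimal call-site sketch for the helper above; the arguments are placeholders, and it assumes the `email/email.txt` and `email/email.html` templates plus an email backend are configured in the Django settings.
```python
# Hypothetical call site; arguments are placeholders and a configured email backend is assumed.
from spypyapp.email import send_welcome_email

send_welcome_email("Jose", "jose@example.com")
```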
{
"source": "josphat-otieno/news-app",
"score": 2
} |
#### File: news-app/app/__init__.py
```python
from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
bootstrap = Bootstrap()
def create_app(config_name):
app=Flask(__name__)
# creating the app configuration
app.config.from_object(config_options[config_name])
# initialising flask extensions
    bootstrap.init_app(app)
# registering the blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
    # setting configurations
from .requests import configue_request
configue_request(app)
return app
```
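A minimal entry-point sketch for the application factory above; the config name `'development'` is an assumption about the keys in `config_options`, and the API keys the config expects must already be set.
```python
# Hypothetical run.py for the factory above; the config name is an assumption.
from app import create_app

app = create_app('development')

if __name__ == '__main__':
    app.run(debug=True)
```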
#### File: app/main/views.py
```python
from flask import render_template,request, redirect, url_for
from . import main
from ..requests import get_articles, get_news_sources,get_top_headlines, get_news_category
@main.route('/')
def index():
'''
    view root function that returns the index page and its data
'''
title="Welcome to your favorite news app"
message='Read your favorite news here'
news_sources=get_news_sources('sources')
top_headlines = get_top_headlines()
return render_template('index.html', title=title, message=message, sources=news_sources,top_headlines=top_headlines)
@main.route('/article/<id>')
def articles(id):
    '''function to display the articles page and its data
'''
articles = get_articles(id)
title = 'trending articles'
return render_template('article.html' ,articles=articles, title = title)
@main.route('/categories/<category_name>')
def category(category_name):
'''
function to return the categories.html page and its content
'''
category = get_news_category(category_name)
title = f'{category_name}'
cat = category_name
return render_template('categories.html',title = title,category = category, category_name=cat)
```
#### File: news-app/app/requests.py
```python
import urllib.request, json
from .models import Articles, News
# getting the api key
api_key=None
# getting the news base url
base_url= None
# getting the article base url
article_base_url=None
category_base_url=None
def configue_request(app):
global api_key,base_url,article_base_url,category_base_url
api_key = app.config['NEWS_API_KEY']
base_url =app.config['NEWS_API_BASE_URL']
article_base_url=app.config['ARTICLE_API_BASE_URL']
category_base_url=app.config['CATEGORY_API_BASE_URL']
def get_news_sources(sources):
'''
    function that gets the JSON response for our news sources request
'''
get_news_url =base_url.format(sources,api_key)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response=json.loads(get_news_data)
news_results=None
if get_news_response['sources']:
            news_results_list = get_news_response['sources']
            news_results = process_results(news_results_list)
return news_results
def process_results(news_list):
'''
    function to process news results and transform them into a list of objects
    args:
        news_list: a list of dictionaries that contain news source details
    Returns:
        news_results: a list of News objects
'''
news_results=[]
for news_item in news_list:
id= news_item.get('id')
name = news_item.get('name')
url= news_item.get('url')
news_object= News(id,name,url,)
news_results.append(news_object)
return news_results
def get_articles(id):
'''
    function to get a JSON object for our articles URL request
'''
get_article_url=article_base_url.format(id,api_key)
with urllib.request.urlopen(get_article_url) as url:
get_articles_data=url.read()
get_articles_reponse=json.loads(get_articles_data)
articles_results = None
if get_articles_reponse['articles']:
articles_results_list=get_articles_reponse['articles']
articles_results=process_articles(articles_results_list)
return articles_results
def process_articles(article_list):
articles_results=[]
for article_item in article_list:
name=article_item.get('name')
title = article_item.get ('title')
author = article_item.get('author')
description = article_item.get('description')
publishedAt = article_item.get('publishedAt')
urlToImage = article_item.get('urlToImage')
url = article_item.get('url')
if urlToImage:
articles_object= Articles(name,title,author,description,publishedAt,urlToImage,url,)
articles_results.append(articles_object)
return articles_results
def get_top_headlines():
'''
function that gets the response to the category json
'''
get_top_headlines_url = 'https://newsapi.org/v2/top-headlines?country=us&apiKey={}'.format(api_key)
# print(get_top_headlines_url)
with urllib.request.urlopen(get_top_headlines_url) as url:
get_top_headlines_data = url.read()
get_top_headlines_response = json.loads(get_top_headlines_data)
top_headlines_results = None
if get_top_headlines_response['articles']:
get_headlines_list = get_top_headlines_response['articles']
top_headlines_results = process_articles(get_headlines_list)
return top_headlines_results
def get_news_category(category_name):
'''
function that gets the response to the category json
'''
get_category_url = category_base_url.format(category_name,api_key)
print(get_category_url)
with urllib.request.urlopen(get_category_url) as url:
get_category_data = url.read()
get_cartegory_response = json.loads(get_category_data)
get_cartegory_results = None
if get_cartegory_response['articles']:
get_cartegory_list = get_cartegory_response['articles']
get_cartegory_results = process_articles(get_cartegory_list)
return get_cartegory_results
```
#### File: news-app/tests/test_articles.py
```python
import unittest
from app.models import Articles
class ArticlesTest(unittest.TestCase):
def setUp(self):
'''
test case to run before each test
'''
self.news_articles=Articles("jose", "Black Barbecue Gets Its Due in an Inspiring New Cookbook", "<NAME>", "The first book from renowned pitmaster...", "2021-06-05T13:00:00Z", "https://media.wired.com/photos/60ba605dd9be3d", "https://www.wired.com/story/rodney-scotts-world-of-bbq/")
def test_instance(self):
        self.assertTrue(isinstance(self.news_articles, Articles))
def test_init(self):
'''
test case to confirm the object is initialised correctly
'''
self.assertEqual(self.news_articles.name, "jose")
self.assertEqual(self.news_articles.title , "Black Barbecue Gets Its Due in an Inspiring New Cookbook")
self.assertEqual(self.news_articles.author , "<NAME>")
self.assertEqual(self.news_articles.description, "The first book from renowned pitmaster...")
self.assertEqual(self.news_articles.publishedAt, "2021-06-05T13:00:00Z")
self.assertEqual(self.news_articles.urlToImage, "https://media.wired.com/photos/60ba605dd9be3d")
self.assertEqual(self.news_articles.url, "https://www.wired.com/story/rodney-scotts-world-of-bbq/")
``` |
{
"source": "josphat-otieno/Password-Locker",
"score": 4
} |
#### File: josphat-otieno/Password-Locker/password.py
```python
import string
from random import choice
class User:
'''
class that generates new instances of users
'''
user_list=[]
'''
empty user list
'''
def __init__(self, username,password):
'''
A method to define properties of a user
args:
username
password
'''
self.username=username
self.password=password
def save_user(self):
'''
save user method to save user objects
'''
User.user_list.append(self)
@classmethod
def display_users(cls):
'''
        display method to display users in a list
'''
return cls.user_list
class Credentials:
'''
class that generates new instances of credentials
'''
credentials_list=[]
    @classmethod
    def verify_user(cls, username, password):
        """
        method to verify whether the user is in our user_list or not
        """
        a_user = ""
        for user in User.user_list:
            if user.username == username and user.password == password:
                a_user = user.username
        return a_user
def __init__(self, account_name, account_username, account_password):
'''
        A method to define the properties of a user's credentials
args:
account_name
account_username
account_password
'''
self.account_name=account_name
self.account_username=account_username
        self.account_password = account_password
def save_credentials(self):
'''
method to save credentials objects into credentials list
'''
Credentials.credentials_list.append(self)
def delete_credentials(self):
'''
delete credentials method deletes credentials saved from the credentials list
'''
Credentials.credentials_list.remove(self)
@classmethod
def find_credentials(cls, account_name):
'''
        Method takes in an account name and returns the credentials that match that account.
        Args:
            account_name: account name to search for
        Returns:
            the credentials that match that account name.
'''
for credential in cls.credentials_list:
if credential.account_name == account_name:
return credential
@classmethod
def credentials_exists(cls, account_name):
'''
        a method that checks if the credentials exist
        in the credentials_list
        args: account_name
        Returns: a boolean
'''
for credential in cls.credentials_list:
if credential.account_name==account_name:
return True
return False
@classmethod
def display_credentials(cls):
'''
method to return all credentials saved from credentials list
'''
return cls.credentials_list
@classmethod
def generate_password(cls):
'''
Method that generates a random alphanumeric password
'''
size = 8
alphanum = string.ascii_uppercase + string.digits + string.ascii_lowercase
random_password = ''.join( choice(alphanum) for num in range(size))
return random_password
``` |
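A short usage sketch of the two classes above; the account data is made up.
```python
# Hypothetical usage of the User and Credentials classes; sample data is made up.
from password import User, Credentials

new_user = User("jose", "pass123")
new_user.save_user()

generated = Credentials.generate_password()       # random 8-character alphanumeric string
twitter = Credentials("twitter", "jose_o", generated)
twitter.save_credentials()

print(Credentials.credentials_exists("twitter"))  # True
found = Credentials.find_credentials("twitter")
print(found.account_username, found.account_password)
twitter.delete_credentials()
```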
{
"source": "josphat-otieno/pitch-app",
"score": 3
} |
#### File: pitch-app/tests/test_comment.py
```python
import unittest
from app.models import Comments, User
from app import db
class CommentsTest(unittest.TestCase):
def setUp(self):
self.user_jose=User(username='kevin', password='<PASSWORD>', email = '<EMAIL>')
self.new_comment=Comments( pitch_id=1, comment='realy', user=self.user_jose)
def tearDown(self):
User.query.delete()
Comments.query.delete()
    def test_check_instance_variables(self):
self.assertEqual(self.new_comment.comment, 'realy')
# self.assertEqual(self.new_pitch.user, self.user_jose)
self.assertEqual(self.new_comment.pitch_id, 1)
def test_save_comments(self):
self.new_comment.save_comments()
self.assertTrue(len(Comments.query.all())>0)
def test_get_comments(self):
self.new_comment.save_comments()
got_comments= Comments.get_comments(1)
self.assertTrue(len(got_comments)>0)
``` |
{
"source": "JosPolfliet/snippets",
"score": 3
} |
#### File: JosPolfliet/snippets/crawl-twitter.py
```python
import csv, codecs, cStringIO
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
# In[38]:
import tweepy
import csv
import time
consumer_key = ''
consumer_secret = ''
access_token_key = ''
access_token_secret = ''
# Bounding boxes for geolocations
# Online-Tool to create boxes (c+p as raw CSV): http://boundingbox.klokantech.com/
GEOBOX_WORLD = [-180,-90,180,90]
GEOBOX_GERMANY = [5.0770049095, 47.2982950435, 15.0403900146, 54.9039819757]
GEOBOX_BELGIUM = [2.5214, 49.4753, 6.3776, 51.5087]
GEOCIRCLE_BELGIUM="50.56928286558243,4.7021484375,125km"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token_key, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
with open('tweets{}.csv'.format(time.strftime("%Y%m%d%H%M%S")), 'wb') as csvFile:
csvWriter = UnicodeWriter(csvFile)
csvWriter.writerow(["tweet_id","created_at","text","user_name",
"user_id",'user_screen_name','user_followers_count',
"favorite_count", "retweet_count", "is_quote_status", 'geo', 'lang'])
for tweet in tweepy.Cursor(api.search,q="*",geocode=GEOCIRCLE_BELGIUM).items(10):
csvWriter.writerow([tweet.id_str, str(tweet.created_at),
tweet.text, #.encode("utf-8"),
tweet.user.name,
str(tweet.user.id),
tweet.user.screen_name,
str(tweet.user.followers_count),
str(tweet.favorite_count),
str(tweet.retweet_count),
str(tweet.is_quote_status),
str(tweet.geo),
tweet.lang])
``` |
{
"source": "josrangel/flask101",
"score": 3
} |
#### File: josrangel/flask101/app.py
```python
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/") #Raiz del sitio
def index(): #La funcion index indica que sera la funcion principal
return render_template("index.html")
@app.route("/saludo") #Raiz del sitio
def saludo():
return "Hola ;V"
@app.route("/saludo/<nombre>") #parametros por url
def saludoNombre(nombre):
return render_template("saludo.html", nombre=nombre) #Se el pasa al template el parametro
@app.route("/funcion",methods=['POST']) #peticion tipo POST
def funcionApi():
funcion=request.form["funcion"]#Se accede al valor del form-data
return render_template("funcion.html", funcion=funcion) #Se el pasa al template el parametro
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port =5000)#port =5000, #El modo debug sirve para que se actualizen los cambios sin tener que detener el server manualmente
print("ESTOY VIVO!!!")
``` |
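A few example requests against the running app, assuming it is serving on localhost:5000 as configured above; the posted value is arbitrary.
```python
# Hypothetical client calls against the running app on localhost:5000.
import requests

print(requests.get("http://localhost:5000/saludo").text)             # "Hola ;V"
print(requests.get("http://localhost:5000/saludo/Ana").text)         # saludo.html rendered with nombre=Ana
print(requests.post("http://localhost:5000/funcion",
                    data={"funcion": "suma"}).text)                   # funcion.html rendered with the posted value
```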
{
"source": "josrolgil/exjobbCalvin",
"score": 2
} |
#### File: systemactors/io/GPIOPWM.py
```python
from calvin.actor.actor import Actor, ActionResult, manage, condition
class GPIOPWM(Actor):
"""
GPIO pulse width modulation on <pin>.
Input:
dutycycle : change dutycycle
frequency : change frequency
"""
@manage(["gpio_pin", "frequency", "dutycycle"])
def init(self, gpio_pin, frequency, dutycycle):
self.gpio_pin = gpio_pin
self.frequency = frequency
self.dutycycle = dutycycle
self.setup()
def setup(self):
self.use("calvinsys.io.gpiohandler", shorthand="gpiohandler")
self.gpio = self["gpiohandler"].open(self.gpio_pin, "o")
self.gpio.pwm_start(self.frequency, self.dutycycle)
def will_migrate(self):
self.gpio.pwm_stop()
self.gpio.close()
def will_end(self):
self.gpio.pwm_stop()
self.gpio.close()
def did_migrate(self):
self.setup()
@condition(action_input=("dutycycle",))
def set_dutycycle(self, dutycycle):
self.gpio.pwm_set_dutycycle(dutycycle)
return ActionResult()
@condition(action_input=("frequency",))
def set_frequency(self, frequency):
self.gpio.pwm_set_frequency(frequency)
return ActionResult()
action_priority = (set_dutycycle, set_frequency)
requires = ["calvinsys.io.gpiohandler"]
```
#### File: systemactors/io/SerialPort.py
```python
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
from calvin.utilities.calvinlogger import get_logger
from calvin.runtime.north.calvin_token import ExceptionToken
from serial import PARITY_NONE, STOPBITS_ONE, EIGHTBITS
_log = get_logger(__name__)
class SerialPort(Actor):
"""
Read/write data from serial port.
inputs:
in : Tokens to write.
Outputs:
out : Tokens read.
"""
@manage(['devicename', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout', 'xonxoff', 'rtscts'])
def init(self, devicename, baudrate, bytesize=EIGHTBITS, parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=0, xonxoff=0, rtscts=0):
self.not_found = False
self.devicename = devicename
self.baudrate = baudrate
try:
self.device = self.calvinsys.io.serialport.open(
devicename,
baudrate,
bytesize,
parity,
stopbits,
timeout,
xonxoff,
rtscts)
except:
self.device = None
self.not_found = True
@condition([], ['out'])
@guard(lambda self: self.not_found)
def device_not_found(self):
token = ExceptionToken(value="Device not found")
self.not_found = False # Only report once
return ActionResult(production=(token, ))
@condition([], ['out'])
@guard(lambda self: self.device and self.device.has_data())
def read(self):
data = self.device.read()
return ActionResult(production=(data, ))
@condition(action_input=['in'])
@guard(lambda self, _: self.device)
def write(self, data):
self.device.write(str(data))
return ActionResult(production=())
action_priority = (device_not_found, read, write)
```
#### File: systemactors/io/StandardOut.py
```python
from calvin.actor.actor import Actor, ActionResult, manage, condition
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class StandardOut(Actor):
"""
Write tokens to standard output
Input:
token : Any token
"""
def exception_handler(self, action, args, context):
# Check args to verify that it is EOSToken
return action(self, *args)
@manage(['tokens', 'store_tokens', 'quiet'])
def init(self, store_tokens=False, quiet=False):
self.store_tokens = store_tokens
self.tokens = []
self.quiet = quiet
self.setup()
def did_migrate(self):
self.setup()
def setup(self):
if self.quiet:
self.logger = _log.debug
else:
self.logger = _log.info
@condition(action_input=['token'])
def log(self, token):
if self.store_tokens:
self.tokens.append(token)
self.logger("%s<%s>: %s" % (self.__class__.__name__, self.id, str(token).strip()))
return ActionResult()
action_priority = (log, )
def report(self):
return self.tokens
test_kwargs = {'store_tokens': True}
test_set = [
{
'in': {'token': ['aa', 'ba', 'ca', 'da']},
'out': {},
'postcond': [lambda self: self.tokens == ['aa', 'ba', 'ca', 'da']]
}
]
```
#### File: systemactors/net/HTTPGet.py
```python
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class HTTPGet(Actor):
"""
Get contents of URL
Input:
URL : URL to get
params : Optional parameters to request as a JSON dictionary
header: JSON dictionary with headers to include in request
Output:
status: 200/404/whatever
header: JSON dictionary of incoming headers
data : body of request
"""
@manage()
def init(self):
self.setup()
def did_migrate(self):
self.setup()
def setup(self):
self.request = None
self.reset_request()
self.use('calvinsys.network.httpclienthandler', shorthand='http')
def reset_request(self):
if self.request:
self['http'].finalize(self.request)
self.request = None
self.received_headers = False
@condition(action_input=['URL', 'params', 'header'])
@guard(lambda self, url, params, header: self.request is None)
def new_request(self, url, params, header):
url = url.encode('ascii', 'ignore')
self.request = self['http'].get(url, params, header)
return ActionResult()
@condition(action_output=['status', 'header'])
@guard(lambda self: self.request and not self.received_headers and self['http'].received_headers(self.request))
def handle_headers(self):
self.received_headers = True
status = self['http'].status(self.request)
headers = self['http'].headers(self.request)
return ActionResult(production=(status, headers))
@condition(action_output=['data'])
@guard(lambda self: self.received_headers and self['http'].received_body(self.request))
def handle_body(self):
body = self['http'].body(self.request)
self.reset_request()
return ActionResult(production=(body,))
@condition()
@guard(lambda self: self.received_headers and self['http'].received_empty_body(self.request))
def handle_empty_body(self):
self.reset_request()
return ActionResult()
action_priority = (handle_body, handle_empty_body, handle_headers, new_request)
requires = ['calvinsys.network.httpclienthandler']
```
#### File: systemactors/net/UDPSender.py
```python
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class UDPSender(Actor):
"""
Send all incoming tokens to given address/port over UDP
Control port takes control commands of the form (uri only applicable for connect.)
{
"command" : "connect"/"disconnect",
"uri": "udp://<address>:<port>"
}
Input:
data_in : Each received token will be sent to address set via control port
control_in : Control port
"""
@manage(['address', 'port'])
def init(self):
self.address = None
self.port = None
self.sender = None
self.setup()
def connect(self):
self.sender = self['socket'].connect(self.address, self.port, connection_type="UDP")
def will_migrate(self):
if self.sender:
self.sender.disconnect()
def did_migrate(self):
self.setup()
if self.address is not None:
self.connect()
def setup(self):
self.use('calvinsys.network.socketclienthandler', shorthand='socket')
self.use('calvinsys.native.python-re', shorthand='regexp')
@condition(action_input=['data_in'])
@guard(lambda self, token: self.sender)
def send(self, token):
self.sender.send(token)
return ActionResult(production=())
# URI parsing - 0: protocol, 1: host, 2: port
URI_REGEXP = r'([^:]+)://([^/:]*):([0-9]+)'
def parse_uri(self, uri):
status = False
try:
parsed_uri = self['regexp'].findall(self.URI_REGEXP, uri)[0]
protocol = parsed_uri[0]
if protocol != 'udp':
_log.warn("Protocol '%s' not supported, assuming udp" % (protocol,))
self.address = parsed_uri[1]
self.port = int(parsed_uri[2])
status = True
except:
_log.warn("malformed or erroneous control uri '%s'" % (uri,))
self.address = None
self.port = None
return status
@condition(action_input=['control_in'])
@guard(lambda self, control: control.get('command', '') == 'connect' and not self.sender)
def new_connection(self, control):
print control
if self.parse_uri(control.get('uri', '')):
self.connect()
return ActionResult(production=())
@condition(action_input=['control_in'])
    @guard(lambda self, control: control.get('command', '') == 'disconnect' and self.sender)
def close_connection(self, control):
self.sender.disconnect()
del self.sender
self.sender = None
return ActionResult(production=())
action_priority = (new_connection, close_connection, send)
requires = ['calvinsys.network.socketclienthandler', 'calvinsys.native.python-re', 'calvinsys.native.python-json']
```
#### File: systemactors/std/RecTimer.py
```python
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
class RecTimer(Actor):
"""
Pass input after a given delay
Input :
token : anything
Outputs:
token : anything
"""
@manage(['delay'])
def init(self, delay=0.1):
self.delay = delay
self.use('calvinsys.events.timer', shorthand='timer')
self.setup()
def setup(self):
self.timer = self['timer'].repeat(self.delay)
def will_migrate(self):
self.timer.cancel()
def did_migrate(self):
self.setup()
@condition(['token'], ['token'])
@guard(lambda self, _: self.timer and self.timer.triggered)
def flush(self, input):
return ActionResult(production=(input, ))
@condition()
@guard(lambda self: self.timer and self.timer.triggered)
def clear(self):
self.timer.ack()
return ActionResult()
action_priority = (flush, clear)
requires = ['calvinsys.events.timer']
test_args = [1]
# Trigger a timer then add tokens. The tokens shall wait for the next trigger.
test_set = [
{
'setup': [lambda self: self.timer.trigger()],
'in': {'token': []}, 'out': {'token': []}
}
]
# Add tokens, nothing returned since timer not triggered above shall have cleared.
test_set += [
{'in': {'token': [r]}, 'out': {'token': []}} for r in range(3)
]
# Trigger the timer once then fetch three tokens.
# All tokens shall be flushed.
test_set += [
{
'setup': [lambda self: self.timer.trigger()],
'in': {'token': []}, 'out': {'token': [0]}
},
{'in': {'token': []}, 'out': {'token': [1]}},
{'in': {'token': []}, 'out': {'token': [2]}}
]
```
#### File: systemactors/text/RegexMatch.py
```python
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
class RegexMatch(Actor):
"""
Apply the regex supplied as argument to the incoming text.
If the regex matches, the text is routed to the 'match' output,
otherwise it is routed to the 'no_match' output.
If a (single) capture group is present, the captured result will
    be routed to 'match' instead of the full match, but if the match
fails, the full input text will be routed to 'no_match' just as
if no capture group was present. Any additional capture groups
will be ignored.
Inputs:
text : text to match
Outputs:
match : matching text or capture if capture group present
no_match : input text if match fails
"""
@manage(['regex', 'result', 'did_match'])
def init(self, regex):
self.regex = regex
self.result = None
self.did_match = False
self.use('calvinsys.native.python-re', shorthand='re')
def perform_match(self, text):
m = self['re'].match(self.regex, text)
self.did_match = m is not None
self.result = m.groups()[0] if m and m.groups() else text
@condition(['text'], [])
@guard(lambda self, text: self.result is None)
def match(self, text):
self.perform_match(str(text))
return ActionResult()
@condition([], ['match'])
@guard(lambda self: self.result is not None and self.did_match)
def output_match(self):
result = self.result
self.result = None
return ActionResult(production=(result,))
@condition([], ['no_match'])
@guard(lambda self: self.result is not None and not self.did_match)
def output_no_match(self):
result = self.result
self.result = None
return ActionResult(production=(result,))
action_priority = (match, output_match, output_no_match)
requires = ['calvinsys.native.python-re']
test_args = [".* (FLERP).* "]
test_set = [
{'in': {'text': ["This is a test FLERP please ignore"]},
'out': {'match': ['FLERP'], 'no_match':[]}
},
{'in': {'text': ["This is a test please ignore"]},
'out': {'match': [], 'no_match':["This is a test please ignore"]}
}
]
```
#### File: calvinsys/io/serialporthandler.py
```python
import os.path
import os
from calvin.runtime.south.plugins.async import serialport
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class SerialPort(object):
def __init__(self, devicename, baudrate, bytesize, parity, stopbits, timeout, xonxoff, rtscts, trigger, actor_id):
self.actor_id = actor_id
self.port = serialport.SP(
devicename,
baudrate,
bytesize,
parity,
stopbits,
timeout,
xonxoff,
rtscts,
trigger,
actor_id)
def write(self, data):
self.port.write(data)
def read(self):
return self.port.read()
def has_data(self):
return self.port.hasData()
def close(self):
self.port.close()
class SerialPortHandler(object):
def __init__(self, node, actor):
self.node = node
self._actor = actor
def open(self, devicename, baudrate, bytesize, parity, stopbits, timeout, xonxoff, rtscts):
if not os.path.exists(devicename):
raise Exception("Device not found")
return SerialPort(
devicename,
baudrate,
bytesize,
parity,
stopbits,
timeout,
xonxoff,
rtscts,
self.node.sched.trigger_loop,
self._actor.id)
def close(self, port):
port.close()
def register(node, actor):
"""
Called when the system object is first created.
"""
return SerialPortHandler(node, actor)
```
#### File: calvinsys/media/image.py
```python
from calvin.runtime.south.plugins.media import image
class Image(object):
"""
Image object
"""
def __init__(self):
"""
Initialize
"""
self.image = image.Image()
def show_image(self, image, width, height):
"""
Show image
"""
self.image.show_image(image, width, height)
def detect_face(self, image):
"""
Return True if face detected in image
"""
return self.image.detect_face(image)
def close(self):
"""
Close display
"""
self.image.close()
def register(node=None, actor=None):
"""
Called when the system object is first created.
"""
return Image()
```
#### File: calvin/csparser/parser.py
```python
import os
import ply.lex as lex
import ply.yacc as yacc
import calvin_rules
from calvin_rules import tokens
class CalvinSyntaxError(Exception):
def __init__(self, message, token):
super(CalvinSyntaxError, self).__init__(message)
self.token = token
class CalvinEOFError(Exception):
def __init__(self, message):
super(CalvinEOFError, self).__init__(message)
def p_script(p):
"""script : constdefs compdefs opt_program"""
p[0] = {'constants': p[1], 'components': p[2], 'structure': p[3]}
def p_constdefs(p):
"""constdefs :
| constdefs constdef
| constdef"""
if len(p) == 3:
p[1].update(p[2])
p[0] = p[1] if len(p) > 1 else {}
def p_constdef(p):
"""constdef : DEFINE IDENTIFIER EQ argument"""
constdef = {p[2]: p[4]}
p[0] = constdef
def p_compdefs(p):
"""compdefs :
| compdefs compdef
| compdef"""
if len(p) == 3:
p[1].update(p[2])
p[0] = p[1] if len(p) > 1 else {}
def p_compdef(p):
"""compdef : COMPONENT qualified_name LPAREN identifiers RPAREN identifiers RARROW identifiers LBRACE docstring program RBRACE"""
name = p[2]
arg_ids = p[4]
inputs = p[6]
outputs = p[8]
docstring = p[10]
structure = p[11]
comp = {
'name': name,
'inports': inputs,
'outports': outputs,
'arg_identifiers': arg_ids,
'structure': structure,
'docstring': docstring,
'dbg_line': p.lineno(2)
}
p[0] = {name: comp}
def p_docstring(p):
"""docstring :
| DOCSTRING """
if len(p) == 1:
p[0] = "Someone(TM) should write some documentation for this component."
else:
p[0] = p[1]
def p_opt_program(p):
"""opt_program :
| program"""
if len(p) == 1:
p[0] = {'connections': [], 'actors': {}}
else:
p[0] = p[1]
def p_program(p):
"""program : program statement
| statement """
if len(p) == 3:
# Update dict
# p[1] is dict and p[2] is tuple (assignment|link, statement)
kind, stmt = p[2]
if kind is 'link':
p[1]['connections'].append(stmt)
else:
p[1]['actors'].update(stmt)
p[0] = p[1]
else:
# Create dict, p[1] is tuple (assignment|link, statement)
kind, stmt = p[1]
        if kind == 'link':
p[0] = {'connections': [stmt], 'actors': {}}
else:
p[0] = {'connections': [], 'actors': stmt}
def p_statement(p):
"""statement : assignment
| link"""
p[0] = p[1]
def p_assignment(p):
"""assignment : IDENTIFIER COLON qualified_name LPAREN named_args RPAREN"""
p[0] = ('assignment', {p[1]: {'actor_type': p[3], 'args': p[5], 'dbg_line': p.lineno(2)}})
def p_link(p):
"""link : port GT port
| argument GT port"""
kind, value = p[1]
(src, port) = value if kind == 'PORT' else (None, (kind, value))
d = {}
d['src'] = src
d['src_port'] = port
_, (dst, port) = p[3]
d['dst'] = dst
d['dst_port'] = port
d['dbg_line'] = p.lineno(2)
p[0] = ('link', d)
def p_port(p):
"""port : IDENTIFIER DOT IDENTIFIER
| DOT IDENTIFIER"""
p[0] = ('PORT', (p[1], p[2] if len(p) == 3 else p[3]))
def p_named_args(p):
"""named_args :
| named_args named_arg COMMA
| named_args named_arg"""
if len(p) > 2:
p[1].update(p[2])
p[0] = p[1] if len(p) > 1 else {}
def p_named_arg(p):
"""named_arg : IDENTIFIER EQ argument"""
p[0] = {p[1]: p[3]}
def p_argument(p):
"""argument : value
| IDENTIFIER"""
p[0] = (p.slice[1].type.upper(), p[1])
def p_value(p):
"""value : dictionary
| array
| bool
| null
| NUMBER
| STRING"""
p[0] = p[1]
def p_bool(p):
"""bool : TRUE
| FALSE"""
p[0] = bool(p.slice[1].type == 'TRUE')
def p_null(p):
"""null : NULL"""
p[0] = None
def p_dictionary(p):
"""dictionary : LBRACE members RBRACE"""
p[0] = dict(p[2])
def p_members(p):
"""members :
| members member COMMA
| members member"""
if len(p) == 1:
p[0] = list()
else:
p[1].append(p[2])
p[0] = p[1]
def p_member(p):
"""member : STRING COLON value"""
p[0] = (p[1], p[3])
def p_values(p):
"""values :
| values value COMMA
| values value"""
if len(p) == 1:
p[0] = list()
else:
p[1].append(p[2])
p[0] = p[1]
def p_array(p):
"""array : LBRACK values RBRACK"""
p[0] = p[2]
# def p_opt_id_list(p):
# """opt_id_list :
# | id_list"""
# if len(p) == 1:
# p[0] = []
# else:
# p[0] = p[1]
def p_identifiers(p):
"""identifiers :
| identifiers IDENTIFIER COMMA
| identifiers IDENTIFIER"""
if len(p) > 2:
p[1].append(p[2])
p[0] = p[1] if len(p) > 1 else []
def p_qualified_name(p):
"""qualified_name : qualified_name DOT IDENTIFIER
| IDENTIFIER"""
if len(p) == 4:
# Concatenate name
p[0] = p[1] + p[2] + p[3]
else:
p[0] = p[1]
# Error rule for syntax errors
def p_error(p):
if not p:
raise CalvinEOFError("Unexpected end of file.")
else:
raise CalvinSyntaxError("Syntax error.", p)
def _calvin_parser():
lexer = lex.lex(module=calvin_rules)
lexer.zerocol = 0
# Since the parse may be called from other scripts, we want to have control
# over where parse tables (and parser.out log) will be put if the tables
# have to be recreated
this_file = os.path.realpath(__file__)
containing_dir = os.path.dirname(this_file)
parser = yacc.yacc(debug=False, optimize=True, outputdir=containing_dir)
return parser
# Compute column.
# input is the input text string
# token is a token instance
def _find_column(input, token):
last_cr = input.rfind('\n', 0, token.lexpos)
if last_cr < 0:
last_cr = 0
column = (token.lexpos - last_cr) + 1
return column
def calvin_parser(source_text, source_file=''):
parser = _calvin_parser()
result = {}
# Until there is error recovery, there will only be a single error at a time
errors = []
try:
result = parser.parse(source_text)
except CalvinSyntaxError as e:
error = {
'reason': str(e),
'line': e.token.lexer.lineno,
'col': _find_column(source_text, e.token)
}
errors.append(error)
except CalvinEOFError as e:
lines = source_text.splitlines()
error = {
'reason': str(e),
'line': len(lines),
'col': len(lines[-1])
}
errors.append(error)
result['sourcefile'] = source_file
warnings = []
return result, errors, warnings
if __name__ == '__main__':
import sys
import json
if len(sys.argv) < 2:
script = 'inline'
source_text = \
''' # Test script
define FOO = true
define BAR = false
# define BAZ = 43
component Count(len) -> a,b,seq {
"""FOO"""
src : std.Constant(data="hup", n=len)
src.token > .seq
}
# component Count2(len) -> seq {
# src : std.Constant(data="hup", n=len)
# src.token > .seq
# }
#
src: Count(len=5)
snk : io.StandardOut()
42 > snk.token
'''
else:
script = sys.argv[1]
script = os.path.expanduser(script)
try:
with open(script, 'r') as source:
source_text = source.read()
except:
print "Error: Could not read file: '%s'" % script
sys.exit(1)
result, errors, warnings = calvin_parser(source_text, script)
if errors:
print "{reason} {script} [{line}:{col}]".format(script=script, **errors[0])
else:
print(json.dumps(result, indent=4, sort_keys=True))
```
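A minimal sketch driving `calvin_parser` directly on an inline CalvinScript snippet, mirroring the `__main__` block above; the actor types in the snippet are the same ones the file's own example uses.
```python
# Minimal sketch of calling the parser above on an inline script.
from calvin.csparser.parser import calvin_parser

source_text = '''
src : std.Constant(data="hup", n=3)
snk : io.StandardOut()
src.token > snk.token
'''

result, errors, warnings = calvin_parser(source_text, 'inline')
if errors:
    print("{reason} [{line}:{col}]".format(**errors[0]))
else:
    print(result['structure']['actors'])        # {'src': {...}, 'snk': {...}}
    print(result['structure']['connections'])
```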
#### File: devactors/sensor/Environmental.py
```python
from calvin.actor.actor import Actor, ActionResult, condition
class Environmental(Actor):
"""
Output temperature, humidity and pressure from sensor
Inputs:
trigger: Trigger reading
Outputs:
data: Sensor data as string (T:x H:x P:p)
"""
def init(self):
self.setup()
def setup(self):
self.use("calvinsys.sensors.environmental", shorthand="sensor")
self.sensor = self["sensor"]
def did_migrate(self):
self.setup()
@condition(action_input=["trigger"], action_output=["data"])
def get_data(self, input):
data = "T:%s H:%s P:%s" % (int(self.sensor.get_temperature()),
int(self.sensor.get_humidity()),
int(self.sensor.get_pressure()))
return ActionResult(production=(data, ))
action_priority = (get_data, )
requires = ["calvinsys.sensors.environmental"]
```
#### File: actors/http/HTTPResponseGenerator.py
```python
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
from string import Template
class HTTPResponseGenerator(Actor):
"""
Read status and optional body (if status = 200) and generate a HTML response
Inputs:
status : HTTP status 200/400/404/501
body : HTML text
Outputs:
out : Properly formatted HTML response
"""
STATUSMAP = {
200: "OK",
400: "Bad Request",
404: "Not Found",
501: "Not Implemented"
}
HEADER_TEMPLATE = Template("Content-Type: text/html\r\nContent-Length: $length")
RESPONSE_TEMPLATE = Template("HTTP/1.0 $status $reason\r\n$header\r\n\r\n$body\r\n\r\n")
ERROR_BODY = Template("<html><body>$reason ($status)</body></html>")
@manage()
def init(self):
pass
@condition(['status', 'body'], ['out'])
@guard(lambda self, status, body : status == 200)
def ok(self, status, body):
header = self.HEADER_TEMPLATE.substitute(
length=len(body)
)
response = self.RESPONSE_TEMPLATE.substitute(
header=header,
status=status,
reason=self.STATUSMAP.get(status, "Unknown"),
body=body
)
return ActionResult(production=(response, ))
@condition(['status', 'body'], ['out'])
@guard(lambda self, status, body : status != 200)
def error(self, status, body):
body = self.ERROR_BODY.substitute(
status=status,
reason=self.STATUSMAP.get(status, "Unknown")
)
header = self.HEADER_TEMPLATE.substitute(
length=len(body)
)
response = self.RESPONSE_TEMPLATE.substitute(
header=header,
status=status,
reason=self.STATUSMAP.get(status, "Unknown"),
body=body
)
return ActionResult(production=(response, ))
action_priority = (ok, error)
```
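For reference, a standalone sketch of the string the `error` action above produces, run outside the Calvin runtime with the same templates; the 404 status is just an example.
```python
# Standalone sketch of the error-path formatting above; the status value is an example.
from string import Template

STATUSMAP = {200: "OK", 400: "Bad Request", 404: "Not Found", 501: "Not Implemented"}
HEADER_TEMPLATE = Template("Content-Type: text/html\r\nContent-Length: $length")
RESPONSE_TEMPLATE = Template("HTTP/1.0 $status $reason\r\n$header\r\n\r\n$body\r\n\r\n")
ERROR_BODY = Template("<html><body>$reason ($status)</body></html>")

status = 404
body = ERROR_BODY.substitute(status=status, reason=STATUSMAP[status])
header = HEADER_TEMPLATE.substitute(length=len(body))
print(RESPONSE_TEMPLATE.substitute(header=header, status=status,
                                   reason=STATUSMAP[status], body=body))
```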
#### File: runtime/north/calvin_node.py
```python
from multiprocessing import Process
# For trace
import sys
import trace
import logging
from calvin.calvinsys import Sys as CalvinSys
from calvin.runtime.north import actormanager
from calvin.runtime.north import appmanager
from calvin.runtime.north import scheduler
from calvin.runtime.north import storage
from calvin.runtime.north import calvincontrol
from calvin.runtime.north import metering
from calvin.runtime.north.calvin_network import CalvinNetwork
from calvin.runtime.north.calvin_proto import CalvinProto
from calvin.runtime.north.portmanager import PortManager
from calvin.runtime.south.monitor import Event_Monitor
from calvin.runtime.south.plugins.async import async
from calvin.utilities.attribute_resolver import AttributeResolver
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities.security import security_modules_check
from calvin.utilities.authorization.policy_decision_point import PolicyDecisionPoint
from calvin.utilities import authorization
from calvin.utilities import calvinuuid
from calvin.utilities import certificate
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities import calvinconfig
_log = get_logger(__name__)
_conf = calvinconfig.get()
def addr_from_uri(uri):
_, host = uri.split("://")
addr, _ = host.split(":")
return addr
class Node(object):
"""A node of calvin
the uri is a list of server connection points
the control_uri is the local console
attributes is a supplied list of external defined attributes that will be used as the key when storing index
such as name of node
authz_server is True if the runtime can act as an authorization server
"""
def __init__(self, uri, control_uri, attributes=None, authz_server=False):
super(Node, self).__init__()
self.uri = uri
self.control_uri = control_uri
self.external_uri = attributes.pop('external_uri', self.uri) \
if attributes else self.uri
self.external_control_uri = attributes.pop('external_control_uri', self.control_uri) \
if attributes else self.control_uri
try:
self.attributes = AttributeResolver(attributes)
except:
_log.exception("Attributes not correct, uses empty attribute!")
self.attributes = AttributeResolver(None)
try:
self.sec_conf = _conf.get("security","security_conf")
if authz_server or self.sec_conf['authorization']['procedure'] == "local":
self.pdp = PolicyDecisionPoint(self.sec_conf['authorization'])
except:
self.sec_conf = None
# Obtain node id, when using security also handle runtime certificate
self.id = certificate.obtain_cert_node_info(self.attributes.get_node_name_as_str())['id']
self.metering = metering.set_metering(metering.Metering(self))
self.monitor = Event_Monitor()
self.am = actormanager.ActorManager(self)
self.control = calvincontrol.get_calvincontrol()
_scheduler = scheduler.DebugScheduler if _log.getEffectiveLevel() <= logging.DEBUG else scheduler.Scheduler
self.sched = _scheduler(self, self.am, self.monitor)
self.async_msg_ids = {}
self._calvinsys = CalvinSys(self)
# Default will multicast and listen on all interfaces
# TODO: be able to specify the interfaces
# @TODO: Store capabilities
self.storage = storage.Storage(self)
self.network = CalvinNetwork(self)
self.proto = CalvinProto(self, self.network)
self.pm = PortManager(self, self.proto)
self.app_manager = appmanager.AppManager(self)
# The initialization that requires the main loop operating is deferred to start function
async.DelayedCall(0, self.start)
def insert_local_reply(self):
msg_id = calvinuuid.uuid("LMSG")
self.async_msg_ids[msg_id] = None
return msg_id
def set_local_reply(self, msg_id, reply):
if msg_id in self.async_msg_ids:
self.async_msg_ids[msg_id] = reply
def connect(self, actor_id=None, port_name=None, port_dir=None, port_id=None,
peer_node_id=None, peer_actor_id=None, peer_port_name=None,
peer_port_dir=None, peer_port_id=None, cb=None):
self.pm.connect(actor_id=actor_id,
port_name=port_name,
port_dir=port_dir,
port_id=port_id,
peer_node_id=peer_node_id,
peer_actor_id=peer_actor_id,
peer_port_name=peer_port_name,
peer_port_dir=peer_port_dir,
peer_port_id=peer_port_id,
callback=CalvinCB(self.logging_callback, preamble="connect cb") if cb is None else cb)
def disconnect(self, actor_id=None, port_name=None, port_dir=None, port_id=None, cb=None):
_log.debug("disconnect(actor_id=%s, port_name=%s, port_dir=%s, port_id=%s)" %
(actor_id if actor_id else "", port_name if port_name else "",
port_dir if port_dir else "", port_id if port_id else ""))
self.pm.disconnect(actor_id=actor_id, port_name=port_name,
port_dir=port_dir, port_id=port_id,
callback=CalvinCB(self.logging_callback, preamble="disconnect cb") if cb is None else cb)
def peersetup(self, peers, cb=None):
""" Sets up a RT to RT communication channel, only needed if the peer can't be found in storage.
peers: a list of peer uris, e.g. ["calvinip://127.0.0.1:5001"]
"""
_log.debug("peersetup(%s)" % (peers))
peers_copy = peers[:]
peer_node_ids = {}
if not cb:
callback = CalvinCB(self.logging_callback, preamble="peersetup cb")
else:
callback = CalvinCB(self.peersetup_collect_cb, peers=peers_copy, peer_node_ids=peer_node_ids, org_cb=cb)
self.network.join(peers, callback=callback)
def peersetup_collect_cb(self, status, uri, peer_node_id, peer_node_ids, peers, org_cb):
if uri in peers:
peers.remove(uri)
peer_node_ids[uri] = (peer_node_id, status)
if not peers:
# Get highest status, i.e. any error
comb_status = max([s for _, s in peer_node_ids.values()])
org_cb(peer_node_ids=peer_node_ids, status=comb_status)
def logging_callback(self, preamble=None, *args, **kwargs):
_log.debug("\n%s# NODE: %s \n# %s %s %s \n%s" %
('#' * 40, self.id, preamble if preamble else "*", args, kwargs, '#' * 40))
def new(self, actor_type, args, deploy_args=None, state=None, prev_connections=None, connection_list=None):
# TODO requirements should be input to am.new
actor_id = self.am.new(actor_type, args, state, prev_connections, connection_list,
signature=deploy_args['signature'] if deploy_args and 'signature' in deploy_args else None,
credentials=deploy_args['credentials'] if deploy_args and 'credentials' in deploy_args else None)
if deploy_args:
app_id = deploy_args['app_id']
if 'app_name' not in deploy_args:
app_name = app_id
else:
app_name = deploy_args['app_name']
self.app_manager.add(app_id, actor_id,
deploy_info = deploy_args['deploy_info'] if 'deploy_info' in deploy_args else None)
return actor_id
def calvinsys(self):
"""Return a CalvinSys instance"""
# FIXME: We still need to sort out actor requirements vs. node capabilities and user permissions.
# @TODO: Write node capabilities to storage
return self._calvinsys
#
# Event loop
#
def run(self):
"""main loop on node"""
_log.debug("Node %s is running" % self.id)
self.sched.run()
def start(self):
""" Run once when main loop is started """
interfaces = _conf.get(None, 'transports')
self.network.register(interfaces, ['json'])
self.network.start_listeners(self.uri)
# Start storage after network, proto etc since storage proxy expects them
self.storage.start()
self.storage.add_node(self)
if hasattr(self, "pdp"):
self.storage.add_authz_server(self)
if self.sec_conf and "authorization" in self.sec_conf:
authorization.register_node(self)
# Start control API
proxy_control_uri = _conf.get(None, 'control_proxy')
_log.debug("Start control API on %s with uri: %s and proxy: %s" % (self.id, self.control_uri, proxy_control_uri))
if proxy_control_uri is not None:
self.control.start(node=self, uri=proxy_control_uri, tunnel=True)
else:
if self.control_uri is not None:
self.control.start(node=self, uri=self.control_uri)
def stop(self, callback=None):
def stopped(*args):
_log.analyze(self.id, "+", {'args': args})
_log.debug(args)
self.sched.stop()
_log.analyze(self.id, "+ SCHED STOPPED", {'args': args})
self.control.stop()
_log.analyze(self.id, "+ CONTROL STOPPED", {'args': args})
def deleted_node(*args, **kwargs):
_log.analyze(self.id, "+", {'args': args, 'kwargs': kwargs})
self.storage.stop(stopped)
_log.analyze(self.id, "+", {})
# FIXME: this function is never called when the node quits
if hasattr(self, "pdp"):
self.storage.delete_authz_server(self)
self.storage.delete_node(self, cb=deleted_node)
def create_node(uri, control_uri, attributes=None, authz_server=False):
n = Node(uri, control_uri, attributes, authz_server)
n.run()
_log.info('Quitting node "%s"' % n.uri)
def create_tracing_node(uri, control_uri, attributes=None, authz_server=False):
"""
Same as create_node, but traces every line of execution.
Creates a trace dump in the output file '<host>:<port>.trace'
"""
n = Node(uri, control_uri, attributes, authz_server)
_, host = uri.split('://')
with open("%s.trace" % (host, ), "w") as f:
tmp = sys.stdout
# Modules to ignore
ignore = [
'fifo', 'calvin', 'actor', 'pickle', 'socket',
'uuid', 'codecs', 'copy_reg', 'string_escape', '__init__',
'colorlog', 'posixpath', 'glob', 'genericpath', 'base',
'sre_parse', 'sre_compile', 'fdesc', 'posixbase', 'escape_codes',
'fnmatch', 'urlparse', 're', 'stat', 'six'
]
with f as sys.stdout:
tracer = trace.Trace(trace=1, count=0, ignoremods=ignore)
tracer.runfunc(n.run)
sys.stdout = tmp
_log.info('Quitting node "%s"' % n.uri)
def start_node(uri, control_uri, trace_exec=False, attributes=None, authz_server=False):
if not security_modules_check():
raise Exception("Security module missing")
_create_node = create_tracing_node if trace_exec else create_node
p = Process(target=_create_node, args=(uri, control_uri, attributes, authz_server))
p.daemon = True
p.start()
return p
```
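For reference, a minimal usage sketch of the `start_node` helper defined above. It is only a sketch and assumes it is executed from the module that defines `start_node`; the URI formats follow the "calvinip://host:port" / "http://host:port" convention used by the tests further down.

```python
# Minimal sketch (assumption: run from the module defining start_node).
# Starts one runtime in a daemonized child process and waits for it to exit.
if __name__ == '__main__':
    proc = start_node("calvinip://127.0.0.1:5000", "http://localhost:5001")
    try:
        proc.join()
    except KeyboardInterrupt:
        proc.terminate()
```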
#### File: plugins/requirements/shadow_actor_reqs_match.py
```python
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities import dynops
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
def get_description(out_iter, kwargs, final, signature):
_log.debug("shadow_match:get_description BEGIN")
if final[0]:
_log.debug("shadow_match:get_description FINAL")
out_iter.auto_final(kwargs['counter'])
else:
_log.debug("shadow_match:get_description ACT")
kwargs['counter'] += 1
kwargs['node'].storage.get_iter('actor_type-', signature, it=out_iter)
_log.debug("shadow_match:get_description END")
def extract_capabilities(out_iter, kwargs, final, value):
_log.debug("shadow_match:extract_capabilities BEGIN")
shadow_params = kwargs.get('shadow_params', [])
if not final[0] and value != dynops.FailedElement:
_log.debug("shadow_match:extract_capabilities VALUE %s" % value)
mandatory = value['args']['mandatory']
optional = value['args']['optional'].keys()
# To be valid actor type all mandatory params need to be supplied and only valid params
if all([p in shadow_params for p in mandatory]) and all([p in (mandatory + optional) for p in shadow_params]):
_log.debug("shadow_match:extract_capabilities ACT")
kwargs['descriptions'].append(value)
reqs = value['requires']
new = set(reqs) - kwargs['capabilities']
kwargs['capabilities'].update(new)
out_iter.extend(new)
if final[0]:
_log.debug("shadow_match:extract_capabilities FINAL")
out_iter.final()
_log.debug("shadow_match:extract_capabilities END")
def get_capability(out_iter, kwargs, final, value):
_log.debug("shadow_match:get_capability BEGIN")
if final[0]:
_log.debug("shadow_match:get_capability FINAL")
out_iter.auto_final(kwargs['counter'])
else:
kwargs['counter'] += 1
_log.debug("shadow_match:get_capability GET %s counter:%d" % (value, kwargs['counter']))
out_iter.append(kwargs['node'].storage.get_index_iter(['node', 'capabilities', value], include_key=True))
_log.debug("shadow_match:get_capability END")
def placement(out_iter, kwargs, final, capability_nodes):
_log.debug("shadow_match:placement BEGIN %s" % (capability_nodes,))
if final[0]:
try:
possible_nodes = set.union(*[d['node_match'] for d in kwargs['descriptions'] if 'node_match' in d])
except:
possible_nodes = set([])
if not kwargs['capabilities']:
# No capabilities required; finish directly with an infinite (any node) placement
out_iter.append(dynops.InfiniteElement())
out_iter.final()
return
if not possible_nodes:
# None found
out_iter.final()
return
if any([isinstance(n, dynops.InfiniteElement) for n in possible_nodes]):
# Some actor can have infinite placement, let that be our response
out_iter.append(dynops.InfiniteElement())
out_iter.final()
return
# Send out the union of possible nodes
out_iter.extend(possible_nodes)
out_iter.final()
return
else:
capability = "".join(capability_nodes[0].partition('calvinsys.')[1:])
kwargs['capabilities'].setdefault(capability, []).append(capability_nodes[1])
_log.debug("shadow_match:placement EVALUATE %s" % kwargs)
# Update matches
for d in kwargs['descriptions']:
if 'node_match' not in d and not d['requires']:
# No capability requirements
_log.debug("shadow_match:placement No requires create Infinity")
d['node_match'] = set([dynops.InfiniteElement()])
elif set(d['requires']) <= set(kwargs['capabilities'].keys()):
_log.debug("shadow_match:placement require:%s, caps:%s" % (d['requires'], kwargs['capabilities']))
found = set.intersection(*[set(kwargs['capabilities'][r]) for r in d['requires']])
new = found - d.setdefault('node_match', set([]))
d['node_match'].update(new)
# TODO drip out matches as they come, but how to handle infinite responses
def req_op(node, signature, shadow_params, actor_id=None, component=None):
""" Based on signature find actors' requires in global storage,
filter actors based on params that are supplied
and find any nodes with those capabilities
"""
# Lookup signature to get a list of ids of the actor types
signature_iter = node.storage.get_index_iter(['actor', 'signature', signature])
signature_iter.set_name("shadow_match:sign")
# Lookup description for all matching actor types
description_iter = dynops.Map(get_description, signature_iter, eager=True, counter=0, node=node)
description_iter.set_name("shadow_match:desc")
# Filter with matching parameters and return set of needed capabilities
extract_caps_iter = dynops.Map(extract_capabilities, description_iter, eager=True,
shadow_params=shadow_params, capabilities=set([]), descriptions=[])
extract_caps_iter.set_name("shadow_match:extract")
# Lookup nodes having each capability
get_caps_iter = dynops.Map(get_capability, extract_caps_iter, eager=True, counter=0, node=node)
get_caps_iter.set_name("shadow_match:caps")
# The previous step returned an iterable of iterables; chain them into a single iterable
collect_caps_iter = dynops.Chain(get_caps_iter)
collect_caps_iter.set_name("shadow_match:collect")
# return nodes that can host first seen actor type with all capabilities fulfilled
placement_iter = dynops.Map(placement, collect_caps_iter, capabilities={},
descriptions=extract_caps_iter.get_kwargs()['descriptions'])
placement_iter.set_name("shadow_match:place")
return placement_iter
```
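The filtering step in `extract_capabilities` boils down to two membership checks over parameter names: every mandatory parameter must be supplied, and every supplied parameter must be either mandatory or optional. A stand-alone illustration with hypothetical parameter lists:

```python
# Hypothetical parameter lists illustrating the validity check above.
mandatory = ['device']
optional = ['width', 'height']
shadow_params = ['device', 'width']

valid = (all(p in shadow_params for p in mandatory) and
         all(p in (mandatory + optional) for p in shadow_params))
print valid  # True: 'device' is supplied and 'width' is an allowed optional
```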
#### File: async/twistedimpl/http_client.py
```python
from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
try:
from twisted.internet.ssl import ClientContextFactory
HAS_OPENSSL = True
except:
# Probably no OpenSSL available.
HAS_OPENSSL = False
from twisted.web.client import FileBodyProducer
from twisted.internet.defer import Deferred
from twisted.internet.protocol import Protocol
from StringIO import StringIO
from urllib import urlencode
# from calvin.utilities.calvinlogger import get_logger
from calvin.utilities.calvin_callback import CalvinCBClass
# _log = get_logger(__name__)
class HTTPRequest(object):
def __init__(self):
self._response = {}
def parse_headers(self, response):
self._response = {}
self._response['version'] = "%s/%d.%d" % (response.version)
self._response['status'] = response.code
self._response['phrase'] = response.phrase
self._response['headers'] = {}
for hdr, val in response.headers.getAllRawHeaders():
self._response['headers'][hdr.lower()] = val[0] if isinstance(val, list) and len(val) > 0 else val
def parse_body(self, body):
self._response['body'] = body
def body(self):
return self._response.get('body', None)
def headers(self):
return self._response.get('headers', None)
def status(self):
return self._response.get('status', None)
def version(self):
return self._response.get('version', None)
def phrase(self):
return self._response.get('phrase', None)
def encode_params(params):
if params:
return "?" + urlencode(params)
return ""
def encode_headers(headers):
twisted_headers = Headers()
for k, v in headers.items():
key = k.encode('ascii', 'ignore')
val = v.encode('ascii', 'ignore')
twisted_headers.addRawHeader(key, val)
return twisted_headers
def encode_body(data):
if not data:
return None
if not isinstance(data, str):
return None
return FileBodyProducer(StringIO(data))
class BodyReader(Protocol):
def __init__(self, deferred, cb, request):
self.deferred = deferred
self.data = ""
self.cb = cb
self.request = request
def dataReceived(self, bytes):
self.data += bytes
def connectionLost(self, reason):
self.deferred.callback(None)
self.cb(self.data, self.request)
class HTTPClient(CalvinCBClass):
def create_agent(self):
if HAS_OPENSSL:
class WebClientContextFactory(ClientContextFactory):
"""TODO: enable certificate verification, hostname checking"""
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
return Agent(reactor, WebClientContextFactory())
else:
return Agent(reactor)
def __init__(self, callbacks=None):
super(HTTPClient, self).__init__(callbacks)
self._agent = self.create_agent()
def _receive_headers(self, response, request):
request.parse_headers(response)
self._callback_execute('receive-headers', request)
finished = Deferred()
response.deliverBody(BodyReader(finished, self._receive_body, request))
return finished
def _receive_body(self, response, request):
request.parse_body(response)
self._callback_execute('receive-body', request)
def request(self, command, url, params, headers, data):
url += encode_params(params)
twisted_headers = encode_headers(headers)
body = encode_body(data)
deferred = self._agent.request(command, url, headers=twisted_headers, bodyProducer=body)
request = HTTPRequest()
deferred.addCallback(self._receive_headers, request)
return request
```
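A hypothetical usage sketch of `HTTPClient`. It assumes that `CalvinCBClass` accepts a dict mapping callback names ('receive-headers', 'receive-body') to lists of `CalvinCB` instances, as the `_callback_execute` calls suggest, and that a Twisted reactor is running so the request can complete.

```python
# Sketch only: the callback registration format and the example.com URL are assumptions.
from calvin.utilities.calvin_callback import CalvinCB

def on_headers(request):
    print "status:", request.status()

def on_body(request):
    print "body bytes:", len(request.body() or "")

client = HTTPClient(callbacks={'receive-headers': [CalvinCB(on_headers)],
                               'receive-body': [CalvinCB(on_body)]})
# The returned HTTPRequest object is filled in by the callbacks as the response arrives.
client.request("GET", "http://example.com/", params={}, headers={}, data=None)
```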
#### File: media/defaultimpl/camera.py
```python
import cv2
import numpy
class Camera(object):
"""
Capture image from device
"""
def __init__(self, device, width, height):
"""
Initialize camera
"""
self.cap = cv2.VideoCapture(device)
self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
def get_image(self):
"""
Captures an image
returns: Image as jpeg encoded binary string, None if no frame
"""
ret, frame = self.cap.read()
if ret:
ret, jpeg = cv2.imencode(".jpg", frame)
if ret:
data = numpy.array(jpeg)
return data.tostring()
def close(self):
"""
Uninitialize camera
"""
self.cap.release()
```
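A minimal usage sketch of the `Camera` class, assuming a capture device is available as device 0:

```python
# Sketch only: device index 0 and the output filename are assumptions.
camera = Camera(device=0, width=640, height=480)
jpeg_data = camera.get_image()
if jpeg_data is not None:
    with open("frame.jpg", "wb") as f:
        f.write(jpeg_data)  # get_image() returns a JPEG-encoded byte string
camera.close()
```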
#### File: media/defaultimpl/image.py
```python
import pygame
from StringIO import StringIO
import cv2
import os
import numpy
class Image(object):
"""
Image object
"""
def __init__(self):
self.display = None
def show_image(self, image, width, height):
"""
Show image
"""
size = (width, height)
self.display = pygame.display.set_mode(size, 0)
self.snapshot = pygame.surface.Surface(size, 0, self.display)
img = pygame.image.load(StringIO(image))
self.display.blit(img, (0, 0))
pygame.display.flip()
def detect_face(self, image):
linux_prefix = "/usr/share/opencv"
mac_prefix = "/usr/local/share/OpenCV"
suffix = "/haarcascades/haarcascade_frontalface_default.xml"
linux_path = linux_prefix + suffix
mac_path = mac_prefix + suffix
if os.path.exists(linux_path) :
cpath = linux_path
elif os.path.exists(mac_path) :
cpath = mac_path
else :
raise Exception("No Haarcascade found")
classifier = cv2.CascadeClassifier(cpath)
jpg = numpy.fromstring(image, numpy.int8)
image = cv2.imdecode(jpg, 1)
faces = classifier.detectMultiScale(image)
if len(faces) > 0 :
for (x,y,w,h) in faces :
if w < 120 :
# Too small to be a nearby face
continue
return True
return False
def close(self):
"""
Close display
"""
if self.display is not None:
pygame.display.quit()
```
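A hypothetical sketch combining the two media classes: grab a frame with `Camera` and run `detect_face` on it. It assumes both classes are importable in the same scope and that an OpenCV Haar cascade is installed in one of the paths `detect_face` checks.

```python
# Sketch only: the device index and the combination of the two classes are assumptions.
camera = Camera(device=0, width=640, height=480)
image = Image()
frame = camera.get_image()
if frame is not None and image.detect_face(frame):
    print "Face detected"
image.close()
camera.close()
```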
#### File: dht/tests/test_append.py
```python
import pytest
import sys
import os
import traceback
import random
import time
import json
import Queue
from twisted.application import service, internet
from twisted.python.log import ILogObserver
from twisted.internet import reactor, task, defer, threads
from threading import Thread
from kademlia import log
from calvin.runtime.south.plugins.storage.twistedimpl.dht.append_server import AppendServer
# _log = get_logger(__name__)
class KNet(object):
def __init__(self, number, server_type=AppendServer):
self.nodes = []
self.boot_strap = None
if not reactor.running:
print "Starting reactor only once"
self.reactor_thread = Thread(target=reactor.run, args=(False,))
self.reactor_thread.start()
for a in xrange(number):
self.nodes.append(ServerApp(server_type))
def start(self):
bootstrap = []
for a in self.nodes:
port, kserver = a.start(0, bootstrap)
if len(bootstrap) < 100:
bootstrap.append(("127.0.0.1", port))
# Wait for them to start
time.sleep(.8)
def stop(self):
for node in self.nodes:
node.stop()
self.nodes = []
time.sleep(1)
def get_rand_node(self):
index = random.randint(0, max(0, len(self.nodes) - 1))
return self.nodes[index]
class ServerApp(object):
def __init__(self, server_type):
self.server_type = server_type
def start(self, port=0, boot_strap=[]):
self.kserver = self.server_type()
self.kserver.bootstrap(boot_strap)
self.port = threads.blockingCallFromThread(reactor, reactor.listenUDP, port, self.kserver.protocol)
print "Starting server:", self.port
time.sleep(.2)
return self.port.getHost().port, self.kserver
def call(self, func, *args, **kwargs):
reactor.callFromThread(func, *args, **kwargs)
def __getattr__(self, name):
class caller:
def __init__(self, f, func):
self.f = f
self.func = func
def __call__(self, *args, **kwargs):
# _log.debug("Calling %s(%s, %s, %s)" %(self.f, self.func, args, kwargs))
return self.func(*args, **kwargs)
if hasattr(self.kserver, name) and callable(getattr(self.kserver, name)):
return caller(self.call, getattr(self.kserver, name))
else:
# Default behaviour
raise AttributeError
def get_port(self):
return self.port
def stop(self):
result = threads.blockingCallFromThread(reactor, self.port.stopListening)
def normal_test(match):
def test(obj):
if obj != match:
print("%s != %s" % (repr(obj), repr(match)))
return obj == match
return test
def json_test(match):
try:
jmatch = json.loads(match)
except:
print("Not JSON in json test!!!")
return False
def test(obj):
try:
jobj = json.loads(obj)
except:
print("Not JSON in json test!!!")
return False
if jobj != jmatch and not isinstance(jobj, list) and not isinstance(jmatch, list):
print("%s != %s" % (repr(jobj), repr(jmatch)))
if isinstance(jobj, list) and isinstance(jmatch, list):
return set(jobj) == set(jmatch)
return jobj == jmatch
return test
def do_sync(func, **kwargs):
test = None
timeout = .2
if 'timeout' in kwargs:
timeout = kwargs.pop('timeout')
if 'test' in kwargs:
test = kwargs.pop('test')
q = Queue.Queue()
def respond(value):
q.put(value)
d = func(**kwargs)
d.addCallback(respond)
try:
a = q.get(timeout=timeout)
except Queue.Empty:
assert False
if test is not None:
assert test(a)
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
def fin():
reactor.callFromThread(reactor.stop)
request.addfinalizer(fin)
print "hejsan"
@pytest.mark.slow
class TestKAppend(object):
test_nodes = 20
def test_append(self, monkeypatch):
a = KNet(self.test_nodes)
a.start()
try:
item = ["apa"]
test_str = json.dumps(item)
# set(["apa"])
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
match_str = json.dumps(item)
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "elefant", "tiger"])
test_str2 = json.dumps(["elefant", "tiger"])
do_sync(a.get_rand_node().append, key="kalas", value=test_str2, test=normal_test(True))
match_str = json.dumps(["apa", "elefant", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "tiger"])
test_str3 = json.dumps(["elefant"])
do_sync(a.get_rand_node().remove, key="kalas", value=test_str3, test=normal_test(True))
match_str = json.dumps(["apa", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "elefant", "tiger"])
test_str2 = json.dumps(["elefant", "tiger"])
do_sync(a.get_rand_node().append, key="kalas", value=test_str2, test=normal_test(True))
match_str = json.dumps(["apa", "elefant", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "elefant", "tiger"])
test_str4 = json.dumps(["lejon"])
do_sync(a.get_rand_node().remove, key="kalas", value=test_str4, test=normal_test(True))
match_str = json.dumps(["apa", "elefant", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
match_str = json.dumps(item)
do_sync(a.get_rand_node().set, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=json_test(match_str))
# Should fail
do_sync(a.get_rand_node().append, key="kalas", value="apa", test=normal_test(False))
do_sync(a.get_rand_node().set, key="kalas", value="apa", test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
# Should fail
do_sync(a.get_rand_node().append, key="kalas", value="apa", test=normal_test(False))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
finally:
import traceback
traceback.print_exc()
a.stop()
def test_set(self, monkeypatch):
a = KNet(self.test_nodes)
a.start()
try:
do_sync(a.get_rand_node().set, key="kalas", value="apa", test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
for _ in range(10):
test_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
do_sync(a.get_rand_node().set, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test(test_str))
finally:
a.stop()
def test_delete(self, monkeypatch):
a = KNet(self.test_nodes)
a.start()
try:
# Make the nodes know each other
for _ in range(10):
key_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
test_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
do_sync(a.get_rand_node().set, key=key_str, value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key=key_str, test=normal_test(test_str))
do_sync(a.get_rand_node().set, key="kalas", value="apa", test=normal_test(True))
time.sleep(.7)
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
for _ in range(3):
test_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
do_sync(a.get_rand_node().set, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test(test_str))
do_sync(a.get_rand_node().set, key="kalas", value=None, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test(None))
finally:
a.stop()
```
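The `do_sync` helper above is the glue between Twisted's asynchronous Deferreds and the synchronous test flow: the Deferred's result is pushed onto a `Queue` and the test blocks on `q.get()` until it arrives or the timeout expires. A stripped-down illustration of the same pattern:

```python
# Sketch of the Deferred-to-Queue pattern used by do_sync above.
import Queue

def wait_for(deferred, timeout=0.2):
    """Block until the Deferred fires and return its result; raises Queue.Empty on timeout."""
    q = Queue.Queue()
    deferred.addCallback(q.put)
    return q.get(timeout=timeout)

# Hypothetical use with any Deferred-returning call, e.g. a kademlia get:
# value = wait_for(node.get("kalas"))
```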
#### File: calvin/tests/test_actormanager.py
```python
import unittest
import pytest
from mock import Mock, patch
from calvin.tests import DummyNode
from calvin.runtime.north.actormanager import ActorManager
pytestmark = pytest.mark.unittest
class ActorManagerTests(unittest.TestCase):
def setUp(self):
n = DummyNode()
self.am = ActorManager(node=n)
n.am = self.am
def tearDown(self):
pass
def _new_actor(self, a_type, a_args, **kwargs):
a_id = self.am.new(a_type, a_args, **kwargs)
a = self.am.actors.get(a_id, None)
self.assertTrue(a)
return a, a_id
def test_new_actor(self):
# Test basic actor creation
a_type = 'std.Constant'
data = 42
a, _ = self._new_actor(a_type, {'data':data})
self.assertEqual(a.data, data)
def test_actor_state_get(self):
# Test basic actor state retrieval
a_type = 'std.Constant'
data = 42
a, a_id = self._new_actor(a_type, {'data':data})
s = a.state()
self.assertEqual(s['data'], data)
self.assertEqual(s['id'], a_id)
self.assertEqual(s['n'], 1)
def test_new_actor_from_state(self):
# Test basic actor state manipulation
a_type = 'std.Constant'
data = 42
a, a_id = self._new_actor(a_type, {'data':data})
a.data = 43
a.n = 2
s = a.state()
self.am.destroy(a_id)
self.assertEqual(len(self.am.actors), 0)
b, b_id = self._new_actor(a_type, None, state = s)
self.assertEqual(b.data, 43)
self.assertEqual(b.n, 2)
# Assert id is preserved
self.assertEqual(b.id, a_id)
# Assert actor database is consistent
self.assertTrue(self.am.actors[a_id])
self.assertEqual(len(self.am.actors), 1)
@patch('calvin.runtime.north.storage.Storage.delete_actor')
@patch('calvin.runtime.north.metering.Metering.remove_actor_info')
def test_destroy_actor(self, remove_actor_info, delete_actor):
actor, actor_id = self._new_actor('std.Constant', {'data': 42})
self.am.destroy(actor_id)
assert actor_id not in self.am.actors
remove_actor_info.assert_called_with(actor_id)
self.am.node.storage.delete_actor.assert_called_with(actor_id)
self.am.node.control.log_actor_destroy.assert_called_with(actor_id)
def test_enable_actor(self):
actor, actor_id = self._new_actor('std.Constant', {'data': 42})
actor.enable = Mock()
self.am.enable(actor_id)
assert actor.enable.called
def test_disable_actor(self):
actor, actor_id = self._new_actor('std.Constant', {'data': 42})
actor.disable = Mock()
self.am.disable(actor_id)
assert actor.disable.called
def test_migrate_to_same_node_does_nothing(self):
callback_mock = Mock()
actor, actor_id = self._new_actor('std.Constant', {'data': 42})
actor.will_migrate = Mock()
self.am.migrate(actor_id, self.am.node.id, callback_mock)
assert not actor.will_migrate.called
assert callback_mock.called
args, kwargs = callback_mock.call_args
self.assertEqual(kwargs['status'].status, 200)
def test_migrate_non_existing_actor_returns_false(self):
callback_mock = Mock()
self.am.migrate("123", self.am.node.id, callback_mock)
assert callback_mock.called
args, kwargs = callback_mock.call_args
self.assertEqual(kwargs['status'].status, 500)
def test_migrate(self):
callback_mock = Mock()
actor, actor_id = self._new_actor('std.Constant', {'data': 42})
peer_node = DummyNode()
actor.will_migrate = Mock()
self.am.migrate(actor_id, peer_node.id, callback_mock)
assert actor.will_migrate.called
assert self.am.node.pm.disconnect.called
args, kwargs = self.am.node.pm.disconnect.call_args
self.assertEqual(kwargs['actor_id'], actor_id)
cb = kwargs['callback']
self.assertEqual(cb.kwargs['actor'], actor)
self.assertEqual(cb.kwargs['actor_type'], actor._type)
self.assertEqual(cb.kwargs['callback'], callback_mock)
self.assertEqual(cb.kwargs['node_id'], peer_node.id)
self.assertEqual(cb.kwargs['ports'], actor.connections(self.am.node.id))
self.am.node.control.log_actor_migrate.assert_called_once_with(actor_id, peer_node.id)
def test_connect(self):
actor, actor_id = self._new_actor('std.Constant', {'data': 42})
connection_list = [['1', '2', '3', '4'], ['5', '6', '7', '8']]
callback_mock = Mock()
self.am.connect(actor_id, connection_list, callback_mock)
self.assertEqual(self.am.node.pm.connect.call_count, 2)
calls = self.am.node.pm.connect.call_args_list
for index, (args, kwargs) in enumerate(calls):
self.assertEqual(kwargs['port_id'], connection_list[index][1])
self.assertEqual(kwargs['peer_node_id'], connection_list[index][2])
self.assertEqual(kwargs['peer_port_id'], connection_list[index][3])
callback = kwargs['callback'].kwargs
self.assertEqual(callback['peer_port_id'], connection_list[index][3])
self.assertEqual(callback['actor_id'], actor_id)
self.assertEqual(callback['peer_port_ids'], ['4', '8'])
self.assertEqual(callback['_callback'], callback_mock)
def test_connections_returns_actor_connections_for_current_node(self):
actor, actor_id = self._new_actor('std.Constant', {'data': 42, 'name': 'actor'})
expected = {
'actor_name': 'actor',
'actor_id': actor_id,
'inports': {},
'outports': {actor.outports['token'].id: actor.outports['token'].get_peers()}
}
self.assertEqual(self.am.connections(actor_id), expected)
def test_missing_actor(self):
test_functions = [("report", ()), ("destroy", ()), ("enable", ()), ("disable", ()),
("connect", ([], None)), ("connections", ()), ("dump", ()),
("set_port_property", (None, None, None, None)),
("get_port_state", (None, ))]
for func, args in test_functions:
with pytest.raises(Exception) as excinfo:
print func
getattr(self.am, func)('123', *args)
assert "Actor '123' not found" in str(excinfo.value)
def test_actor_type(self):
actor, actor_id = self._new_actor('std.Constant', {'data': 42, 'name': 'actor'})
self.assertEqual(self.am.actor_type(actor_id), 'std.Constant')
def test_actor_type_of_missing_actor(self):
self.assertEqual(self.am.actor_type("123"), 'BAD ACTOR')
def test_enabled_actors(self):
actor, actor_id = self._new_actor('std.Constant', {'data': 42, 'name': 'actor'})
enabled_actor, enabled_actor_id = self._new_actor('std.Constant', {'data': 42, 'name': 'actor'})
enabled_actor.enable()
self.assertEqual(self.am.enabled_actors(), [enabled_actor])
def test_list_actors(self):
actor_1, actor_1_id = self._new_actor('std.Constant', {'data': 42, 'name': 'actor'})
actor_2, actor_2_id = self._new_actor('std.Constant', {'data': 42, 'name': 'actor'})
actors = self.am.list_actors()
assert actor_1_id in actors
assert actor_2_id in actors
if __name__ == '__main__':
import unittest
suite = unittest.TestLoader().loadTestsFromTestCase(ActorManagerTests)
unittest.TextTestRunner(verbosity=2).run(suite)
```
#### File: calvin/tests/test_actor.py
```python
import pytest
from mock import Mock
from calvin.tests import DummyNode
from calvin.runtime.north.actormanager import ActorManager
from calvin.runtime.south.endpoint import LocalOutEndpoint, LocalInEndpoint
from calvin.actor.actor import Actor
pytestmark = pytest.mark.unittest
def create_actor(node):
actor_manager = ActorManager(node)
actor_id = actor_manager.new('std.Identity', {})
actor = actor_manager.actors[actor_id]
actor._calvinsys = Mock()
return actor
@pytest.fixture
def actor():
return create_actor(DummyNode())
@pytest.mark.parametrize("port_type,port_name,port_property,value,expected", [
("invalid", "", "", "", False),
("in", "missing", "", "", False),
("out", "missing", "", "", False),
("out", "token", "missing", "", False),
("in", "token", "missing", "", False),
("out", "token", "name", "new_name", True),
("out", "token", "name", "new_name", True),
])
def test_set_port_property(port_type, port_name, port_property, value, expected):
assert actor().set_port_property(port_type, port_name, port_property, value) is expected
@pytest.mark.parametrize("inport_ret_val,outport_ret_val,expected", [
(False, False, False),
(False, True, False),
(True, False, False),
(True, True, True),
])
def test_did_connect(actor, inport_ret_val, outport_ret_val, expected):
for port in actor.inports.values():
port.is_connected = Mock(return_value=inport_ret_val)
for port in actor.outports.values():
port.is_connected = Mock(return_value=outport_ret_val)
actor.fsm = Mock()
actor.did_connect(None)
if expected:
actor.fsm.transition_to.assert_called_with(Actor.STATUS.ENABLED)
assert actor._calvinsys.scheduler_wakeup.called
else:
assert not actor.fsm.transition_to.called
assert not actor._calvinsys.scheduler_wakeup.called
@pytest.mark.parametrize("inport_ret_val,outport_ret_val,expected", [
(True, True, False),
(True, False, False),
(False, True, False),
(False, False, True),
])
def test_did_disconnect(actor, inport_ret_val, outport_ret_val, expected):
for port in actor.inports.values():
port.is_connected = Mock(return_value=inport_ret_val)
for port in actor.outports.values():
port.is_connected = Mock(return_value=outport_ret_val)
actor.fsm = Mock()
actor.did_disconnect(None)
if expected:
actor.fsm.transition_to.assert_called_with(Actor.STATUS.READY)
else:
assert not actor.fsm.transition_to.called
def test_enabled(actor):
actor.enable()
assert actor.enabled()
actor.disable()
assert not actor.enabled()
def test_connections():
node = DummyNode()
node.id = "node_id"
actor = create_actor(node)
inport = actor.inports['token']
outport = actor.outports['token']
port = Mock()
port.id = "x"
peer_port = Mock()
peer_port.id = "y"
inport.attach_endpoint(LocalInEndpoint(port, peer_port))
outport.attach_endpoint(LocalOutEndpoint(port, peer_port))
assert actor.connections(node) == {
'actor_id': actor.id,
'actor_name': actor.name,
'inports': {inport.id: (node, "y")},
'outports': {outport.id: [(node, "y")]}
}
def test_state(actor):
inport = actor.inports['token']
outport = actor.outports['token']
correct_state = {
'_component_members': set([actor.id]),
'_deployment_requirements': [],
'_managed': set(['dump', '_signature', 'id', '_deployment_requirements', 'name', 'credentials']),
'_signature': None,
'dump': False,
'id': actor.id,
'inports': {'token': {'fifo': {'N': 5,
'fifo': [{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'}],
'read_pos': {inport.id: 0},
'readers': [inport.id],
'tentative_read_pos': {inport.id: 0},
'write_pos': 0},
'id': inport.id,
'name': 'token'}},
'name': '',
'outports': {'token': {'fanout': 1,
'fifo': {'N': 5,
'fifo': [{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'}],
'read_pos': {},
'readers': [],
'tentative_read_pos': {},
'write_pos': 0},
'id': outport.id,
'name': 'token'}}}
test_state = actor.state()
for k, v in correct_state.iteritems():
# The read state uses lists to support JSON serialization
if isinstance(v, set):
assert set(test_state[k]) == v
else:
assert test_state[k] == v
@pytest.mark.parametrize("prev_signature,new_signature,expected", [
(None, "new_val", "new_val"),
("old_val", "new_val", "old_val")
])
def test_set_signature(actor, prev_signature, new_signature, expected):
actor.signature_set(prev_signature)
actor.signature_set(new_signature)
assert actor._signature == expected
def test_component(actor):
actor.component_add(1)
assert 1 in actor.component_members()
actor.component_add([2, 3])
assert 2 in actor.component_members()
assert 3 in actor.component_members()
actor.component_remove(1)
assert 1 not in actor.component_members()
actor.component_remove([2, 3])
assert 2 not in actor.component_members()
assert 3 not in actor.component_members()
def test_requirements(actor):
assert actor.requirements_get() == []
actor.requirements_add([1, 2, 3])
assert actor.requirements_get() == [1, 2, 3]
actor.requirements_add([4, 5])
assert actor.requirements_get() == [4, 5]
actor.requirements_add([6, 7], extend=True)
assert actor.requirements_get() == [4, 5, 6, 7]
```
#### File: calvin/tests/test_calvincontrol.py
```python
import pytest
from mock import Mock, patch
from calvin.runtime.north.calvincontrol import get_calvincontrol, CalvinControl
from calvin.utilities import calvinuuid
pytestmark = pytest.mark.unittest
def calvincontrol():
control = CalvinControl()
control.send_response = Mock()
control.send_streamheader = Mock()
return control
uuid = calvinuuid.uuid("")
def test_get_calvincontrol_returns_xxx():
control = get_calvincontrol()
assert control == get_calvincontrol()
@pytest.mark.parametrize("url,match,handler", [
("GET /actor_doc HTTP/1", None, "handle_get_actor_doc"),
("POST /log HTTP/1", None, "handle_post_log"),
("DELETE /log/TRACE_" + uuid + " HTTP/1", "TRACE_" + uuid, "handle_delete_log"),
("GET /log/TRACE_" + uuid + " HTTP/1", "TRACE_" + uuid, "handle_get_log"),
("GET /id HTTP/1", None, "handle_get_node_id"),
("GET /nodes HTTP/1", None, "handle_get_nodes"),
("GET /node/NODE_" + uuid + " HTTP/1", "NODE_" + uuid, "handle_get_node"),
("POST /peer_setup HTTP/1", None, "handle_peer_setup"),
("GET /applications HTTP/1", None, "handle_get_applications"),
("GET /application/APP_" + uuid + " HTTP/1", "APP_" + uuid, "handle_get_application"),
("DELETE /application/APP_" + uuid + " HTTP/1", "APP_" + uuid, "handle_del_application"),
("POST /actor HTTP/1", None, "handle_new_actor"),
("GET /actors HTTP/1", None, "handle_get_actors"),
("GET /actor/" + uuid + " HTTP/1", uuid, "handle_get_actor"),
("DELETE /actor/" + uuid + " HTTP/1", uuid, "handle_del_actor"),
("GET /actor/" + uuid + "/report HTTP/1", uuid, "handle_get_actor_report"),
("POST /actor/" + uuid + "/migrate HTTP/1", uuid, "handle_actor_migrate"),
("POST /actor/" + uuid + "/disable HTTP/1", uuid, "handle_actor_disable"),
("GET /actor/" + uuid + "/port/PORT_" + uuid + " HTTP/1", uuid, "handle_get_port"),
("GET /actor/" + uuid + "/port/PORT_" + uuid + "/state HTTP/1", uuid, "handle_get_port_state"),
("POST /connect HTTP/1", None, "handle_connect"),
("POST /set_port_property HTTP/1", None, "handle_set_port_property"),
("POST /deploy HTTP/1", None, "handle_deploy"),
("POST /application/APP_" + uuid + "/migrate HTTP/1", "APP_" + uuid, "handle_post_application_migrate"),
("POST /disconnect HTTP/1", None, "handle_disconnect"),
("DELETE /node HTTP/1", None, "handle_quit"),
("POST /meter HTTP/1", None, "handle_post_meter"),
("DELETE /meter/METERING_" + uuid + " HTTP/1", "METERING_" + uuid, "handle_delete_meter"),
("GET /meter/METERING_" + uuid + "/timed HTTP/1", "METERING_" + uuid, "handle_get_timed_meter"),
("GET /meter/METERING_" + uuid + "/aggregated HTTP/1", "METERING_" + uuid, "handle_get_aggregated_meter"),
("GET /meter/METERING_" + uuid + "/metainfo HTTP/1", "METERING_" + uuid, "handle_get_metainfo_meter"),
("POST /index/abc123 HTTP/1", "abc123", "handle_post_index"),
("DELETE /index/abc123 HTTP/1", "abc123", "handle_delete_index"),
("GET /index/abc123 HTTP/1", "abc123", "handle_get_index"),
("GET /storage/abc123 HTTP/1", "abc123", "handle_get_storage"),
("POST /storage/abc123 HTTP/1", "abc123", "handle_post_storage"),
("OPTIONS /abc123 HTTP/1.1", None, "handle_options")
])
def test_routes_correctly(url, match, handler):
with patch.object(CalvinControl, handler) as func:
control = calvincontrol()
control.route_request(1, 2, url, 3, {})
assert func.called
args, kwargs = func.call_args
assert args[0] == 1
assert args[1] == 2
if match:
assert args[2].group(1) == match
assert args[3] == {}
assert args[4] == 3
def test_send_response():
control = CalvinControl()
control.tunnel_client = Mock()
handle = Mock()
connection = Mock()
data = {'value': 1}
status = 200
control.connections[handle] = connection
control.send_response(handle, None, data, status)
assert control.tunnel_client.send.called
control.connections[handle] = connection
connection.connection_lost = True
control.send_response(handle, connection, data, status)
assert not connection.send.called
control.connections[handle] = connection
connection.connection_lost = False
control.send_response(handle, connection, data, status)
assert connection.send.called
connection.send.assert_called_with(data)
assert handle not in control.connections
def test_send_streamheader():
control = CalvinControl()
control.tunnel_client = Mock()
handle = Mock()
connection = Mock()
control.connections[handle] = connection
control.send_streamheader(handle, None)
assert control.tunnel_client.send.called
control.connections[handle] = connection
connection.connection_lost = True
control.send_streamheader(handle, connection)
assert not connection.send.called
control.connections[handle] = connection
connection.connection_lost = False
control.send_streamheader(handle, connection)
assert connection.send.called
```
#### File: calvin/tests/test_calvin.py
```python
import os
import unittest
import time
import pytest
import multiprocessing
from calvin.Tools import cscompiler as compiler
from calvin.Tools import deployer
from calvin.utilities import calvinconfig
from calvin.utilities import calvinlogger
from calvin.utilities.nodecontrol import dispatch_node
from calvin.utilities.attribute_resolver import format_index_string
from calvin.requests.request_handler import RequestHandler, RT
_log = calvinlogger.get_logger(__name__)
_conf = calvinconfig.get()
def actual_tokens(rt, actor_id):
return request_handler.report(rt, actor_id)
def expected_counter(n):
return [i for i in range(1, n + 1)]
def cumsum(l):
s = 0
for n in l:
s = s + n
yield s
def expected_sum(n):
return list(cumsum(range(1, n + 1)))
def expected_tokens(rt, actor_id, src_actor_type):
tokens = request_handler.report(rt, actor_id)
if src_actor_type == 'std.CountTimer':
return expected_counter(tokens)
if src_actor_type == 'std.Sum':
return expected_sum(tokens)
return None
runtime = None
runtimes = []
peerlist = []
kill_peers = True
request_handler = None
def setup_module(module):
global runtime
global runtimes
global peerlist
global kill_peers
global request_handler
ip_addr = None
bt_master_controluri = None
request_handler = RequestHandler()
try:
ip_addr = os.environ["CALVIN_TEST_IP"]
purpose = os.environ["CALVIN_TEST_UUID"]
except KeyError:
pass
if ip_addr is None:
# Bluetooth tests assumes one master runtime with two connected peers
# CALVIN_TEST_BT_MASTERCONTROLURI is the control uri of the master runtime
try:
bt_master_controluri = os.environ["CALVIN_TEST_BT_MASTERCONTROLURI"]
_log.debug("Running Bluetooth tests")
except KeyError:
pass
if ip_addr:
remote_node_count = 2
kill_peers = False
test_peers = None
import socket
ports=[]
for a in range(2):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
addr = s.getsockname()
ports.append(addr[1])
s.close()
runtime,_ = dispatch_node(["calvinip://%s:%s" % (ip_addr, ports[0])], "http://%s:%s" % (ip_addr, ports[1]))
_log.debug("First runtime started, control http://%s:%s, calvinip://%s:%s" % (ip_addr, ports[1], ip_addr, ports[0]))
interval = 0.5
for retries in range(1,20):
time.sleep(interval)
_log.debug("Trying to get test nodes for 'purpose' %s" % purpose)
test_peers = request_handler.get_index(runtime, format_index_string({'node_name':
{'organization': 'com.ericsson',
'purpose': purpose}
}))
if test_peers is not None and test_peers["result"] is not None and \
len(test_peers["result"]) == remote_node_count:
test_peers = test_peers["result"]
break
if test_peers is None or len(test_peers) != remote_node_count:
_log.debug("Failed to find all remote nodes within time, peers = %s" % test_peers)
raise Exception("Not all nodes found dont run tests, peers = %s" % test_peers)
test_peer2_id = test_peers[0]
test_peer2 = request_handler.get_node(runtime, test_peer2_id)
if test_peer2:
runtime2 = RT(test_peer2["control_uri"])
runtime2.id = test_peer2_id
runtime2.uri = test_peer2["uri"]
runtimes.append(runtime2)
test_peer3_id = test_peers[1]
if test_peer3_id:
test_peer3 = request_handler.get_node(runtime, test_peer3_id)
if test_peer3:
runtime3 = RT(test_peer3["control_uri"])
runtime3.id = test_peer3_id
runtime3.uri = test_peer3["uri"]
runtimes.append(runtime3)
elif bt_master_controluri:
runtime = RT(bt_master_controluri)
bt_master_id = request_handler.get_node_id(bt_master_controluri)
data = request_handler.get_node(runtime, bt_master_id)
if data:
runtime.id = bt_master_id
runtime.uri = data["uri"]
test_peers = request_handler.get_nodes(runtime)
test_peer2_id = test_peers[0]
test_peer2 = request_handler.get_node(runtime, test_peer2_id)
if test_peer2:
rt2 = RT(test_peer2["control_uri"])
rt2.id = test_peer2_id
rt2.uri = test_peer2["uri"]
runtimes.append(rt2)
test_peer3_id = test_peers[1]
if test_peer3_id:
test_peer3 = request_handler.get_node(runtime, test_peer3_id)
if test_peer3:
rt3 = RT(test_peer3["control_uri"])
rt3.id = test_peer3_id
rt3.uri = test_peer3["uri"]
runtimes.append(rt3)
else:
try:
ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
except:
import socket
ip_addr = socket.gethostbyname(socket.gethostname())
localhost = "calvinip://%s:5000" % (ip_addr,), "http://localhost:5001"
remotehosts = [("calvinip://%s:%d" % (ip_addr, d), "http://localhost:%d" % (d+1)) for d in range(5002, 5005, 2)]
# remotehosts = [("calvinip://127.0.0.1:5002", "http://localhost:5003")]
for host in remotehosts:
runtimes += [dispatch_node([host[0]], host[1])[0]]
runtime, _ = dispatch_node([localhost[0]], localhost[1])
time.sleep(1)
# FIXME When storage up and running peersetup not needed, but still useful during testing
request_handler.peer_setup(runtime, [i[0] for i in remotehosts])
time.sleep(0.5)
"""
# FIXME Does not yet support peerlist
try:
self.peerlist = peerlist(
self.runtime, self.runtime.id, len(remotehosts))
# Make sure all peers agree on network
[peerlist(self.runtime, p, len(self.runtimes)) for p in self.peerlist]
except:
self.peerlist = []
"""
peerlist = [rt.control_uri for rt in runtimes]
print "SETUP DONE ***", peerlist
def teardown_module(module):
global runtime
global runtimes
global kill_peers
if kill_peers:
for peer in runtimes:
request_handler.quit(peer)
time.sleep(0.2)
request_handler.quit(runtime)
time.sleep(0.2)
for p in multiprocessing.active_children():
p.terminate()
time.sleep(0.2)
class CalvinTestBase(unittest.TestCase):
def assertListPrefix(self, expected, actual, allow_empty=False):
assert actual
if len(expected) > len(actual):
self.assertListEqual(expected[:len(actual)], actual)
elif len(expected) < len(actual):
self.assertListEqual(expected, actual[:len(expected)])
else :
self.assertListEqual(expected, actual)
def setUp(self):
self.runtime = runtime
self.runtimes = runtimes
self.peerlist = peerlist
@pytest.mark.slow
@pytest.mark.essential
class TestNodeSetup(CalvinTestBase):
"""Testing starting a node"""
def testStartNode(self):
"""Testing starting node"""
print "### testStartNode ###", self.runtime
rt, id_, peers = self.runtime, self.runtime.id, self.peerlist
print "GOT RT"
assert request_handler.get_node(rt, id_)['uri'] == rt.uri
print "GOT URI", rt.uri
@pytest.mark.essential
@pytest.mark.slow
class TestLocalConnectDisconnect(CalvinTestBase):
"""Testing local connect/disconnect/re-connect"""
def testLocalSourceSink(self):
"""Testing local source and sink"""
rt, id_, peers = self.runtime, self.runtime.id, self.peerlist
src = request_handler.new_actor(rt, 'std.CountTimer', 'src')
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
request_handler.connect(rt, snk, 'token', id_, src, 'integer')
time.sleep(0.4)
# disable(rt, id_, src)
request_handler.disconnect(rt, src)
expected = expected_tokens(rt, src, 'std.CountTimer')
actual = actual_tokens(rt, snk)
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, src)
request_handler.delete_actor(rt, snk)
def testLocalConnectDisconnectSink(self):
"""Testing local connect/disconnect/re-connect on sink"""
rt, id_ = self.runtime, self.runtime.id
src = request_handler.new_actor(rt, "std.CountTimer", "src")
snk = request_handler.new_actor_wargs(rt, "io.StandardOut", "snk", store_tokens=1)
request_handler.connect(rt, snk, 'token', id_, src, 'integer')
time.sleep(0.2)
request_handler.disconnect(rt, snk)
request_handler.connect(rt, snk, 'token', id_, src, 'integer')
time.sleep(0.2)
request_handler.disconnect(rt, snk)
# disable(rt, id_, src)
expected = expected_tokens(rt, src, 'std.CountTimer')
actual = actual_tokens(rt, snk)
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, src)
request_handler.delete_actor(rt, snk)
def testLocalConnectDisconnectSource(self):
"""Testing local connect/disconnect/re-connect on source"""
rt, id_ = self.runtime, self.runtime.id
src = request_handler.new_actor(rt, "std.CountTimer", "src")
snk = request_handler.new_actor_wargs(rt, "io.StandardOut", "snk", store_tokens=1)
request_handler.connect(rt, snk, "token", id_, src, "integer")
time.sleep(0.2)
request_handler.disconnect(rt, src)
request_handler.connect(rt, snk, "token", id_, src, "integer")
time.sleep(0.2)
request_handler.disconnect(rt, src)
#disable(rt, id_, src)
expected = expected_tokens(rt, src, "std.CountTimer")
actual = actual_tokens(rt, snk)
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, src)
request_handler.delete_actor(rt, snk)
def testLocalConnectDisconnectFilter(self):
"""Testing local connect/disconnect/re-connect on filter"""
rt, id_ = self.runtime, self.runtime.id
src = request_handler.new_actor(rt, "std.CountTimer", "src")
sum_ = request_handler.new_actor(rt, "std.Sum", "sum")
snk = request_handler.new_actor_wargs(rt, "io.StandardOut", "snk", store_tokens=1)
request_handler.connect(rt, snk, "token", id_, sum_, "integer")
request_handler.connect(rt, sum_, "integer", id_, src, "integer")
time.sleep(0.2)
request_handler.disconnect(rt, sum_)
request_handler.connect(rt, snk, "token", id_, sum_, "integer")
request_handler.connect(rt, sum_, "integer", id_, src, "integer")
time.sleep(0.2)
request_handler.disconnect(rt, src)
# disable(rt, id_, src)
expected = expected_tokens(rt, src, "std.Sum")
actual = actual_tokens(rt, snk)
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, src)
request_handler.delete_actor(rt, sum_)
request_handler.delete_actor(rt, snk)
def testTimerLocalSourceSink(self):
"""Testing timer based local source and sink"""
rt, id_, peers = self.runtime, self.runtime.id, self.peerlist
src = request_handler.new_actor_wargs(
rt, 'std.CountTimer', 'src', sleep=0.1, steps=10)
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
request_handler.connect(rt, snk, 'token', id_, src, 'integer')
time.sleep(1.2)
# disable(rt, id_, src)
request_handler.disconnect(rt, src)
expected = expected_tokens(rt, src, 'std.CountTimer')
actual = actual_tokens(rt, snk)
self.assertListPrefix(expected, actual)
self.assertTrue(len(actual) > 0)
request_handler.delete_actor(rt, src)
request_handler.delete_actor(rt, snk)
@pytest.mark.essential
@pytest.mark.slow
class TestRemoteConnection(CalvinTestBase):
"""Testing remote connections"""
def testRemoteOneActor(self):
"""Testing remote port"""
rt = self.runtime
id_ = rt.id
peer = self.runtimes[0]
peer_id = peer.id
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
sum_ = request_handler.new_actor(peer, 'std.Sum', 'sum')
src = request_handler.new_actor(rt, 'std.CountTimer', 'src')
request_handler.connect(rt, snk, 'token', peer_id, sum_, 'integer')
request_handler.connect(peer, sum_, 'integer', id_, src, 'integer')
time.sleep(0.5)
request_handler.disable(rt, src)
expected = expected_tokens(rt, src, 'std.Sum')
actual = actual_tokens(rt, snk)
assert(len(actual) > 1)
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk)
request_handler.delete_actor(peer, sum_)
request_handler.delete_actor(rt, src)
def testRemoteSlowPort(self):
"""Testing remote slow port and that token flow control works"""
rt = self.runtime
id_ = rt.id
peer = self.runtimes[0]
peer_id = peer.id
snk1 = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk1', store_tokens=1)
alt = request_handler.new_actor(peer, 'std.Alternate', 'alt')
src1 = request_handler.new_actor_wargs(rt, 'std.CountTimer', 'src1', sleep=0.1, steps=100)
src2 = request_handler.new_actor_wargs(rt, 'std.CountTimer', 'src2', sleep=1.0, steps=10)
request_handler.connect(rt, snk1, 'token', peer_id, alt, 'token')
request_handler.connect(peer, alt, 'token_1', id_, src1, 'integer')
request_handler.connect(peer, alt, 'token_2', id_, src2, 'integer')
time.sleep(2)
request_handler.disable(rt, src1)
request_handler.disable(rt, src2)
time.sleep(0.2) # HACK
def _d():
for i in range(1,100):
yield i
yield i
expected = list(_d())
actual = actual_tokens(rt, snk1)
assert(len(actual) > 1)
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk1)
request_handler.delete_actor(peer, alt)
request_handler.delete_actor(rt, src1)
request_handler.delete_actor(rt, src2)
def testRemoteSlowFanoutPort(self):
"""Testing remote slow port with fan out and that token flow control works"""
rt = self.runtime
id_ = rt.id
peer = self.runtimes[0]
peer_id = peer.id
snk1 = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk1', store_tokens=1)
snk2 = request_handler.new_actor_wargs(peer, 'io.StandardOut', 'snk2', store_tokens=1)
alt = request_handler.new_actor(peer, 'std.Alternate', 'alt')
src1 = request_handler.new_actor_wargs(rt, 'std.CountTimer', 'src1', sleep=0.1, steps=100)
src2 = request_handler.new_actor_wargs(rt, 'std.CountTimer', 'src2', sleep=1.0, steps=10)
request_handler.connect(rt, snk1, 'token', peer_id, alt, 'token')
request_handler.connect(peer, snk2, 'token', id_, src1, 'integer')
request_handler.connect(peer, alt, 'token_1', id_, src1, 'integer')
request_handler.connect(peer, alt, 'token_2', id_, src2, 'integer')
time.sleep(2)
request_handler.disable(rt, src1)
request_handler.disable(rt, src2)
time.sleep(0.2) # HACK
def _d():
for i in range(1,100):
yield i
yield i
expected = list(_d())
actual = actual_tokens(rt, snk1)
assert(len(actual) > 1)
self.assertListPrefix(expected, actual)
expected = range(1, 100)
actual = actual_tokens(peer, snk2)
assert(len(actual) > 1)
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk1)
request_handler.delete_actor(peer, snk2)
request_handler.delete_actor(peer, alt)
request_handler.delete_actor(rt, src1)
request_handler.delete_actor(rt, src2)
@pytest.mark.essential
@pytest.mark.slow
class TestActorMigration(CalvinTestBase):
def testOutPortRemoteToLocalMigration(self):
"""Testing outport remote to local migration"""
rt = self.runtime
id_ = rt.id
peer = self.runtimes[0]
peer_id = peer.id
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
sum_ = request_handler.new_actor(peer, 'std.Sum', 'sum')
src = request_handler.new_actor(rt, 'std.CountTimer', 'src')
request_handler.connect(rt, snk, 'token', peer_id, sum_, 'integer')
request_handler.connect(peer, sum_, 'integer', id_, src, 'integer')
time.sleep(0.27)
actual_1 = actual_tokens(rt, snk)
request_handler.migrate(rt, src, peer_id)
time.sleep(0.2)
expected = expected_tokens(peer, src, 'std.Sum')
actual = actual_tokens(rt, snk)
assert(len(actual) > 1)
assert(len(actual) > len(actual_1))
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk)
request_handler.delete_actor(peer, sum_)
request_handler.delete_actor(peer, src)
def testOutPortLocalToRemoteMigration(self):
"""Testing outport local to remote migration"""
rt = self.runtime
id_ = rt.id
peer = self.runtimes[0]
peer_id = peer.id
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
sum_ = request_handler.new_actor(peer, 'std.Sum', 'sum')
src = request_handler.new_actor(peer, 'std.CountTimer', 'src')
request_handler.connect(rt, snk, 'token', peer_id, sum_, 'integer')
request_handler.connect(peer, sum_, 'integer', peer_id, src, 'integer')
time.sleep(0.27)
actual_1 = actual_tokens(rt, snk)
request_handler.migrate(peer, src, id_)
time.sleep(0.2)
expected = expected_tokens(rt, src, 'std.Sum')
actual = actual_tokens(rt, snk)
assert(len(actual) > 1)
assert(len(actual) > len(actual_1))
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk)
request_handler.delete_actor(peer, sum_)
request_handler.delete_actor(rt, src)
def testOutPortLocalRemoteRepeatedMigration(self):
"""Testing outport local to remote migration and revers repeatedly"""
rt = self.runtime
id_ = rt.id
peer = self.runtimes[0]
peer_id = peer.id
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
sum_ = request_handler.new_actor(peer, 'std.Sum', 'sum')
src = request_handler.new_actor(peer, 'std.CountTimer', 'src')
request_handler.connect(rt, snk, 'token', peer_id, sum_, 'integer')
request_handler.connect(peer, sum_, 'integer', peer_id, src, 'integer')
time.sleep(0.27)
actual_x = []
actual_1 = actual_tokens(rt, snk)
for i in range(5):
if i % 2 == 0:
request_handler.migrate(peer, src, id_)
else:
request_handler.migrate(rt, src, peer_id)
time.sleep(0.2)
actual_x_ = actual_tokens(rt, snk)
assert(len(actual_x_) > len(actual_x))
actual_x = actual_x_
expected = expected_tokens(rt, src, 'std.Sum')
actual = actual_tokens(rt, snk)
assert(len(actual) > 1)
assert(len(actual) > len(actual_1))
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk)
request_handler.delete_actor(peer, sum_)
request_handler.delete_actor(rt, src)
def testInOutPortRemoteToLocalMigration(self):
"""Testing out- and inport remote to local migration"""
rt = self.runtime
id_ = rt.id
peer = self.runtimes[0]
peer_id = peer.id
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
sum_ = request_handler.new_actor(peer, 'std.Sum', 'sum')
src = request_handler.new_actor(rt, 'std.CountTimer', 'src')
request_handler.connect(rt, snk, 'token', peer_id, sum_, 'integer')
request_handler.connect(peer, sum_, 'integer', id_, src, 'integer')
time.sleep(0.27)
actual_1 = actual_tokens(rt, snk)
request_handler.migrate(peer, sum_, id_)
time.sleep(0.2)
expected = expected_tokens(rt, src, 'std.Sum')
actual = actual_tokens(rt, snk)
assert(len(actual) > 1)
assert(len(actual) > len(actual_1))
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk)
request_handler.delete_actor(rt, sum_)
request_handler.delete_actor(rt, src)
def testInOutPortLocalRemoteRepeatedMigration(self):
"""Testing outport local to remote migration and revers repeatedly"""
rt = self.runtime
id_ = rt.id
peer = self.runtimes[0]
peer_id = peer.id
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
sum_ = request_handler.new_actor(rt, 'std.Sum', 'sum')
src = request_handler.new_actor(rt, 'std.CountTimer', 'src')
request_handler.connect(rt, snk, 'token', id_, sum_, 'integer')
request_handler.connect(rt, sum_, 'integer', id_, src, 'integer')
time.sleep(0.27)
actual_x = []
actual_1 = actual_tokens(rt, snk)
for i in range(5):
if i % 2 == 0:
request_handler.migrate(rt, sum_, peer_id)
else:
request_handler.migrate(peer, sum_, id_)
time.sleep(0.2)
actual_x_ = actual_tokens(rt, snk)
assert(len(actual_x_) > len(actual_x))
actual_x = actual_x_
expected = expected_tokens(rt, src, 'std.Sum')
actual = actual_tokens(rt, snk)
assert(len(actual) > 1)
assert(len(actual) > len(actual_1))
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk)
request_handler.delete_actor(peer, sum_)
request_handler.delete_actor(rt, src)
def testInOutPortLocalToRemoteMigration(self):
"""Testing out- and inport local to remote migration"""
rt = self.runtime
id_ = rt.id
peer = self.runtimes[0]
peer_id = peer.id
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
sum_ = request_handler.new_actor(rt, 'std.Sum', 'sum')
src = request_handler.new_actor(rt, 'std.CountTimer', 'src')
request_handler.connect(rt, snk, 'token', id_, sum_, 'integer')
request_handler.connect(rt, sum_, 'integer', id_, src, 'integer')
time.sleep(0.27)
actual_1 = actual_tokens(rt, snk)
request_handler.migrate(rt, sum_, peer_id)
time.sleep(0.2)
expected = expected_tokens(rt, src, 'std.Sum')
actual = actual_tokens(rt, snk)
assert(len(actual) > 1)
assert(len(actual) > len(actual_1))
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk)
request_handler.delete_actor(peer, sum_)
request_handler.delete_actor(rt, src)
def testInOutPortRemoteToRemoteMigration(self):
"""Testing out- and inport remote to remote migration"""
rt = self.runtime
id_ = rt.id
peer0 = self.runtimes[0]
peer0_id = peer0.id
peer1 = self.runtimes[1]
peer1_id = peer1.id
time.sleep(0.5)
snk = request_handler.new_actor_wargs(rt, 'io.StandardOut', 'snk', store_tokens=1)
sum_ = request_handler.new_actor(peer0, 'std.Sum', 'sum')
src = request_handler.new_actor(rt, 'std.CountTimer', 'src')
request_handler.connect(rt, snk, 'token', peer0_id, sum_, 'integer')
time.sleep(0.5)
request_handler.connect(peer0, sum_, 'integer', id_, src, 'integer')
time.sleep(0.5)
actual_1 = actual_tokens(rt, snk)
request_handler.migrate(peer0, sum_, peer1_id)
time.sleep(0.5)
expected = expected_tokens(rt, src, 'std.Sum')
actual = actual_tokens(rt, snk)
assert(len(actual) > 1)
assert(len(actual) > len(actual_1))
self.assertListPrefix(expected, actual)
request_handler.delete_actor(rt, snk)
request_handler.delete_actor(peer1, sum_)
request_handler.delete_actor(rt, src)
def testExplicitStateMigration(self):
"""Testing migration of explicit state handling"""
rt = self.runtime
id_ = rt.id
peer0 = self.runtimes[0]
peer0_id = peer0.id
peer1 = self.runtimes[1]
peer1_id = peer1.id
snk = request_handler.new_actor_wargs(peer0, 'io.StandardOut', 'snk', store_tokens=1)
wrapper = request_handler.new_actor(rt, 'misc.ExplicitStateExample', 'wrapper')
src = request_handler.new_actor(rt, 'std.CountTimer', 'src')
request_handler.connect(peer0, snk, 'token', id_, wrapper, 'token')
request_handler.connect(rt, wrapper, 'token', id_, src, 'integer')
time.sleep(0.3)
actual_1 = actual_tokens(peer0, snk)
request_handler.migrate(rt, wrapper, peer0_id)
time.sleep(0.3)
actual = actual_tokens(peer0, snk)
expected = [u'((( 1 )))', u'((( 2 )))', u'((( 3 )))', u'((( 4 )))', u'((( 5 )))', u'((( 6 )))', u'((( 7 )))', u'((( 8 )))']
assert(len(actual) > 1)
assert(len(actual) > len(actual_1))
self.assertListPrefix(expected, actual)
request_handler.delete_actor(peer0, snk)
request_handler.delete_actor(peer0, wrapper)
request_handler.delete_actor(rt, src)
@pytest.mark.essential
@pytest.mark.slow
class TestCalvinScript(CalvinTestBase):
def testCompileSimple(self):
rt = self.runtime
script = """
src : std.CountTimer()
snk : io.StandardOut(store_tokens=1)
src.integer > snk.token
"""
app_info, errors, warnings = compiler.compile(script, "simple")
d = deployer.Deployer(rt, app_info)
d.deploy() # ignoring app_id here
time.sleep(0.5)
src = d.actor_map['simple:src']
snk = d.actor_map['simple:snk']
request_handler.disconnect(rt, src)
actual = actual_tokens(rt, snk)
expected = expected_tokens(rt, src, 'std.CountTimer')
self.assertListPrefix(expected, actual)
d.destroy()
def testDestroyAppWithLocalActors(self):
rt = self.runtime
script = """
src : std.CountTimer()
snk : io.StandardOut(store_tokens=1)
src.integer > snk.token
"""
app_info, errors, warnings = compiler.compile(script, "simple")
d = deployer.Deployer(rt, app_info)
app_id = d.deploy()
time.sleep(0.2)
src = d.actor_map['simple:src']
snk = d.actor_map['simple:snk']
applications = request_handler.get_applications(rt)
assert app_id in applications
d.destroy()
applications = request_handler.get_applications(rt)
assert app_id not in applications
actors = request_handler.get_actors(rt)
assert src not in actors
assert snk not in actors
def testDestroyAppWithMigratedActors(self):
rt = self.runtime
rt1 = self.runtimes[0]
rt2 = self.runtimes[1]
script = """
src : std.CountTimer()
snk : io.StandardOut(store_tokens=1)
src.integer > snk.token
"""
app_info, errors, warnings = compiler.compile(script, "simple")
d = deployer.Deployer(rt, app_info)
app_id = d.deploy()
time.sleep(1.0)
src = d.actor_map['simple:src']
snk = d.actor_map['simple:snk']
# FIXME --> remove when operating on closed pending connections during migration is fixed
request_handler.disable(rt, src)
request_handler.disable(rt, snk)
# <--
request_handler.migrate(rt, snk, rt1.id)
request_handler.migrate(rt, src, rt2.id)
applications = request_handler.get_applications(rt)
assert app_id in applications
d.destroy()
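# Destroy propagates asynchronously across runtimes; poll with increasing back-off until the application is gone.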
for retry in range(1, 5):
applications = request_handler.get_applications(rt)
if app_id in applications:
print("Retrying in %s" % (retry * 0.2, ))
time.sleep(0.2 * retry)
else:
break
assert app_id not in applications
for retry in range(1, 5):
actors = []
actors.extend(request_handler.get_actors(rt))
actors.extend(request_handler.get_actors(rt1))
actors.extend(request_handler.get_actors(rt2))
intersection = [a for a in actors if a in d.actor_map.values()]
if len(intersection) > 0:
print("Retrying in %s" % (retry * 0.2, ))
time.sleep(0.2 * retry)
else:
break
for actor in d.actor_map.values():
assert actor not in actors
```
#### File: calvin/tests/test_calvinscript.py
```python
from calvin.csparser.parser import calvin_parser
from calvin.csparser.analyzer import generate_app_info
from calvin.csparser.checker import check
import unittest
import json
import difflib
import pytest
def absolute_filename(filename):
import os.path
return os.path.join(os.path.dirname(__file__), filename)
class CalvinTestBase(unittest.TestCase):
def setUp(self):
self.test_script_dir = absolute_filename('scripts/')
def tearDown(self):
pass
def _read_file(self, file):
try:
with open(file, 'r') as source:
text = source.read()
except Exception as e:
print "Error: Could not read file: '%s'" % file
raise e
return text
def _format_unexpected_error_message(self, errors):
msg_list = ["Expected empty error, not {0}".format(err) for err in errors]
return '\n'.join(msg_list)
def invoke_parser(self, test, source_text=None):
if not source_text:
test = self.test_script_dir + test + '.calvin'
source_text = self._read_file(test)
return calvin_parser(source_text, test)
def invoke_parser_assert_syntax(self, test, source_text=None):
"""Verify that the source is free from syntax errors and return parser output"""
result, errors, warnings = self.invoke_parser(test, source_text)
self.assertFalse(errors, self._format_unexpected_error_message(errors))
return result
def assert_script(self, test):
"""Check parsing of script against a reference result"""
result = self.invoke_parser_assert_syntax(test)
ref_file = self.test_script_dir + test + '.ref'
reference = self._read_file(ref_file)
# Canonical form
sorted_result = json.dumps(result, indent=4, sort_keys=True)
sorted_result = "\n".join([line for line in sorted_result.splitlines() if "sourcefile" not in line])
reference = "\n".join([line for line in reference.splitlines() if "sourcefile" not in line])
diff_lines = difflib.unified_diff(sorted_result.splitlines(), reference.splitlines())
diff = '\n'.join(diff_lines)
self.assertFalse(diff, diff)
class CalvinScriptParserTest(CalvinTestBase):
"""Test the CalvinScript parser"""
def testSimpleStructure(self):
"""Basic sanity check"""
self.assert_script('test1')
def testComplexScript(self):
self.assert_script('test9')
def testComponentDefinitions(self):
self.assert_script('test8')
def testSyntaxError(self):
"""Check syntax error output"""
test = 'test10'
result, errors, warnings = self.invoke_parser(test)
self.assertEqual(errors[0], {'reason': 'Syntax error.', 'line': 6, 'col': 2})
class CalvinScriptAnalyzerTest(CalvinTestBase):
"""Test the CalvinsScript analyzer"""
def assert_app_info(self, test, app_info):
"""Check app_info against a reference result"""
ref_file = self.test_script_dir + test + '.app_info'
reference = self._read_file(ref_file)
# Canonical form
sorted_app_info = json.dumps(app_info, indent=4, sort_keys=True)
diff_lines = difflib.unified_diff(sorted_app_info.splitlines(), reference.splitlines())
diff = '\n'.join(diff_lines)
self.assertFalse(diff, diff)
def testSimpleScript(self):
test = 'test9'
# First make sure result below is error-free
result = self.invoke_parser_assert_syntax(test)
app_info = generate_app_info(result)
self.assert_app_info(test, app_info)
def testMissingActor(self):
script = """a:std.NotLikely()"""
result = self.invoke_parser_assert_syntax('inline', script)
app_info = generate_app_info(result)
self.assertFalse(app_info['valid'])
class CalvinScriptCheckerTest(CalvinTestBase):
"""Test the CalvinsScript checker"""
def testCheckSimpleScript(self):
script = """
a:Foo()
b:Bar()
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertTrue(errors)
def testCheckLocalComponent(self):
script = """
component Foo() -> out {
f:std.CountTimer()
f.integer > .out
}
a:Foo()
b:io.StandardOut()
a.out > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertFalse(errors, '\n'.join([str(error) for error in errors]))
self.assertFalse(warnings, '\n'.join([str(warning) for warning in warnings]))
def testCheckOutportConnections(self):
script = """
a:std.CountTimer()
b:std.CountTimer()
c:io.StandardOut()
a.integer > c.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(errors[0]['reason'], "Actor b (std.CountTimer) is missing connection to outport 'integer'")
self.assertFalse(warnings)
def testCheckInportConnections(self):
script = """
c:io.StandardOut()
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(errors[0]['reason'], "Missing connection to inport 'c.token'")
self.assertFalse(warnings)
def testCheckMultipleInportConnections(self):
script = """
a:std.CountTimer()
b:std.CountTimer()
c:io.StandardOut()
a.integer > c.token
b.integer > c.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Actor c (io.StandardOut) has multiple connections to inport 'token'")
self.assertFalse(warnings)
def testBadComponent1(self):
script = """
component Foo() -> out {
a:std.CountTimer()
b:std.CountTimer()
a.integer > .out
}
a:Foo()
b:io.StandardOut()
a.out > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Actor b (std.CountTimer) is missing connection to outport 'integer'")
self.assertFalse(warnings)
def testBadComponent2(self):
script = """
component Foo() -> out {
a:std.CountTimer()
b:io.StandardOut()
a.integer > b.token
}
a:Foo()
b:io.StandardOut()
a.out > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Component Foo is missing connection to outport 'out'")
self.assertFalse(warnings)
def testBadComponent3(self):
script = """
component Foo() -> out {
a:std.CountTimer()
a.integer > .out
a.integer > .out
}
a:Foo()
b:io.StandardOut()
a.out > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Component Foo has multiple connections to outport 'out'")
self.assertEqual(errors[1]['reason'], "Component Foo has multiple connections to outport 'out'")
self.assertFalse(warnings)
def testBadComponent4(self):
script = """
component Foo() in -> {
a:io.StandardOut()
}
b:Foo()
a:std.CountTimer()
a.integer > b.in
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Component Foo is missing connection to inport 'in'")
self.assertEqual(errors[1]['reason'], "Actor a (io.StandardOut) is missing connection to inport 'token'")
self.assertFalse(warnings)
def testBadComponent5(self):
script = """
component Foo() in -> {
a:io.StandardOut()
.foo > a.token
}
b:Foo()
a:std.CountTimer()
a.integer > b.in
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Component Foo has no inport 'foo'")
self.assertEqual(errors[1]['reason'], "Component Foo is missing connection to inport 'in'")
self.assertEqual(len(warnings), 0)
def testBadComponent6(self):
script = """
component Foo() -> out {
a:std.CountTimer()
a.integer > .foo
}
b:Foo()
a:io.StandardOut()
b.out > a.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Component Foo has no outport 'foo'")
self.assertEqual(errors[1]['reason'], "Component Foo is missing connection to outport 'out'")
self.assertEqual(len(warnings), 0)
def testBadComponent7(self):
script = """
component Foo() in -> out {
.in > .out
}
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Component Foo passes port 'in' directly to port 'out'")
self.assertEqual(len(warnings), 0)
def testUndefinedActors(self):
script = """
a.token > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Undefined actor: 'a'")
self.assertEqual(errors[1]['reason'], "Undefined actor: 'b'")
def testUndefinedArguments(self):
script = """
a:std.Constant()
b:io.StandardOut()
a.token > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Missing argument: 'data'")
def testComponentUndefinedArgument(self):
script = """
component Foo(file) in -> {
a:io.StandardOut()
.in > a.token
}
b:Foo()
a:std.CountTimer()
a.integer > b.in
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Unused argument: 'file'")
self.assertEqual(errors[1]['reason'], "Missing argument: 'file'")
def testComponentUnusedArgument(self):
script = """
component Foo(file) in -> {
a:io.StandardOut()
.in > a.token
}
b:Foo(file="Foo.txt")
a:std.CountTimer()
a.integer > b.in
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(len(warnings), 0)
self.assertEqual(errors[0]['reason'], "Unused argument: 'file'")
def testLocalComponentRecurse(self):
script = """
component E() in -> out {
f:std.Identity()
.in > f.token
f.token > .out
}
component B() in -> out {
e:E()
.in > e.in
e.out > .out
}
a:std.Counter()
b:B()
c:io.StandardOut()
a.integer > b.in
b.out > c.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 0)
self.assertEqual(len(warnings), 0)
@pytest.mark.xfail(reason="Since component def is now a dict, order is not preserved. Needs fix.")
def testLocalComponentBad(self):
script = """
component B() in -> out {
e:E()
.in > e.in
e.out > .out
}
component E() in -> out {
f:std.Identity()
.in > f.token
f.token > .out
}
a:std.Counter()
b:B()
c:io.StandardOut()
a.integer > b.in
b.out > c.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Unknown actor type: 'E'")
self.assertEqual(len(warnings), 0)
def testNoSuchPort(self):
script = """
i:std.Identity()
src:std.CountTimer()
dst:io.StandardOut()
src.integer > i.foo
i.bar > dst.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 4)
self.assertEqual(errors[0]['reason'], "Actor i (std.Identity) has no inport 'foo'")
self.assertEqual(errors[1]['reason'], "Actor i (std.Identity) has no outport 'bar'")
self.assertEqual(errors[2]['reason'], "Actor i (std.Identity) is missing connection to inport 'token'")
self.assertEqual(errors[3]['reason'], "Actor i (std.Identity) is missing connection to outport 'token'")
self.assertEqual(len(warnings), 0)
@pytest.mark.xfail()
def testRedefineInstance(self):
script = """
i:std.Identity()
src:std.CountTimer()
dst:io.StandardOut()
i:std.RecTimer()
src.integer > i.token
i.token > dst.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 1)
self.assertEqual(len(warnings), 0)
def testUndefinedActorInComponent(self):
script = """
component Bug() -> out {
b.out > .out
}
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 1)
self.assertEqual(len(warnings), 0)
class CalvinScriptDefinesTest(CalvinTestBase):
"""Test CalvinsScript defines"""
def testUndefinedConstant(self):
script = """
src : std.Constant(data=FOO)
snk : io.StandardOut()
src.token > snk.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 1)
self.assertEqual(len(warnings), 0)
self.assertEqual(errors[0]['reason'], "Undefined identifier: 'FOO'")
def testDefinedConstant(self):
script = """
define FOO = 42
src : std.Constant(data=FOO)
snk : io.StandardOut()
src.token > snk.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 0)
self.assertEqual(len(warnings), 0)
@pytest.mark.xfail()
def testUndefinedRecursiveConstant(self):
script = """
define FOO = BAR
src : std.Constant(data=FOO)
snk : io.StandardOut()
src.token > snk.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 1)
self.assertEqual(len(warnings), 0)
self.assertEqual(errors[0]['reason'], "Undefined identifier: 'FOO'")
def testDefinedRecursiveConstant(self):
script = """
define FOO = BAR
define BAR = 42
src : std.Constant(data=FOO)
snk : io.StandardOut()
src.token > snk.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 0)
self.assertEqual(len(warnings), 0)
def testLiteralOnPort(self):
script = """
snk : io.StandardOut()
42 > snk.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 0)
self.assertEqual(len(warnings), 0)
@pytest.mark.xfail()
def testComponentArgumentOnPort(self):
script = """
component Foo(foo) -> out {
foo > .out
}
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 0)
self.assertEqual(len(warnings), 0)
@pytest.mark.xfail()
def testBadLocalPort(self):
script = """
component Foo() in -> {
snk : io.StandardOut()
.in > snk.token
}
src : std.Counter()
src.integer > .in
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertNotEqual(len(errors), 0)
self.assertEqual(len(warnings), 0)
```
#### File: calvin/tests/test_calvin_securedht.py
```python
import unittest
import time
import copy
import multiprocessing
import pytest
import socket
import os
import shutil
import json
from calvin.requests.request_handler import RequestHandler, RT
from calvin.utilities.nodecontrol import dispatch_node, dispatch_storage_node
from calvin.utilities.attribute_resolver import format_index_string
from calvin.utilities import certificate
from calvin.utilities import calvinlogger
from calvin.utilities import calvinconfig
from calvin.utilities import calvinuuid
_log = calvinlogger.get_logger(__name__)
_conf = calvinconfig.get()
request_handler = RequestHandler()
try:
ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
except:
ip_addr = socket.gethostbyname(socket.gethostname())
rt1 = None
rt2 = None
rt3 = None
rt1_id = None
rt2_id = None
rt3_id = None
test_script_dir = None
def absolute_filename(filename):
import os.path
return os.path.join(os.path.dirname(__file__), filename)
@pytest.mark.slow
class TestSecureDht(unittest.TestCase):
@pytest.fixture(autouse=True, scope="class")
def setup(self, request):
from calvin.Tools.csruntime import csruntime
from conftest import _config_pytest
homefolder = os.getenv("HOME")
domain = "rttest"
configdir = os.path.join(homefolder, ".calvin",
"security", domain)
try:
shutil.rmtree(configdir)
except:
pass
print "Trying to create a new test domain configuration."
testconfig = certificate.Config(domain=domain)
print "Reading configuration successfull."
print "Creating new domain."
certificate.new_domain(testconfig)
print "Created new domain."
# Now handled within runtime
#for i in range(3):
# name = "++++node{}".format(i)
# nodeid = calvinuuid.uuid("NODE")
# certreq = certificate.new_runtime(testconfig, name, nodeid=nodeid)
# certificate.sign_req(testconfig, os.path.basename(certreq), name)
global rt1
global rt2
global rt3
global test_script_dir
rt_conf = copy.deepcopy(_conf)
rt_conf.set('global', 'storage_type', 'securedht')
rt_conf.add_section('security')
rt_conf.set('security', "certificate_conf", None)
rt_conf.set('security', "certificate_domain", domain)
rt_conf.save("/tmp/calvin500x.conf")
try:
logfile = _config_pytest.getoption("logfile")+"5000"
outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
if outfile == logfile:
outfile = None
except:
logfile = None
outfile = None
csruntime(ip_addr, port=5000, controlport=5003, attr={'indexed_public':
{'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
'node_name': {'name': 'node0'},
'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
configfile="/tmp/calvin500x.conf")
rt1 = RT("http://%s:5003" % ip_addr)
try:
logfile = _config_pytest.getoption("logfile")+"5001"
outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
if outfile == logfile:
outfile = None
except:
logfile = None
outfile = None
csruntime(ip_addr, port=5001, controlport=5004, attr={'indexed_public':
{'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
'node_name': {'name': 'node1'},
'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
configfile="/tmp/calvin500x.conf")
rt2 = RT("http://%s:5004" % ip_addr)
try:
logfile = _config_pytest.getoption("logfile")+"5002"
outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
if outfile == logfile:
outfile = None
except:
logfile = None
outfile = None
csruntime(ip_addr, port=5002, controlport=5005, attr={'indexed_public':
{'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
'node_name': {'name': 'node2'},
'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
configfile="/tmp/calvin500x.conf")
rt3 = RT("http://%s:5005" % ip_addr)
test_script_dir = absolute_filename('scripts/')
request.addfinalizer(self.teardown)
def teardown(self):
global rt1
global rt2
global rt3
request_handler.quit(rt1)
request_handler.quit(rt2)
request_handler.quit(rt3)
time.sleep(0.2)
for p in multiprocessing.active_children():
p.terminate()
# They will die eventually (about 5 seconds) in most cases, but this makes sure without wasting time
os.system("pkill -9 -f -l 'csruntime -n %s -p 5000'" % (ip_addr,))
os.system("pkill -9 -f -l 'csruntime -n %s -p 5001'" % (ip_addr,))
os.system("pkill -9 -f -l 'csruntime -n %s -p 5002'" % (ip_addr,))
time.sleep(0.2)
def verify_storage(self):
global rt1
global rt2
global rt3
global rt1_id
global rt2_id
global rt3_id
rt1_id = None
rt2_id = None
rt3_id = None
failed = True
# Try 30 times waiting for control API to be up and running
for i in range(30):
try:
rt1_id = rt1_id or request_handler.get_node_id(rt1)
rt2_id = rt2_id or request_handler.get_node_id(rt2)
rt3_id = rt3_id or request_handler.get_node_id(rt3)
failed = False
break
except:
time.sleep(0.1)
assert not failed
assert rt1_id
assert rt2_id
assert rt3_id
print "RUNTIMES:", rt1_id, rt2_id, rt3_id
_log.analyze("TESTRUN", "+ IDS", {'waited': 0.1*i})
failed = True
# Try 30 times waiting for storage to be connected
caps1 = []
caps2 = []
caps3 = []
rt_ids = set([rt1_id, rt2_id, rt3_id])
for i in range(30):
try:
if not (rt1_id in caps1 and rt2_id in caps1 and rt3_id in caps1):
caps1 = request_handler.get_index(rt1, "node/capabilities/calvinsys.native.python-json")['result']
if not (rt1_id in caps2 and rt2_id in caps2 and rt3_id in caps2):
caps2 = request_handler.get_index(rt2, "node/capabilities/calvinsys.native.python-json")['result']
if not (rt1_id in caps3 and rt2_id in caps3 and rt3_id in caps3):
caps3 = request_handler.get_index(rt3, "node/capabilities/calvinsys.native.python-json")['result']
if rt_ids <= set(caps1) and rt_ids <= set(caps2) and rt_ids <= set(caps3):
failed = False
break
else:
time.sleep(0.1)
except:
time.sleep(0.1)
assert not failed
_log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1*i})
# Now check for the values needed by this specific test
caps = request_handler.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
assert rt1_id in caps['result']
_log.analyze("TESTRUN", "+ RT1 CAPS", {})
caps = request_handler.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
assert rt1_id in caps['result']
_log.analyze("TESTRUN", "+ RT2 CAPS", {})
assert request_handler.get_index(rt1, format_index_string(['node_name', {'name': 'node2'}]))
_log.analyze("TESTRUN", "+ RT1 INDEX", {})
assert request_handler.get_index(rt2, format_index_string(['node_name', {'name': 'node1'}]))
_log.analyze("TESTRUN", "+ RT2 INDEX", {})
@pytest.mark.slow
def testSecureDHTVerifyStorage(self):
_log.analyze("TESTRUN", "+", {})
self.verify_storage()
"""
@pytest.mark.slow
def testDeployStillShadow(self):
_log.analyze("TESTRUN", "+", {})
global rt1
global rt2
global rt3
global rt1_id
global rt2_id
global rt3_id
global test_script_dir
self.verify_storage()
from calvin.Tools.cscontrol import control_deploy as deploy_app
from collections import namedtuple
DeployArgs = namedtuple('DeployArgs', ['node', 'attr', 'script','reqs', 'check'])
args = DeployArgs(node='http://%s:5004' % ip_addr,
script=open(test_script_dir+"test_shadow1.calvin"), attr=None,
reqs=None, check=False)
result = {}
try:
result = deploy_app(args)
except:
raise Exception("Failed deployment of app %s, no use to verify if requirements fulfilled" % args.script.name)
#print "RESULT:", result
assert result['requirements_fulfilled']
time.sleep(1)
request_handler.migrate(rt2, result['actor_map']['test_shadow1:snk'], rt1_id)
time.sleep(1)
actors = [request_handler.get_actors(rt1), request_handler.get_actors(rt2), request_handler.get_actors(rt3)]
# src -> rt2, sum -> rt2, snk -> rt1
assert result['actor_map']['test_shadow1:src'] in actors[1]
assert result['actor_map']['test_shadow1:sum'] in actors[1]
assert result['actor_map']['test_shadow1:snk'] in actors[0]
actual = request_handler.report(rt1, result['actor_map']['test_shadow1:snk'])
assert len(actual) == 0
request_handler.migrate(rt2, result['actor_map']['test_shadow1:src'], rt3_id)
time.sleep(1)
actors = [request_handler.get_actors(rt1), request_handler.get_actors(rt2), request_handler.get_actors(rt3)]
# src -> rt3, sum -> rt2, snk -> rt1
assert result['actor_map']['test_shadow1:src'] in actors[2]
assert result['actor_map']['test_shadow1:sum'] in actors[1]
assert result['actor_map']['test_shadow1:snk'] in actors[0]
actual = request_handler.report(rt1, result['actor_map']['test_shadow1:snk'])
assert len(actual) == 0
request_handler.migrate(rt3, result['actor_map']['test_shadow1:src'], rt1_id)
time.sleep(1)
actors = [request_handler.get_actors(rt1), request_handler.get_actors(rt2), request_handler.get_actors(rt3)]
# src -> rt1, sum -> rt2, snk -> rt1
assert result['actor_map']['test_shadow1:src'] in actors[0]
assert result['actor_map']['test_shadow1:sum'] in actors[1]
assert result['actor_map']['test_shadow1:snk'] in actors[0]
actual = request_handler.report(rt1, result['actor_map']['test_shadow1:snk'])
assert len(actual) > 3
request_handler.delete_application(rt2, result['application_id'])
@pytest.mark.slow
def testDeployFailReqs(self):
_log.analyze("TESTRUN", "+", {})
global rt1
global rt2
global rt3
global rt1_id
global rt2_id
global rt3_id
global test_script_dir
self.verify_storage()
from calvin.Tools.cscontrol import control_deploy as deploy_app
from collections import namedtuple
DeployArgs = namedtuple('DeployArgs', ['node', 'attr', 'script','reqs', 'check'])
args = DeployArgs(node='http://%s:5004' % ip_addr,
script=open(test_script_dir+"test_shadow1.calvin"), attr=None,
reqs=test_script_dir+"test_shadow6.deployjson", check=False)
result = {}
try:
result = deploy_app(args)
except:
raise Exception("Failed deployment of app %s, no use to verify if requirements fulfilled" % args.script.name)
#print "RESULT:", result
time.sleep(1)
assert not result['requirements_fulfilled']
request_handler.delete_application(rt2, result['application_id'])
"""
```
#### File: calvin/tests/test_endpoint.py
```python
import pytest
import unittest
from mock import Mock
from calvin.actor.actorport import InPort, OutPort
from calvin.runtime.north.calvin_token import Token
from calvin.runtime.south.endpoint import LocalInEndpoint, LocalOutEndpoint, TunnelInEndpoint, TunnelOutEndpoint
pytestmark = pytest.mark.unittest
class TestLocalEndpoint(unittest.TestCase):
def setUp(self):
self.port = InPort("port", Mock())
self.peer_port = OutPort("peer_port", Mock())
self.local_in = LocalInEndpoint(self.port, self.peer_port)
self.local_out = LocalOutEndpoint(self.peer_port, self.port)
self.port.attach_endpoint(self.local_in)
self.peer_port.attach_endpoint(self.local_out)
def test_is_connected(self):
assert self.local_in.is_connected
assert self.local_out.is_connected
def test_read_token_fixes_fifo_mismatch(self):
self.local_in.fifo_mismatch = True
token = self.local_in.read_token()
assert token is None
assert self.local_in.fifo_mismatch is False
def test_read_token_commits_if_token_is_not_none(self):
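# Tokens in the in-port's own fifo are served first (data_in_local_fifo stays True);
# once reads fall through to the peer port's fifo the flag is cleared.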
self.local_in.port.fifo.commit_reads = Mock()
self.local_out.port.fifo.commit_reads = Mock()
self.local_in.port.fifo.write(1)
assert self.local_in.data_in_local_fifo is True
assert self.local_in.read_token() == 1
assert self.local_in.data_in_local_fifo is True
self.local_in.port.fifo.commit_reads.assert_called_with(self.port.id, True)
self.local_out.port.fifo.write(2)
assert self.local_in.data_in_local_fifo is True
assert self.local_in.read_token() == 2
assert self.local_in.data_in_local_fifo is False
self.local_out.port.fifo.commit_reads.assert_called_with(self.port.id, True)
def test_peek_token(self):
self.local_in.port.fifo.commit_reads = Mock()
self.local_out.port.fifo.commit_reads = Mock()
self.local_in.port.fifo.write(1)
assert self.local_in.peek_token() == 1
assert not self.local_in.port.fifo.commit_reads.called
assert self.local_in.peek_token() is None
self.local_in.peek_rewind()
self.local_in.commit_peek_as_read()
self.local_in.port.fifo.commit_reads.assert_called_with(self.port.id)
self.local_out.port.fifo.commit_reads.assert_called_with(self.port.id)
self.local_in.port.fifo.commit_reads.reset_mock()
self.local_out.port.fifo.commit_reads.reset_mock()
self.local_out.port.fifo.write(2)
assert self.local_in.peek_token() == 2
assert not self.local_out.port.fifo.commit_reads.called
self.local_in.commit_peek_as_read()
assert not self.local_in.port.fifo.commit_reads.called
self.local_out.port.fifo.commit_reads.assert_called_with(self.port.id)
def test_available_tokens(self):
self.local_in.port.fifo.write(1)
self.local_in.port.fifo.write(1)
assert self.local_in.available_tokens() == 2
self.local_out.port.fifo.write(1)
assert self.local_in.available_tokens() == 3
def test_get_peer(self):
assert self.local_in.get_peer() == ('local', self.peer_port.id)
assert self.local_out.get_peer() == ('local', self.port.id)
class TestTunnelEndpoint(unittest.TestCase):
def setUp(self):
self.port = InPort("port", Mock())
self.peer_port = OutPort("peer_port", Mock())
self.tunnel = Mock()
self.trigger_loop = Mock()
self.node_id = 123
self.peer_node_id = 456
self.tunnel_in = TunnelInEndpoint(self.port, self.tunnel, self.peer_node_id, self.peer_port.id, self.trigger_loop)
self.tunnel_out = TunnelOutEndpoint(self.peer_port, self.tunnel, self.node_id, self.port.id, self.trigger_loop)
self.port.attach_endpoint(self.tunnel_in)
self.peer_port.attach_endpoint(self.tunnel_out)
def test_recv_token(self):
expected_reply = {
'cmd': 'TOKEN_REPLY',
'port_id': self.port.id,
'peer_port_id': self.peer_port.id,
'sequencenbr': 0,
'value': 'ACK'
}
payload = {
'port_id': self.port.id,
'peer_port_id': self.peer_port.id,
'sequencenbr': 0,
'token': {'type': 'Token', 'data': 5}
}
self.tunnel_in.recv_token(payload)
assert self.trigger_loop.called
assert self.port.fifo.fifo[0].value == 5
self.tunnel.send.assert_called_with(expected_reply)
self.trigger_loop.reset_mock()
self.tunnel.send.reset_mock()
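# An out-of-order sequence number must not trigger the loop and is answered with a NACK so the sender can retransmit.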
payload['sequencenbr'] = 100
self.tunnel_in.recv_token(payload)
assert not self.trigger_loop.called
expected_reply['sequencenbr'] = 100
expected_reply['value'] = 'NACK'
self.tunnel.send.assert_called_with(expected_reply)
self.trigger_loop.reset_mock()
self.tunnel.send.reset_mock()
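# A duplicate of the already-received sequence number is ACKed again but does not trigger the loop.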
payload['sequencenbr'] = 0
self.tunnel_in.recv_token(payload)
assert not self.trigger_loop.called
expected_reply['sequencenbr'] = 0
expected_reply['value'] = 'ACK'
self.tunnel.send.assert_called_with(expected_reply)
def test_read_token(self):
self.tunnel_in.port.fifo.write(4)
self.tunnel_in.port.fifo.commit_reads = Mock()
assert self.tunnel_in.read_token() == 4
self.tunnel_in.port.fifo.commit_reads.assert_called_with(self.port.id, True)
def test_peek_token(self):
self.tunnel_in.port.fifo.write(4)
assert self.tunnel_in.peek_token() == 4
assert self.tunnel_in.read_token() is None
self.tunnel_in.peek_rewind()
assert self.tunnel_in.read_token() == 4
def test_available_tokens(self):
self.tunnel_in.port.fifo.write(4)
self.tunnel_in.port.fifo.write(5)
assert self.tunnel_in.available_tokens() == 2
def test_get_peer(self):
assert self.tunnel_in.get_peer() == (self.peer_node_id, self.peer_port.id)
assert self.tunnel_out.get_peer() == (self.node_id, self.port.id)
def test_reply(self):
self.tunnel_out.port.fifo.commit_one_read = Mock()
self.tunnel_out.port.write_token(Token(1))
self.tunnel_out._send_one_token()
self.tunnel_out.reply(0, 'ACK')
self.tunnel_out.port.fifo.commit_one_read.assert_called_with(self.port.id, True)
assert self.trigger_loop.called
self.tunnel_out.port.fifo.commit_one_read.reset_mock()
self.tunnel_out.reply(1, 'NACK')
assert not self.tunnel_out.port.fifo.commit_one_read.called
def test_nack_reply(self):
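# A NACK rolls back the tentative read position so the rejected tokens can be sent again.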
self.tunnel_out.port.write_token(Token(1))
self.tunnel_out._send_one_token()
self.tunnel_out.port.fifo.commit_reads(self.port.id, True)
assert self.tunnel_out.port.fifo.tentative_read_pos[self.port.id] == 1
assert self.tunnel_out.port.fifo.read_pos[self.port.id] == 1
self.tunnel_out.port.write_token(Token(2))
self.tunnel_out.port.write_token(Token(3))
self.tunnel_out._send_one_token()
self.tunnel_out._send_one_token()
assert self.tunnel_out.port.fifo.read_pos[self.port.id] == 1
assert self.tunnel_out.port.fifo.tentative_read_pos[self.port.id] == 3
self.tunnel_out.reply(1, 'NACK')
assert self.tunnel_out.port.fifo.tentative_read_pos[self.port.id] == 1
assert self.tunnel_out.port.fifo.read_pos[self.port.id] == 1
def test_bulk_communicate(self):
self.tunnel_out.port.write_token(Token(1))
self.tunnel_out.port.write_token(Token(2))
self.tunnel_out.bulk = True
self.tunnel_out.communicate()
assert self.tunnel.send.call_count == 2
def test_communicate(self):
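# With bulk disabled only one token is outstanding at a time; the next send happens only after the previous one is ACKed.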
self.tunnel_out.port.write_token(Token(1))
self.tunnel_out.port.write_token(Token(2))
self.tunnel_out.bulk = False
assert self.tunnel_out.communicate() is True
assert self.tunnel.send.call_count == 1
assert self.tunnel_out.communicate() is False
self.tunnel_out.reply(1, 'ACK')
assert self.tunnel_out.communicate() is True
assert self.tunnel.send.call_count == 2
```
#### File: calvin/tests/test_fifo.py
```python
import unittest
from calvin.runtime.north import fifo
from calvin.runtime.north.calvin_token import Token
class FifoTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def verify_data(self, write_data, fifo_data):
print write_data, fifo_data
for a, b in zip(write_data, fifo_data):
d = b.value
self.assertEquals(a, d)
def test1(self):
"""Adding reader again (reconnect)"""
f = fifo.FIFO(5)
f.add_reader('p1.id')
data = ['1', '2', '3', '4']
for token in data:
self.assertTrue(f.can_write())
self.assertTrue(f.write(Token(token)))
self.verify_data(['1', '2'], [f.read('p1.id') for _ in range(2)])
f.commit_reads('p1.id', True)
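# Re-adding the same reader (as on reconnect) must not reset its committed read position.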
f.add_reader('p1.id')
self.verify_data(['3', '4'], [f.read('p1.id') for _ in range(2)])
self.assertEquals(None, f.read('p1.id'))
f.commit_reads('p1.id', True)
for token in ['5', '6', '7', '8']:
self.assertTrue(f.can_write())
self.assertTrue(f.write(Token(token)))
self.assertFalse(f.can_write())
self.verify_data(['5', '6', '7', '8'], [f.read('p1.id')
for _ in range(4)])
f.commit_reads('p1.id', True)
def test2(self):
"""Multiple readers"""
f = fifo.FIFO(5)
f.add_reader("r1")
f.add_reader("r2")
# Ensure fifo is empty
self.assertEquals(f.read("r1"), None)
self.assertEquals(len(f), 0)
# Add something
self.assertTrue(f.write(Token('1')))
self.assertEquals(len(f), 1)
# Reader r1 read something
self.assertTrue(f.read('r1'))
f.commit_reads('r1')
self.assertEquals([True] * 3, [f.write(Token(t)) for t in ['2', '3', '4']])
self.assertFalse(f.write(Token('5')))
self.verify_data(['2', '3', '4'], [f.read('r1') for _ in range(3)])
f.commit_reads("r1")
# Reader r1 all done, ensure reader r2 can still read
self.assertEquals(len(f), 4)
self.assertFalse(f.can_write())
self.assertTrue(f.can_read("r2"))
self.assertFalse(f.can_read("r1"))
# Reader r2 reads something
self.verify_data(['1', '2', '3'], [f.read("r2") for _ in range(3)])
f.commit_reads("r2")
self.assertEquals(len(f), 1)
self.assertTrue(f.write(Token('5')))
self.verify_data(['4', '5'], [f.read("r2") for _ in range(2)])
self.assertFalse(f.can_read("r2"))
self.assertEquals(None, f.read("r2"))
self.assertTrue(f.can_read("r1"))
self.verify_data(['5'], [f.read("r1")])
f.commit_reads("r2")
f.commit_reads("r1")
self.assertTrue(f.write(Token('6')))
self.assertTrue(f.write(Token('7')))
self.assertTrue(f.write(Token('8')))
self.assertTrue([f.read("r1")
for _ in range(4)], [f.read("r2") for _ in range(4)])
def test3(self):
"""Testing commit reads"""
f = fifo.FIFO(5)
f.add_reader("r1")
for token in ['1', '2', '3', '4']:
self.assertTrue(f.can_write())
self.assertTrue(f.write(Token(token)))
# Fails, fifo full
self.assertFalse(f.can_write())
self.assertFalse(f.write(Token('5')))
# Tentative, fifo still full
self.verify_data(['1'], [f.read("r1")])
self.assertFalse(f.can_write())
self.assertFalse(f.write(Token('5')))
# commit previous reads, fifo 1 pos free
f.commit_reads('r1')
self.assertTrue(f.can_write())
self.assertTrue(f.write(Token('5')))
# fifo full again
self.assertFalse(f.can_write())
self.assertFalse(f.write(Token('5')))
def test4(self):
"""Testing rollback reads"""
f = fifo.FIFO(5)
f.add_reader('r1')
for token in ['1', '2', '3', '4']:
self.assertTrue(f.can_write())
self.assertTrue(f.write(Token(token)))
# fifo full
self.assertFalse(f.can_write())
self.assertFalse(f.write(Token('5')))
# tentative reads
self.verify_data(['1', '2', '3', '4'], [f.read("r1")
for _ in range(4)])
# len unchanged
self.assertEquals(len(f), 4)
f.rollback_reads("r1")
self.assertFalse(f.can_write())
self.assertFalse(f.write(Token('5')))
self.assertEquals(len(f), 4)
# re-read
self.verify_data(['1'], [f.read("r1")])
f.commit_reads("r1")
self.assertEquals(len(f), 3)
# one pos free in fifo
self.assertTrue(f.can_write())
self.assertTrue(f.write(Token('a')))
self.assertFalse(f.can_write())
self.assertFalse(f.write(Token('b')))
```
#### File: calvin/Tools/cscompiler.py
```python
import os
import sys
import json
import argparse
from calvin.csparser.parser import calvin_parser
from calvin.csparser.checker import check
from calvin.csparser.analyzer import generate_app_info
from calvin.utilities.security import Security
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
def compile(source_text, filename='', content=None, credentials=None, verify=True, node=None):
# Steps taken:
# 1) Verify signature when credentials supplied
# 2) parser .calvin file -> IR. May produce syntax errors/warnings
# 3) checker IR -> IR. May produce syntax errors/warnings
# 4) analyzer IR -> app. Should not fail. Sets 'valid' property of IR to True/False
deployable = {'valid': False, 'actors': {}, 'connections': {}}
errors = [] #TODO: fill in something meaningful
warnings = []
if credentials:
_log.debug("Check credentials...")
sec = Security(node)
sec.set_subject(credentials)
if not sec.authenticate_subject():
_log.error("Check credentials...failed authentication")
# This error reason is detected in calvin control and gives proper REST response
errors.append({'reason': "401: UNAUTHORIZED", 'line': 0, 'col': 0})
return deployable, errors, warnings
if (not sec.verify_signature_content(content, "application") or not sec.check_security_policy()):
# Verification not OK if sign or cert not OK or if the signer is denied by security policies
print "\n IN DEPLOYER\n "
_log.error("Check credentials...failed application verification")
# This error reason is detected in calvin control and gives proper REST response
errors.append({'reason': "401: UNAUTHORIZED", 'line': None, 'col': None})
return deployable, errors, warnings
_log.debug("Parsing...")
ir, errors, warnings = calvin_parser(source_text, filename)
_log.debug("Parsed %s, %s, %s" % (ir, errors, warnings))
# If there were errors during parsing no IR will be generated
if not errors:
c_errors, c_warnings = check(ir, verify=verify)
errors.extend(c_errors)
warnings.extend(c_warnings)
deployable = generate_app_info(ir, verify=verify)
if errors:
deployable['valid'] = False
_log.debug("Compiled %s, %s, %s" % (deployable, errors, warnings))
return deployable, errors, warnings
def compile_file(file, credentials=None):
with open(file, 'r') as source:
sourceText = source.read()
content = None
if credentials:
content = Security.verify_signature_get_files(file, skip_file=True)
if content:
content['file'] = sourceText
return compile(sourceText, file, content=content, credentials=credentials)
def compile_generator(files):
for file in files:
deployable, errors, warnings = compile_file(file)
yield((deployable, errors, warnings, file))
def remove_debug_info(deployable):
pass
# if type(d)==type({}):
# d.pop('dbg_line', None)
# for item in d:
# _remove_debug_symbols(d[item])
# elif type(d)==type([]):
# for item in d:
# _remove_debug_symbols(item)
def main():
long_description = """
Compile a CalvinScript source file into a deployable JSON representation.
By default, the output will be written to file with the same name as the input file,
but with the extension replaced by 'json'.
"""
argparser = argparse.ArgumentParser(description=long_description)
argparser.add_argument('files', metavar='#', type=str, nargs='+',
help='source file to compile')
argparser.add_argument('-d', '--debug', dest='debug', action='store_true', default=False,
help='leave debugging information in output')
argparser.add_argument('--stdout', dest='to_stdout', action='store_true',
help='send output to stdout instead of file (default)')
argparser.add_argument('--compact', dest='indent', action='store_const', const=None, default=4,
help='use compact JSON format instead of readable (default)')
argparser.add_argument('--sorted', dest='sorted', action='store_true', default=False,
help='sort resulting JSON output by keys')
argparser.add_argument('--issue-fmt', dest='fmt', type=str,
default='{issue_type}: {reason} {script} [{line}:{col}]',
help='custom format for issue reporting.')
argparser.add_argument('--verbose', action='store_true',
help='informational output from the compiler')
args = argparser.parse_args()
def report_issues(issues, issue_type, file=''):
sorted_issues = sorted(issues, key=lambda k: k.get('line', 0))
for issue in sorted_issues:
sys.stderr.write(args.fmt.format(script=file, issue_type=issue_type, **issue) + '\n')
exit_code = 0
for deployable, errors, warnings, file in compile_generator(args.files):
if errors:
report_issues(errors, 'Error', file)
exit_code = 1
if warnings and args.verbose:
report_issues(warnings, 'Warning', file)
if exit_code == 1:
# Don't produce output if there were errors
continue
if not args.debug:
# FIXME: Debug information is not propagated from IR to deployable by Analyzer.
# When it is, this is the place to remove it
remove_debug_info(deployable)
string_rep = json.dumps(deployable, indent=args.indent, sort_keys=args.sorted)
if args.to_stdout:
print(string_rep)
else:
path, ext = os.path.splitext(file)
dst = path + ".json"
with open(dst, 'w') as f:
f.write(string_rep)
return exit_code
if __name__ == '__main__':
sys.exit(main())
```
#### File: calvin/Tools/csinstaller.py
```python
import sys
import argparse
from calvin.csparser.parser import calvin_parser
from calvin.csparser.checker import check
from calvin.actorstore import store
def check_script(file):
try:
with open(file, 'r') as source:
source_text = source.read()
except:
return {}, [{'reason': 'File not found', 'line': 0, 'col': 0}], []
# Steps taken:
# 1) parser .calvin file -> IR. May produce syntax errors/warnings
# 2) checker IR -> IR. May produce syntax errors/warnings
ir, errors, warnings = calvin_parser(source_text, file)
# If there were errors during parsing no IR will be generated
if not errors:
c_errors, c_warnings = check(ir)
errors.extend(c_errors)
warnings.extend(c_warnings)
return ir, errors, warnings
def install_component(namespace, name, definition, overwrite):
astore = store.ActorStore()
return astore.add_component(namespace, name, definition, overwrite)
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('--script', type=str, required=True,
help='script file with component definitions')
argparser.add_argument('--namespace', type=str, required=True,
help='namespace to install components under')
group = argparser.add_mutually_exclusive_group()
group.add_argument('--all', dest='component', action='store_const', const=[],
help='install all components found in script')
group.add_argument('--component', type=str, nargs='+',
help='name of component(s) to install')
argparser.add_argument('--force', dest='overwrite', action='store_true',
help='overwrite components that exists at destination')
argparser.add_argument('--issue-fmt', dest='fmt', type=str,
default='{issue_type}: {reason} {script} [{line}:{col}]',
help='custom format for issue reporting.')
args = argparser.parse_args()
def report_issues(issues, issue_type, file=''):
sorted_issues = sorted(issues, key=lambda k: k.get('line', 0))
for issue in sorted_issues:
sys.stderr.write(args.fmt.format(script=file, issue_type=issue_type, **issue) + '\n')
ir, errors, warnings = check_script(args.script)
if warnings:
report_issues(warnings, 'Warning', args.script)
if errors:
report_issues(errors, 'Error', args.script)
return 1
errors = []
for comp_name, comp_def in ir['components'].items():
if args.component and comp_name not in args.component:
continue
ok = install_component(args.namespace, comp_name, comp_def, args.overwrite)
if not ok:
errors.append({'reason': 'Failed to install "{0}"'.format(comp_name),
'line': comp_def['dbg_line'], 'col': 0})
if errors:
report_issues(errors, 'Error', args.script)
return 1
if __name__ == '__main__':
sys.exit(main())
```
#### File: calvin/Tools/csmanage.py
```python
import sys
import argparse
import os
import glob
import shutil
from calvin.csparser.parser import calvin_parser
from calvin.csparser.checker import check
from calvin.actorstore import store
from calvin.utilities import certificate
from calvin.actorstore.store import ActorStore
def check_script(file):
try:
with open(file, 'r') as source:
source_text = source.read()
except:
return {}, [{'reason': 'File not found', 'line': 0, 'col': 0}], []
# Steps taken:
# 1) parser .calvin file -> IR. May produce syntax errors/warnings
# 2) checker IR -> IR. May produce syntax errors/warnings
ir, errors, warnings = calvin_parser(source_text, file)
# If there were errors during parsing no IR will be generated
if not errors:
c_errors, c_warnings = check(ir)
errors.extend(c_errors)
warnings.extend(c_warnings)
return ir, errors, warnings
def install_component(namespace, name, definition, overwrite):
astore = store.ActorStore()
return astore.add_component(namespace, name, definition, overwrite)
def parse_args():
long_desc = """Manage the host's actor store and credentials"""
# top level arguments
argparser = argparse.ArgumentParser(description=long_desc)
cmdparsers = argparser.add_subparsers(help="command help")
# parser for install cmd
install_commands = ['component', 'actor']
cmd_install = cmdparsers.add_parser('install', help='install components and actors')
cmd_install.add_argument('cmd', metavar='<command>', choices=install_commands, type=str,
help="one of %s" % ", ".join(install_commands))
cmd_install.add_argument('--force', dest='force', action='store_true',
help='overwrite components or actor that exists at destination')
cmd_install.add_argument('--sign', dest='sign', action='store_true',
help='sign actor or component')
cmd_install.add_argument('--org', metavar='<name>', dest='org', type=str,
help='Code Signer org name used, assumes default location when no calvin.conf')
cmd_install.add_argument('--namespace', metavar='<ns.sub-ns>', type=str, required=True,
help='namespace to install actor or components under')
aargs = cmd_install.add_argument_group("actor")
aargs.add_argument('--actor', metavar='<path>', action='append', default=[], required=True,
help='actor file to install, can be repeated')
gargs = cmd_install.add_argument_group("component")
gargs.add_argument('--script', metavar='<path>', type=str, required=True,
help='script file with component definitions')
whichcomp = gargs.add_mutually_exclusive_group(required=True)
whichcomp.add_argument('--all', dest='component', action='store_const', const=[],
help='install all components found in script')
whichcomp.add_argument('--component', metavar='<component>', type=str, nargs='+',
help='name of component(s) to install')
gargs.add_argument('--issue-fmt', dest='fmt', type=str,
default='{issue_type}: {reason} {script} [{line}:{col}]',
help='custom format for issue reporting.')
cmd_install.set_defaults(func=manage_install)
# parser for trust cmd
trust_commands = ['trust']
cmd_trust = cmdparsers.add_parser('trust', help='manage trusted certificates')
etargs = cmd_trust.add_argument_group("mandatory argument")
etargs.add_argument('--path', metavar='<path>', type=str,
help='certificate to trust')
cmd_trust.add_argument('--dir', metavar='<directory>', type=str, default="",
help='security directory, defaults to ~/.calvin/security')
cmd_trust.set_defaults(func=manage_trust)
# parser for sign cmd
# Might later need to specify what is signed to add extra verification
# sign_commands = ['app', 'component', 'actor']
cmd_sign = cmdparsers.add_parser('sign', help='sign a file')
# cmd_sign.add_argument('cmd', metavar='<command>', choices=sign_commands, type=str,
# help="one of %s" % ", ".join(sign_commands))
cmd_sign.add_argument('--org', metavar='<name>', dest='org', type=str, required=True,
help='Code Signer org name used')
cmd_sign.add_argument('--dir', metavar='<directory>', type=str, default="",
help='security directory, defaults to ~/.calvin/security')
cmd_sign.add_argument('--file', metavar='<path>', action='append', default=[],
help='file to sign, can be repeated')
storeargs = cmd_sign.add_argument_group("actor and component")
storeargs.add_argument('--nsfile', metavar='<ns.sub-ns.actor>', action='append', default=[],
help='namespaced store path to actor or components, can be repeated')
cmd_sign.set_defaults(func=manage_sign)
# parser for CA cmd
ca_commands = ['create', 'remove', 'export']
cmd_ca = cmdparsers.add_parser('ca', help='manage CA')
cmd_ca.add_argument('cmd', metavar='<command>', choices=ca_commands, type=str,
help="one of %s" % ", ".join(ca_commands))
etargs = cmd_ca.add_argument_group("export")
etargs.add_argument('--path', metavar='<path>', type=str,
help='export to directory')
cargs = cmd_ca.add_argument_group("create")
cmd_ca.add_argument('--force', dest='force', action='store_true',
help='overwrite file that exists at destination')
cmd_ca.add_argument('--domain', metavar='<name>', dest='domain', type=str, required=True,
help='CA domain name used')
cargs.add_argument('--name', metavar='<commonName>', type=str,
help='common name of Certificate Authority')
cmd_ca.add_argument('--dir', metavar='<directory>', type=str, default="",
help='security directory, defaults to ~/.calvin/security')
cmd_ca.set_defaults(func=manage_ca)
# parser for code_signer cmd
cs_commands = ['create', 'remove', 'export']
cmd_cs = cmdparsers.add_parser('code_signer', help='manage Code Signer')
cmd_cs.add_argument('cmd', metavar='<command>', choices=cs_commands, type=str,
help="one of %s" % ", ".join(cs_commands))
etargs = cmd_cs.add_argument_group("export")
etargs.add_argument('--path', metavar='<path>', type=str,
help='export to directory')
cargs = cmd_cs.add_argument_group("create")
cmd_cs.add_argument('--force', dest='force', action='store_true',
help='overwrite file that exists at destination')
cmd_cs.add_argument('--org', metavar='<name>', dest='org', type=str, required=True,
help='Organizational name used')
cargs.add_argument('--name', metavar='<commonName>', type=str,
help='common name of Code Signer')
cmd_cs.add_argument('--dir', metavar='<directory>', type=str, default="",
help='security directory, defaults to ~/.calvin/security')
cmd_cs.set_defaults(func=manage_cs)
return argparser.parse_args()
def manage_install(args):
def report_issues(issues, issue_type, file=''):
sorted_issues = sorted(issues, key=lambda k: k.get('line', 0))
for issue in sorted_issues:
sys.stderr.write(args.fmt.format(script=file, issue_type=issue_type, **issue) + '\n')
ir, errors, warnings = check_script(args.script)
if warnings:
report_issues(warnings, 'Warning', args.script)
if errors:
report_issues(errors, 'Error', args.script)
return 1
errors = []
for comp_name, comp_def in ir['components'].items():
if args.component and comp_name not in args.component:
continue
ok = install_component(args.namespace, comp_name, comp_def, args.force)
if not ok:
errors.append({'reason': 'Failed to install "{0}"'.format(comp_name),
'line': comp_def['dbg_line'], 'col': 0})
if errors:
report_issues(errors, 'Error', args.script)
return 1
def manage_trust(args):
if not args.path:
raise Exception("No path supplied")
cert_name = os.path.basename(args.path)
if args.dir:
truststore_cert = os.path.join(args.dir, "trustStore", cert_name)
else:
homefolder = os.getenv("HOME")
truststore_cert = os.path.join(homefolder, ".calvin", "security", "trustStore", cert_name)
if not os.path.isdir(os.path.dirname(truststore_cert)):
os.makedirs(os.path.dirname(truststore_cert), 0700)
shutil.copy(args.path, truststore_cert)
def manage_sign(args):
# Collect files to sign
files = []
if args.file:
for f in args.file:
files.extend(glob.glob(f))
if args.nsfile:
store = ActorStore()
for m in args.nsfile:
files.extend(store.actor_paths(m))
# Filter out any files not *.calvin, *.comp, *.py
files = [f for f in files if f.endswith(('.calvin', '.comp', '.py')) and not f.endswith('__init__.py')]
if not files:
raise Exception("No (*.calvin, *.comp, *py) files supplied")
if not args.org:
raise Exception("No org supplied")
configfile = os.path.join(args.dir, args.org, "openssl.conf") if args.dir else None
# When conf missing the exception is printed by main
conf = certificate.Config(configfile=configfile, domain=args.org, readonly=True)
exceptions = []
for f in files:
try:
certificate.sign_file(conf, f)
except Exception as e:
exceptions.append(e)
for e in exceptions:
print "Error {}".format(e)
def manage_ca(args):
if args.cmd == 'create' and args.domain and args.name:
if not args.domain:
raise Exception("No domain supplied")
configfile = os.path.join(args.dir, args.domain, "openssl.conf") if args.dir else None
conf = certificate.Config(configfile=configfile, domain=args.domain, commonName=args.name, force=args.force)
certificate.new_domain(conf)
elif args.cmd == 'remove':
if not args.domain:
raise Exception("No domain supplied")
domaindir = os.path.join(args.dir, args.domain) if args.dir else None
certificate.remove_domain(args.domain, domaindir)
elif args.cmd == 'export':
if not args.domain:
raise Exception("No domain supplied")
if not args.path:
raise Exception("No path supplied")
configfile = os.path.join(args.dir, args.domain, "openssl.conf") if args.dir else None
conf = certificate.Config(configfile=configfile, domain=args.domain, readonly=True)
certificate.copy_cert(conf, args.path)
def manage_cs(args):
if args.cmd == 'create' and args.org and args.name:
if not args.org:
raise Exception("No organization supplied")
configfile = os.path.join(args.dir, args.org, "openssl.conf") if args.dir else None
conf = certificate.Config(configfile=configfile, domain=args.org, commonName=args.name, force=args.force)
certificate.new_domain(conf)
# Add certificate to truststore
if args.dir:
truststore_path = os.path.join(args.dir, "trustStore")
else:
homefolder = os.getenv("HOME")
truststore_path = os.path.join(homefolder, ".calvin", "security", "trustStore")
if not os.path.isdir(truststore_path):
os.makedirs(truststore_path, 0700)
certificate.copy_cert(conf, truststore_path)
elif args.cmd == 'remove':
if not args.org:
raise Exception("No organization supplied")
orgdir = os.path.join(args.dir, args.org) if args.dir else None
# Remove certificate from truststore
configfile = os.path.join(orgdir, "openssl.conf") if args.dir else None
conf = certificate.Config(configfile=configfile, domain=args.org, readonly=True)
cert_file = conf.configuration["CA_default"]["certificate"]
if args.dir:
truststore_path = os.path.join(args.dir, "trustStore")
else:
homefolder = os.getenv("HOME")
truststore_path = os.path.join(homefolder, ".calvin", "security", "trustStore")
try:
os.remove(os.path.join(truststore_path, certificate.cert_hash(cert_file) + ".0"))
except OSError:
pass # The certificate is not in the truststore
certificate.remove_domain(args.org, orgdir)
elif args.cmd == 'export':
if not args.org:
raise Exception("No organization supplied")
if not args.path:
raise Exception("No path supplied")
configfile = os.path.join(args.dir, args.org, "openssl.conf") if args.dir else None
conf = certificate.Config(configfile=configfile, domain=args.org, readonly=True)
certificate.copy_cert(conf, args.path)
def main():
args = parse_args()
try:
args.func(args)
except Exception as e:
print "Error {}".format(e)
if __name__ == '__main__':
sys.exit(main())
```
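Each manage_* handler above consumes a parsed argparse namespace, so the same functions can be exercised without going through the command line. A minimal sketch for the trust handler, assuming it runs inside this module; the certificate file and directory names are made-up placeholder values, not defaults of the real tool.
```python
import argparse

# Hypothetical invocation of manage_trust(); 'cert.pem' and '/tmp/calvin_security'
# are illustrative values only.
args = argparse.Namespace(path='cert.pem', dir='/tmp/calvin_security')
manage_trust(args)  # copies cert.pem into /tmp/calvin_security/trustStore/
```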
#### File: calvin/Tools/deployer.py
```python
from calvin.utilities.calvinlogger import get_logger
from calvin.requests.request_handler import RequestHandler
_log = get_logger(__name__)
class Deployer(object):
"""
Deprecated!
Thin layer to support legacy users.
New users should use the control REST API or the RequestHandler.deploy_application or RequestHandler.deploy_app_info
Deploys an application to a runtime.
"""
def __init__(self, runtime, deployable, credentials=None, verify=True):
super(Deployer, self).__init__()
self.runtime = runtime
self.deployable = deployable
self.credentials = credentials
self.actor_map = {}
self.app_id = None
self.verify = verify
self.request_handler = RequestHandler()
if "name" in self.deployable:
self.name = self.deployable["name"]
else:
self.name = None
def deploy(self):
"""
Ask a runtime to instantiate actors and link them together.
"""
if not self.deployable['valid']:
raise Exception("Deploy information is not valid")
result = self.request_handler.deploy_app_info(self.runtime, self.name, self.deployable,
credentials=self.credentials, check=self.verify)
self.app_id = result['application_id']
self.actor_map = result['actor_map']
return self.app_id
def destroy(self):
return self.request_handler.delete_application(self.runtime, self.app_id)
```
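Despite the deprecation note, the class is still a small convenience wrapper around RequestHandler. A hedged usage sketch follows; the control URI is a placeholder and the deployable dict only shows the keys the class itself inspects ('valid' and 'name'), while the rest would normally come from the Calvin compiler.
```python
# Illustrative only: the runtime URI is made up and 'deployable' would normally
# be produced by the Calvin compiler (it must at least contain 'valid').
deployable = {'valid': True, 'name': 'my_app'}
deployer = Deployer("http://localhost:5001", deployable)
app_id = deployer.deploy()    # returns the application_id reported by the runtime
deployer.destroy()            # removes the application again
```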
#### File: calvin/Tools/log_analyze.py
```python
import argparse
import json
import pprint
import traceback
from datetime import datetime
import re
import textwrap
WIDTH = 80
def parse_arguments():
long_description = """
Analyze calvin log.
"""
argparser = argparse.ArgumentParser(description=long_description)
argparser.add_argument('files', metavar='<filenames>', type=str, nargs='+',
default=[], help='logfiles to display')
argparser.add_argument('-i', '--interleaved', dest='interleave', action='store_true',
                           help='Print non-ANALYZE log messages interleaved with the analyzed ones')
argparser.add_argument('-l', '--limit', dest='limit', type=int, default=0,
help='Limit stack trace print to specified nbr of frames')
argparser.add_argument('-w', '--width', dest='width', type=int, default=80,
help='Width of node column')
argparser.add_argument('-c', '--text-width', dest='text_width', type=int, default=None,
help='Width of text in node column')
argparser.add_argument('-f', '--first', dest='first', type=str, default=None,
help='A node id that should be in first column')
argparser.add_argument('-x', '--exclude', dest='excludes', action='append', default=[],
help="Exclude logged module, can be repeated")
return argparser.parse_args()
re_pid = re.compile("^[0-9,\-,\,, ,:]*[A-Z]* *([0-9]*)-.*")
class MyPrettyPrinter(pprint.PrettyPrinter):
def format(self, object, context, maxlevels, level):
# Pretty print strings unescaped
if isinstance(object, basestring):
return (object.decode('string_escape'), True, False)
return pprint.PrettyPrinter.format(self, object, context, maxlevels, level)
def main():
global WIDTH
args = parse_arguments()
WIDTH = args.width or WIDTH
text_width = args.text_width or WIDTH + 20
print "Analyze", args.files
files = []
for name in set(args.files):
files.append(open(name, 'r'))
log = []
pid_to_node_id = {}
for file in files:
for line in file:
try:
t=datetime.strptime(line[:23], "%Y-%m-%d %H:%M:%S,%f")
except:
t=None
if line.find('[[ANALYZE]]')==-1:
if args.interleave:
                    # Non-ANALYZE log lines might contain line breaks; the following lines (without a timestamp)
                    # then need to be sorted under the first line, hence combine them
if t:
log.append({'time': t,
'func': 'OTHER', 'param': line, 'node_id': None})
else:
if log:
log[-1]['param'] += line
continue
try:
lineparts = line.split('[[ANALYZE]]',1)
logline = json.loads(lineparts[1])
logline['match_exclude'] = lineparts[0]
except:
# For some reason could not handle it, treat it as a normal other log level line
logline = {'func': 'OTHER', 'param': line, 'node_id': None}
if logline['node_id']:
try:
pid = re.match(re_pid, line).group(1)
if int(pid) != logline['node_id']:
pid_to_node_id[pid] = logline['node_id']
except:
pass
logline['time'] = t
#pprint.pprint(logline)
log.append(logline)
pprint.pprint(pid_to_node_id)
int_pid_to_node_id = {int(k): v for k,v in pid_to_node_id.iteritems()}
for l in log:
if l['node_id'] in int_pid_to_node_id:
l['node_id'] = int_pid_to_node_id[l['node_id']]
if len(files)>1:
log = sorted(log, key=lambda k: k['time'])
# Collect all node ids and remove "TESTRUN" string as node id since it is used when logging py.test name
nodes = list(set([l['node_id'] for l in log] + [l.get('peer_node_id', None) for l in log]) - set([None, "TESTRUN"]))
if args.first in nodes:
nodes.remove(args.first)
nodes.insert(0, args.first)
line = ""
for n in nodes:
line += n + " "*(WIDTH-35)
print line
for l in log:
if 'match_exclude' in l:
exclude_line = l['match_exclude']
else:
exclude_line = l['param']
if any([exclude_line.find(excl) > -1 for excl in args.excludes]):
continue
if l['node_id'] == "TESTRUN":
print l['func'] + "%"*(len(nodes)*WIDTH-len(l['func']))
if 'param' in l and l['param']:
pprint.pprint(l['param'])
continue
if l['func'] == "OTHER" and l['node_id'] is None:
try:
ind = nodes.index(pid_to_node_id[re.match(re_pid, l['param']).group(1)])*WIDTH
except Exception as e:
ind = 0
pass
lines = str.splitlines(l['param'].rstrip())
pre = "<>"
for line in lines:
wrapped_lines = textwrap.wrap(line, width=text_width,
replace_whitespace=False, drop_whitespace=False)
for wl in wrapped_lines:
print " "*ind + pre + wl
pre = ""
continue
ind = nodes.index(l['node_id'])*WIDTH
if l['func']=="SEND":
ends = nodes.index(l['param']['to_rt_uuid'])*WIDTH
if ind < ends:
print " "*ind + "-"*(ends-1-ind) + ">"
else:
print " "*ends + "<" + "-"*(ind - ends-1)
if l['param']['cmd'] == "REPLY":
id_ = l['param']['msg_uuid']
print (" "*ind + [c['param']['cmd'] for c in log
if c['func'] == "SEND" and "msg_uuid" in c['param'] and c['param']['msg_uuid'] == id_][0] +
" reply")
pp = pprint.pformat(l['param'], indent=1, width=text_width)
for p in pp.split("\n"):
print " "*ind + p
elif l['func']!="RECV":
if l['peer_node_id']:
ends = nodes.index(l['peer_node_id'])*WIDTH
if ind < ends:
print " "*ind + "# " + l['func'] + " #" + "="*(ends-4-ind-len(l['func'])) + "*"
else:
print " "*ends + "*" + "="*(ind - ends-1) + "# " + l['func'] + " #"
else:
print " "*ind + "# " + l['func'] + " #"
pp = MyPrettyPrinter(indent=1, width=WIDTH).pformat(l['param'])
for p in pp.split("\n"):
print " "*ind + p
if l['stack'] and args.limit >= 0:
tb = traceback.format_list(l['stack'][-(args.limit+2):-1])
for s in tb:
for sl in s.split("\n"):
if sl:
print " "*ind + ">" + sl.rstrip()
if __name__ == '__main__':
main()
```
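The script keys on the literal marker '[[ANALYZE]]' followed by a JSON payload with at least 'func', 'param' and 'node_id' ('peer_node_id' and 'stack' are read when present). Below is a sketch of a line it would parse; the timestamp, pid and all ids are made up.
```python
import json

# Illustrative [[ANALYZE]] line in the shape log_analyze.py expects; ids are made up.
payload = {'func': 'SEND', 'node_id': 'node-a', 'peer_node_id': 'node-b',
           'param': {'cmd': 'REPLY', 'msg_uuid': 'MSGID_1', 'to_rt_uuid': 'node-b'},
           'stack': []}
line = "2016-03-04 09:00:00,000 DEBUG 1234-analyze: [[ANALYZE]]" + json.dumps(payload)
```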
#### File: utilities/authorization/policy_decision_point.py
```python
import re
from calvin.utilities.authorization.policy_retrieval_point import FilePolicyRetrievalPoint
from calvin.utilities.authorization.policy_information_point import PolicyInformationPoint
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class PolicyDecisionPoint(object):
def __init__(self, config=None):
# Default config
self.config = {
"policy_combining": "permit_overrides",
"policy_storage": "files",
"policy_storage_path": "~/.calvin/security/policies",
"policy_name_pattern": "*"
}
if config is not None:
# Change some of the default values of the config.
self.config.update(config)
# TODO: implement other policy storage alternatives
# if self.config["policy_storage"] == "db":
# self.prp = DbPolicyRetrievalPoint(self.config["policy_storage_path"])
# else:
self.prp = FilePolicyRetrievalPoint(self.config["policy_storage_path"])
self.pip = PolicyInformationPoint()
self.registered_nodes = {}
def register_node(self, node_id, node_attributes):
"""
Register node attributes for authorization.
Node attributes example:
{
"node_name.name": "testNode",
"node_name.organization": "com.ericsson",
"owner.organization": "com.ericsson",
"address.country": "SE"
}
"""
self.registered_nodes[node_id] = node_attributes
def authorize(self, request):
"""
Use policies to return access decision for the request.
The request and response format is inspired by the XACML JSON Profile
but has been simplified to be more compact.
Request (example):
{
"subject": {
"user": ["user1"],
"actor_signer": ["signer"]
},
"action": {
"requires": ["runtime", "calvinsys.events.timer"]
},
"resource": {
"node_id": "a77c0687-dce8-496f-8d81-571333be6116"
}
}
Response (example):
{
"decision": "permit",
"obligations": [
{
"id": "time_range",
"attributes": {
"start_time": "09:00",
"end_time": "17:00"
}
}
]
}
"""
_log.info("Authorization request received: %s" % request)
if "resource" in request and "node_id" in request["resource"]:
try:
request["resource"] = self.registered_nodes[request["resource"]["node_id"]]
except Exception:
request["resource"] = {}
if "action" in request and "requires" in request["action"]:
requires = request["action"]["requires"]
_log.debug("PolicyDecisionPoint: Requires %s" % requires)
if len(requires) > 1:
decisions = []
obligations = []
# Create one request for each requirement.
for req in requires:
requirement_request = request.copy()
requirement_request["action"]["requires"] = [req]
policy_decision, policy_obligations = self.combined_policy_decision(requirement_request)
decisions.append(policy_decision)
if policy_obligations:
obligations.append(policy_obligations)
# If the policy decisions for all requirements are the same, respond with that decision.
if all(x == decisions[0] for x in decisions):
return self.create_response(decisions[0], obligations)
else:
return self.create_response("indeterminate", [])
return self.create_response(*self.combined_policy_decision(request))
def combined_policy_decision(self, request):
"""
Return (decision, obligations) for request using policy combining algorithm in config.
Possible decisions: permit, deny, indeterminate, not_applicable
Policy format (example):
{
"id": "policy1",
"description": "Security policy for user1/user2 with
actor signed by 'signer'",
"rule_combining": "permit_overrides",
"target": {
"subject": {
"user": ["user1", "user2"],
"actor_signer": "signer"
}
},
"rules": [
{
"id": "policy1_rule1",
"description": "Permit access to 'calvinsys.events.timer',
'calvinsys.io.*' and 'runtime' between
09:00 and 17:00 if condition is true",
"effect": "permit",
"target": {
"action": {
"requires": ["calvinsys.events.timer",
"calvinsys.io.*", "runtime"]
}
},
"condition": {
"function": "and",
"attributes": [
{
"function": "equal",
"attributes": ["attr:resource:address.country",
["SE", "DK"]]
},
{
"function": "greater_than_or_equal",
"attributes": ["attr:environment:current_date",
"2016-03-04"]
}
]
},
"obligations": [
{
"id": "time_range",
"attributes": {
"start_time": "09:00",
"end_time": "17:00"
}
}
]
}
]
}
"""
policy_decisions = []
policy_obligations = []
try:
# Get policies from PRP (Policy Retrieval Point).
# TODO: policy needs to be signed if external PRP is used.
# In most cases the PRP and the PDP will be located on the same physical machine.
# TODO: if database is used, policies should be indexed based on their Target constraints
policies = self.prp.get_policies(self.config["policy_name_pattern"])
for policy_id in policies:
policy = policies[policy_id]
# Check if policy target matches (policy without target matches everything).
if "target" not in policy or self.target_matches(policy["target"], request):
# Get a policy decision if target matches.
decision, obligations = self.policy_decision(policy, request)
_log.info("decisions: %s" % decision)
if ((decision == "permit" and not obligations and self.config["policy_combining"] == "permit_overrides") or
(decision == "deny" and self.config["policy_combining"] == "deny_overrides")):
# Stop checking further rules.
# If "permit" with obligations, continue since "permit" without obligations may be found.
return (decision, [])
policy_decisions.append(decision)
policy_obligations += obligations
if "indeterminate" in policy_decisions:
return ("indeterminate", [])
if not all(x == "not_applicable" for x in policy_decisions):
if self.config["policy_combining"] == "deny_overrides" or policy_obligations:
return ("permit", policy_obligations)
else:
return ("deny", [])
else:
return ("not_applicable", [])
except Exception:
return ("indeterminate", [])
def create_response(self, decision, obligations):
"""Return authorization response including decision and obligations."""
# TODO: include more information to make it possible to send the response to other nodes within the domain
# when an actor is migrated, e.g. node IDs for which the decision is valid.
response = {}
response["decision"] = decision
if obligations:
response["obligations"] = obligations
return response
def target_matches(self, target, request):
"""Return True if policy target matches request, else False."""
for attribute_type in target:
for attribute in target[attribute_type]:
try:
request_value = request[attribute_type][attribute]
except KeyError:
try:
# Try to fetch missing attribute from Policy Information Point (PIP).
# TODO: cache this value.
# Same value should be used for future tests in this policy or other policies when handling this request.
request_value = self.pip.get_attribute_value(attribute_type, attribute)
except KeyError:
_log.info("PolicyDecisionPoint: Attribute not found: %s %s" % (attribute_type, attribute))
return False # Or 'indeterminate' (if MustBePresent is True and none of the other targets return False)?
# Accept both single object and lists by turning single objects into a list.
if not isinstance(request_value, list):
request_value = [request_value]
policy_value = target[attribute_type][attribute]
if not isinstance(policy_value, list):
policy_value = [policy_value]
try:
# If the lists contain many values, only one of the values need to match.
# Regular expressions are allowed for strings in policies
# (re.match checks for a match at the beginning of the string, $ marks the end of the string).
if not any([re.match(r+'$', x) for r in policy_value for x in request_value]):
_log.info("PolicyDecisionPoint: Not matching: %s %s %s" % (attribute_type, attribute, policy_value))
return False
except TypeError: # If the value is not a string
if set(request_value).isdisjoint(policy_value):
_log.info("PolicyDecisionPoint: Not matching: %s %s %s" % (attribute_type, attribute, policy_value))
return False
# True is returned if every attribute in the policy target matches the corresponding request attribute.
return True
def policy_decision(self, policy, request):
"""Use policy to return (access decision, obligations) for the request."""
rule_decisions = []
rule_obligations = []
for rule in policy["rules"]:
# Check if rule target matches (rule without target matches everything).
if "target" not in rule or self.target_matches(rule["target"], request):
# Get a rule decision if target matches.
decision, obligations = self.rule_decision(rule, request)
if ((decision == "permit" and not obligations and policy["rule_combining"] == "permit_overrides") or
(decision == "deny" and policy["rule_combining"] == "deny_overrides")):
# Stop checking further rules.
# If "permit" with obligations, continue since "permit" without obligations may be found.
return (decision, [])
rule_decisions.append(decision)
if decision == "permit" and obligations:
# Obligations are only accepted if the decision is "permit".
rule_obligations += obligations
if "indeterminate" in rule_decisions:
return ("indeterminate", [])
if not all(x == "not_applicable" for x in rule_decisions):
if policy["rule_combining"] == "deny_overrides" or rule_obligations:
return ("permit", rule_obligations)
else:
return ("deny", [])
else:
return ("not_applicable", [])
def rule_decision(self, rule, request):
"""Return (rule decision, obligations) for the request"""
# Check condition if it exists.
if "condition" in rule:
try:
args = []
for attribute in rule["condition"]["attributes"]:
if isinstance(attribute, dict): # Contains another function
args.append(self.evaluate_function(attribute["function"], attribute["attributes"], request))
else:
args.append(attribute)
rule_satisfied = self.evaluate_function(rule["condition"]["function"], args, request)
if rule_satisfied:
return (rule["effect"], rule.get("obligations", []))
else:
return ("not_applicable", [])
except Exception:
return ("indeterminate", [])
else:
# If no condition in the rule, return the rule effect directly.
return (rule["effect"], rule.get("obligations", []))
def evaluate_function(self, func, args, request):
"""
Return result of function func with arguments args.
If a function argument starts with 'attr', e.g. 'attr:resource:address.country',
the value is retrieved from the request or the Policy Information Point.
"""
# Check each function argument
for index, arg in enumerate(args):
if isinstance(arg, basestring):
if arg.startswith("attr"):
# Get value from request if the argument starts with "attr".
path = arg.split(":")
try:
args[index] = request[path[1]][path[2]] # path[0] is "attr"
except KeyError:
# TODO: check in attribute cache first
try:
# Try to fetch missing attribute from Policy Information Point (PIP).
# TODO: cache this value.
# Same value should be used for future tests in this policy or other policies when handling this request
args[index] = self.pip.get_attribute_value(path[1], path[2])
except KeyError:
_log.debug("PolicyDecisionPoint: Attribute not found: %s %s" % (path[1], path[2]))
return False
# Accept both strings and lists by turning strings into single element lists.
if isinstance(args[index], basestring):
args[index] = [args[index]]
if func == "equal":
try:
# If the lists contain many values, only one of the values need to match.
# Regular expressions (has to be args[1]) are allowed for strings in policies
# (re.match checks for a match at the beginning of the string, $ marks the end of the string).
return any([re.match(r+'$', x) for r in args[1] for x in args[0]])
except TypeError: # If the value is not a string
return not set(args[0]).isdisjoint(args[1])
elif func == "not_equal":
try:
# If the lists contain many values, only one of the values need to match.
# Regular expressions (has to be args[1]) are allowed for strings in policies
# (re.match checks for a match at the beginning of the string, $ marks the end of the string).
return not any([re.match(r+'$', x) for r in args[1] for x in args[0]])
except TypeError: # If the value is not a string
return set(args[0]).isdisjoint(args[1])
elif func == "and":
return all(args) # True if all elements of the list are True
elif func == "or":
return True in args # True if any True exists in the list
elif func == "less_than_or_equal":
return args[0] <= args[1]
elif func == "greater_than_or_equal":
return args[0] >= args[1]
```
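A minimal sketch of driving the class directly follows; it assumes policy files already exist under the configured storage path, and the node id, attributes and required capabilities are illustrative values mirroring the docstrings above.
```python
# Illustrative values only; real node ids and policies come from the runtime.
pdp = PolicyDecisionPoint()
pdp.register_node("a77c0687-dce8-496f-8d81-571333be6116",
                  {"node_name.name": "testNode", "address.country": "SE"})
request = {
    "subject": {"user": ["user1"], "actor_signer": ["signer"]},
    "action": {"requires": ["runtime", "calvinsys.events.timer"]},
    "resource": {"node_id": "a77c0687-dce8-496f-8d81-571333be6116"}
}
response = pdp.authorize(request)   # e.g. {"decision": "permit"} if a policy matches
```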
#### File: calvin/utilities/security.py
```python
import os
import glob
from datetime import datetime, timedelta
try:
import OpenSSL.crypto
HAS_OPENSSL = True
except:
HAS_OPENSSL = False
try:
import pyrad.packet
from pyrad.client import Client
from pyrad.dictionary import Dictionary
HAS_PYRAD = True
except:
HAS_PYRAD = False
try:
import jwt
HAS_JWT = True
except:
HAS_JWT = False
from calvin.utilities.authorization.policy_decision_point import PolicyDecisionPoint
from calvin.utilities import certificate
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities import calvinconfig
from calvin.requests.request_handler import RequestHandler
_conf = calvinconfig.get()
_log = get_logger(__name__)
# Default timeout
TIMEOUT=5
def security_modules_check():
if _conf.get("security","security_conf"):
# Want security
if not HAS_OPENSSL:
# Miss OpenSSL
_log.error("Security: Install openssl to allow verification of signatures and certificates")
return False
_conf.get("security","security_conf")['authentication']
if _conf.get("security","security_conf")['authentication']['procedure'] == "radius" and not HAS_PYRAD:
_log.error("Security: Install pyrad to use radius server as authentication method.")
return False
return True
def security_needed_check():
if _conf.get("security","security_conf"):
# Want security
return True
else:
return False
class Security(object):
def __init__(self, node):
_log.debug("Security: _init_")
self.sec_conf = _conf.get("security","security_conf")
if self.sec_conf is not None and not self.sec_conf.get('signature_trust_store', None):
# Set default directory for trust store
homefolder = os.path.expanduser("~")
truststore_dir = os.path.join(homefolder, ".calvin", "security", "trustStore")
self.sec_conf['signature_trust_store'] = truststore_dir
self.node = node
self.subject = {}
self.auth = {}
self.request_handler = RequestHandler()
def __str__(self):
return "Subject: %s\nAuth: %s" % (self.subject, self.auth)
def set_subject(self, subject):
"""Set subject attributes and mark them as unauthenticated"""
_log.debug("Security: set_subject %s" % subject)
if not isinstance(subject, dict):
return False
# Make sure that all subject values are lists.
self.subject = {k: list(v) if isinstance(v, (list, tuple, set)) else [v]
for k, v in subject.iteritems()}
# Set the corresponding values of self.auth to False to indicate that they are unauthenticated.
self.auth = {k: [False]*len(v) for k, v in self.subject.iteritems()}
def authenticate_subject(self):
"""Authenticate subject using the authentication procedure specified in config."""
_log.debug("Security: authenticate_subject")
if not security_needed_check():
_log.debug("Security: authenticate_subject no security needed")
return True
if self.sec_conf['authentication']['procedure'] == "local":
_log.debug("Security: local authentication method chosen")
return self.authenticate_using_local_database()
if self.sec_conf['authentication']['procedure'] == "radius":
if not HAS_PYRAD:
_log.error("Security: Install pyrad to use radius server as authentication method.\n" +
"Note! NO AUTHENTICATION USED")
return False
_log.debug("Security: Radius authentication method chosen")
return self.authenticate_using_radius_server()
_log.debug("Security: No security config, so authentication disabled")
return True
def authenticate_using_radius_server(self):
"""
Authenticate a subject using a RADIUS server.
The corresponding value in self.auth is set to True
if authentication is successful.
"""
auth = []
if self.subject['user']:
root_dir = os.path.abspath(os.path.join(_conf.install_location(), '..'))
srv=Client(server=self.sec_conf['authentication']['server_ip'],
secret= bytes(self.sec_conf['authentication']['secret']),
dict=Dictionary(os.path.join(root_dir, "pyrad_dicts", "dictionary"),
os.path.join(root_dir, "pyrad_dicts", "dictionary.acc")))
req=srv.CreateAuthPacket(code=pyrad.packet.AccessRequest,
User_Name=self.subject['user'][0],
NAS_Identifier="localhost")
req["User-Password"]=req.PwCrypt(self.subject['password'][0])
# FIXME is this over socket? then we should not block here
reply=srv.SendPacket(req)
_log.debug("Attributes returned by server:")
for i in reply.keys():
_log.debug("%s: %s" % (i, reply[i]))
if reply.code==pyrad.packet.AccessAccept:
_log.debug("Security: access accepted")
auth.append(True)
# return True
else:
_log.debug("Security: access denied")
auth.append(False)
# return False
self.auth['user']=auth
return any(auth)
def authenticate_using_local_database(self):
"""
Authenticate a subject against information stored in config.
The corresponding value in self.auth is set to True
if authentication is successful.
This is primarily intended for testing purposes,
since passwords aren't stored securely.
"""
if 'local_users' not in self.sec_conf['authentication']:
_log.debug("local_users not found in security_conf: %s" % self.sec_conf['authentication'])
return False
# Verify users against stored passwords
# TODO expand with other subject types
d = self.sec_conf['authentication']['local_users']
if not ('user' in self.subject and 'password' in self.subject):
return False
if len(self.subject['user']) != len(self.subject['password']):
return False
auth = []
for user, password in zip(self.subject['user'], self.subject['password']):
if user in d.keys():
if d[user] == password:
_log.debug("Security: found user: %s",user)
auth.append(True)
else:
_log.debug("Security: incorrect username or password")
auth.append(False)
else:
auth.append(False)
self.auth['user'] = auth
return any(auth)
def get_authenticated_subject_attributes(self):
"""Return a dictionary with all authenticated subject attributes."""
return {key: [self.subject[key][i] for i, auth in enumerate(values) if auth]
for key, values in self.auth.iteritems() if any(values)}
def check_security_policy(self, requires=None):
"""Check if access is permitted for the actor by the security policy"""
_log.debug("Security: check_security_policy")
if self.sec_conf and "authorization" in self.sec_conf:
return self.get_authorization_decision(requires)
# No security config, so access control is disabled
return True
def get_authorization_decision(self, requires=None):
"""Get authorization decision using the authorization procedure specified in config."""
request = {}
request["subject"] = self.get_authenticated_subject_attributes()
request["resource"] = {"node_id": self.node.id}
if requires is not None:
request["action"] = {"requires": requires}
_log.debug("Security: authorization request: %s" % request)
# Check if the authorization server is local (the runtime itself) or external.
if self.sec_conf['authorization']['procedure'] == "external":
if not HAS_JWT:
_log.error("Security: Install JWT to use external server as authorization method.\n" +
"Note: NO AUTHORIZATION USED")
return False
_log.debug("Security: external authorization method chosen")
decision, obligations = self.authorize_using_external_server(request)
else:
_log.debug("Security: local authorization method chosen")
decision, obligations = self.authorize_using_local_policies(request)
if decision == "permit":
_log.debug("Security: access permitted to resources")
if obligations:
return (True, obligations)
return True
elif decision == "deny":
_log.debug("Security: access denied to resources")
return False
elif decision == "indeterminate":
_log.debug("Security: access denied to resources. Error occured when evaluating policies.")
return False
else:
_log.debug("Security: access denied to resources. No matching policies.")
return False
def authorize_using_external_server(self, request):
"""
Access authorization using an external authorization server.
The request is put in a JSON Web Token (JWT) that is signed
and includes timestamps and information about sender and receiver.
"""
ip_addr = self.sec_conf['authorization']['server_ip']
port = self.sec_conf['authorization']['server_port']
# Alternative: specify node_id/dnQualifier instead in sec_conf and create a tunnel for the
# runtime-to-runtime communication (see calvin_proto.py). Could also add node_id as "aud" (audience) in jwt payload.
authorization_server_uri = "http://%s:%d" % (ip_addr, port)
payload = {
"iss": self.node.id,
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(seconds=60),
"request": request
}
cert_conffile = _conf.get("security", "certificate_conf")
domain = _conf.get("security", "certificate_domain")
cert_conf = certificate.Config(cert_conffile, domain)
node_name = self.node.attributes.get_node_name_as_str()
private_key = certificate.get_private_key(cert_conf, node_name)
# Create a JSON Web Token signed using the node's Elliptic Curve private key.
jwt_request = jwt.encode(payload, private_key, algorithm='ES256')
# cert_name is this node's certificate filename (without file extension)
cert_name = certificate.get_own_cert_name(cert_conf, node_name)
try:
# Send request to authorization server.
response = self.request_handler.get_authorization_decision(authorization_server_uri, jwt_request, cert_name)
except Exception as e:
_log.error("Security: authorization server error - %s" % str(e))
return ("indeterminate", [])
try:
# Get authorization server certificate from disk.
# TODO: get certificate from DHT if it wasn't found on disk.
certificate_authz_server = certificate.get_other_certificate(cert_conf, node_name, response["cert_name"])
public_key_authz_server = certificate.get_public_key(certificate_authz_server)
authz_server_id = certificate_authz_server.get_subject().dnQualifier
# Decode the JSON Web Token returned from the authorization server.
# The signature is verified using the Elliptic Curve public key of the authorization server.
# Exception raised if signature verification fails or if issuer and/or audience are incorrect.
decoded = jwt.decode(response["jwt"], public_key_authz_server, algorithms=['ES256'],
issuer=authz_server_id, audience=self.node.id)
response = decoded['response']
return (response['decision'], response.get("obligations", []))
except Exception as e:
_log.error("Security: JWT decoding error - %s" % str(e))
return ("indeterminate", [])
def authorize_using_local_policies(self, request):
"""Authorize access using a local Policy Decision Point (PDP)."""
try:
response = self.node.pdp.authorize(request)
return (response['decision'], response.get("obligations", []))
except Exception as e:
_log.error("Security: local authorization error - %s" % str(e))
return ("indeterminate", [])
@staticmethod
def verify_signature_get_files(filename, skip_file=False):
"""Get files needed for signature verification of the specified file."""
# Get the data
sign_filenames = filename + ".sign.*"
sign_content = {}
file_content = ""
# Filename is *.sign.<cert_hash>
sign_files = {os.path.basename(f).split(".sign.")[1]: f for f in glob.glob(sign_filenames)}
for cert_hash, sign_filename in sign_files.iteritems():
try:
with open(sign_filename, 'rt') as f:
sign_content[cert_hash] = f.read()
_log.debug("Security: found signature for %s" % cert_hash)
except:
pass
if not skip_file:
try:
with open(filename, 'rt') as f:
file_content = f.read()
except:
                _log.debug("Security: file can't be opened")
                return None
return {'sign': sign_content, 'file': file_content}
def verify_signature(self, file, flag):
"""Verify the signature of the specified file of type flag."""
content = Security.verify_signature_get_files(file)
if content:
return self.verify_signature_content(content, flag)
else:
return False
def verify_signature_content(self, content, flag):
"""Verify the signature of the content of type flag."""
_log.debug("Security: verify %s signature of %s" % (flag, content))
        if not self.sec_conf:
_log.debug("Security: no signature verification required: %s" % content['file'])
return True
if flag not in ["application", "actor"]:
# TODO add component verification
raise NotImplementedError
self.auth[flag + "_signer"] = [True] # Needed to include the signer attribute in authorization requests.
if content is None or not content['sign']:
_log.debug("Security: signature information missing")
self.subject[flag + "_signer"] = ["__unsigned__"]
return True # True is returned to allow authorization request with the signer attribute '__unsigned__'.
if not HAS_OPENSSL:
_log.error("Security: install OpenSSL to allow verification of signatures and certificates")
_log.error("Security: verification of %s signature failed" % flag)
self.subject[flag + "_signer"] = ["__invalid__"]
return False
# If any of the signatures is verified correctly, True is returned.
for cert_hash, signature in content['sign'].iteritems():
try:
# Check if the certificate is stored in the truststore (name is <cert_hash>.0)
trusted_cert_path = os.path.join(self.sec_conf['signature_trust_store'], cert_hash + ".0")
with open(trusted_cert_path, 'rt') as f:
trusted_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read())
try:
# Verify signature
OpenSSL.crypto.verify(trusted_cert, signature, content['file'], 'sha256')
_log.debug("Security: signature correct")
self.subject[flag + "_signer"] = [trusted_cert.get_issuer().CN] # The Common Name field for the issuer
return True
except Exception as e:
_log.debug("Security: OpenSSL verification error", exc_info=True)
continue
except Exception as e:
_log.debug("Security: error opening one of the needed certificates", exc_info=True)
continue
_log.error("Security: verification of %s signature failed" % flag)
self.subject[flag + "_signer"] = ["__invalid__"]
return False
```
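For the local authentication and authorization branches above, the 'security_conf' entry read from calvinconfig needs at least the sections sketched below. This is only a hedged illustration: user names and passwords are placeholders and only keys actually referenced in the code are shown.
```python
# Illustrative security_conf fragment for local authentication/authorization;
# all names, passwords and paths are placeholders.
security_conf = {
    "authentication": {
        "procedure": "local",
        "local_users": {"user1": "pass1"}
    },
    "authorization": {
        "procedure": "local"
    },
    "signature_trust_store": "/home/user/.calvin/security/trustStore"
}
```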
#### File: calvin/utilities/storage_node.py
```python
from multiprocessing import Process
# For trace
import sys
import trace
import logging
from calvin.runtime.north import scheduler
from calvin.runtime.north import storage
from calvin.runtime.north import calvincontrol
from calvin.runtime.south.plugins.async import async
from calvin.utilities import calvinuuid
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class FakeAM(object):
def enabled_actors():
return []
class FakeMonitor(object):
def loop():
return False
class StorageNode(object):
def __init__(self, control_uri):
super(StorageNode, self).__init__()
self.id = calvinuuid.uuid("NODE")
self.control_uri = control_uri
self.control = calvincontrol.get_calvincontrol()
_scheduler = scheduler.DebugScheduler if _log.getEffectiveLevel() <= logging.DEBUG else scheduler.Scheduler
self.sched = _scheduler(self, FakeAM(), FakeMonitor())
self.control.start(node=self, uri=control_uri)
self.storage = storage.Storage(self)
async.DelayedCall(0, self.start)
#
# Event loop
#
def run(self):
"""main loop on node"""
_log.debug("Node %s is running" % self.id)
self.sched.run()
def start(self):
""" Run once when main loop is started """
self.storage.start()
def stop(self, callback=None):
def stopped(*args):
_log.analyze(self.id, "+", {'args': args})
self.sched.stop()
self.control.stop()
_log.analyze(self.id, "+", {})
self.storage.stop(stopped)
def create_node(uri, control_uri, attributes=None):
n = StorageNode(control_uri)
n.run()
_log.info('Quitting node "%s"' % n.control_uri)
def create_tracing_node(uri, control_uri, attributes=None):
"""
Same as create_node, but will trace every line of execution.
Creates trace dump in output file '<host>_<port>.trace'
"""
n = StorageNode(control_uri)
_, host = uri.split('://')
with open("%s.trace" % (host, ), "w") as f:
tmp = sys.stdout
# Modules to ignore
ignore = [
'fifo', 'calvin', 'actor', 'pickle', 'socket',
'uuid', 'codecs', 'copy_reg', 'string_escape', '__init__',
'colorlog', 'posixpath', 'glob', 'genericpath', 'base',
'sre_parse', 'sre_compile', 'fdesc', 'posixbase', 'escape_codes',
'fnmatch', 'urlparse', 're', 'stat', 'six'
]
with f as sys.stdout:
tracer = trace.Trace(trace=1, count=0, ignoremods=ignore)
tracer.runfunc(n.run)
sys.stdout = tmp
_log.info('Quitting node "%s"' % n.control_uri)
def start_node(uri, control_uri, trace_exec=False, attributes=None):
""" Start storage only node, keeps same param list as full node, but
uses only the control_uri
"""
_create_node = create_tracing_node if trace_exec else create_node
p = Process(target=_create_node, args=(uri, control_uri, attributes))
p.daemon = True
p.start()
return p
```
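start_node keeps the argument list of a full runtime but only the control URI matters here. A minimal sketch with placeholder URIs:
```python
# Placeholder URIs; start_node returns the daemonized child Process.
proc = start_node("calvinip://localhost:5000", "http://localhost:5001")
# ... the storage-only node now answers on the control URI ...
proc.terminate()
proc.join()
```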
#### File: josrolgil/exjobbCalvin/setup.py
```python
import os
from setuptools import setup
def read_description(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(name='calvin',
version='0.4',
url="http://github.com/EricssonResearch/calvin-base",
license="Apache Software License",
author="<NAME>",
author_email="N/A",
tests_require=[
'mock>1.0.1',
'pytest>=1.4.25',
'pytest-twisted'
],
install_requires=[
'colorlog>=2.6.0',
'kademlia>=0.4',
'ply>=3.6',
'Twisted>=15.0.0',
'requests >= 2.6.0',
'infi.traceback>=0.3.11',
'wrapt==1.10.2',
'pyserial>=2.6',
'netifaces>=0.10.4'
],
description="Calvin is a distributed runtime and development framework for an actor based dataflow"
"programming methodology",
long_description=read_description('README.md'),
packages=["calvin"],
include_package_data=True,
platforms='any',
test_suite="calvin.test.test_calvin",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Framework :: Twisted",
"Natural Language :: English",
"Intended Audience :: Developers",
"Topic :: Software Development",
],
extras_require={
'crypto': 'pyOpenSSL==0.15.1'
},
entry_points={
'console_scripts': [
'csruntime=calvin.Tools.csruntime:main',
'cscontrol=calvin.Tools.cscontrol:main',
'csdocs=calvin.Tools.calvindoc:main',
'cscompile=calvin.Tools.cscompiler:main',
'csinstall=calvin.Tools.csinstaller:main',
'csmanage=calvin.Tools.csmanage:main',
'csweb=calvin.Tools.www.csweb:main',
'csviz=calvin.Tools.csviz:main'
]
}
)
``` |
{
"source": "jossafossa/Project24_backend",
"score": 3
} |
#### File: Project24_backend/friendcircle/models.py
```python
from django.db import models
class FriendCircle(models.Model):
name = models.CharField(blank=True, max_length=255)
description = models.CharField(blank=True, max_length=1000)
interests = models.ManyToManyField('interests.Interest', blank=True)
members = models.ManyToManyField(
'users.CustomUser',
through='friendcircle.FriendCircleMembership',
through_fields=('friendcircle', 'user'),
related_name='memberships',
)
def __str__(self):
return self.name
# Keeps track of FriendCircle memberships
class FriendCircleMembership(models.Model):
user = models.ForeignKey('users.CustomUser', on_delete=models.CASCADE)
friendcircle = models.ForeignKey('friendcircle.FriendCircle', on_delete=models.CASCADE)
startdate = models.DateTimeField(auto_now_add=True)
enddate = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.user.name + " member at " + self.friendcircle.name
class Meta:
unique_together = (('user', 'friendcircle'))
MATCH_STATUS = (
('O', 'Not swiped',),
('V', 'Swiped Right',),
('X', 'Swiped Left',),
)
# Keeps track of matches. If both parties swiped right, the user can be added to FriendCircleMembership
class FriendCircleMatcher(models.Model):
user = models.ForeignKey('users.CustomUser', on_delete=models.CASCADE)
user_match_status = models.CharField(max_length=1,
choices=MATCH_STATUS,
default="O")
friendcircle = models.ForeignKey('friendcircle.FriendCircle', on_delete=models.CASCADE)
friendcircle_match_status = models.CharField(max_length=1,
choices=MATCH_STATUS,
default="O")
def __str__(self):
return self.user.email + " + " + self.friendcircle.name
class Meta:
unique_together = (('user', 'friendcircle'))
```
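The comment on FriendCircleMatcher implies a small workflow: once both sides have swiped right ('V'), the user can be promoted to a membership. A hedged sketch of that step, assuming 'user' and 'circle' objects already exist in the database:
```python
# Hypothetical promotion step; 'user' and 'circle' are assumed to exist already.
match = FriendCircleMatcher.objects.get(user=user, friendcircle=circle)
if match.user_match_status == 'V' and match.friendcircle_match_status == 'V':
    FriendCircleMembership.objects.create(user=user, friendcircle=circle)
```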
#### File: Project24_backend/prikmuur/models.py
```python
from django.db import models
from django.contrib.auth.models import Group
from users.models import CustomUser
from friendcircle.models import FriendCircle
class Post(models.Model):
created = models.DateTimeField(auto_now_add=True)
group = models.ForeignKey(FriendCircle, related_name='prikmuurpost', on_delete=models.CASCADE)
postedBy = models.ForeignKey(CustomUser, related_name='prikmuurpost', on_delete=models.CASCADE)
subject = models.CharField(max_length=45)
noticeText = models.TextField()
class Meta:
ordering = ('created',)
def __str__(self):
return self.subject
``` |
{
"source": "jossalgon/Telethon",
"score": 2
} |
#### File: telethon/events/common.py
```python
import abc
import itertools
import warnings
from .. import utils
from ..errors import RPCError
from ..tl import TLObject, types, functions
def _into_id_set(client, chats):
"""Helper util to turn the input chat or chats into a set of IDs."""
if chats is None:
return None
if not utils.is_list_like(chats):
chats = (chats,)
result = set()
for chat in chats:
if isinstance(chat, int):
if chat < 0:
result.add(chat) # Explicitly marked IDs are negative
else:
result.update({ # Support all valid types of peers
utils.get_peer_id(types.PeerUser(chat)),
utils.get_peer_id(types.PeerChat(chat)),
utils.get_peer_id(types.PeerChannel(chat)),
})
elif isinstance(chat, TLObject) and chat.SUBCLASS_OF_ID == 0x2d45687:
# 0x2d45687 == crc32(b'Peer')
result.add(utils.get_peer_id(chat))
else:
chat = client.get_input_entity(chat)
if isinstance(chat, types.InputPeerSelf):
chat = client.get_me(input_peer=True)
result.add(utils.get_peer_id(chat))
return result
class EventBuilder(abc.ABC):
"""
The common event builder, with builtin support to filter per chat.
Args:
chats (`entity`, optional):
May be one or more entities (username/peer/etc.). By default,
only matching chats will be handled.
blacklist_chats (`bool`, optional):
Whether to treat the chats as a blacklist instead of
as a whitelist (default). This means that every chat
will be handled *except* those specified in ``chats``
which will be ignored if ``blacklist_chats=True``.
"""
def __init__(self, chats=None, blacklist_chats=False):
self.chats = chats
self.blacklist_chats = blacklist_chats
self._self_id = None
@abc.abstractmethod
def build(self, update):
"""Builds an event for the given update if possible, or returns None"""
def resolve(self, client):
"""Helper method to allow event builders to be resolved before usage"""
self.chats = _into_id_set(client, self.chats)
self._self_id = client.get_me(input_peer=True).user_id
def _filter_event(self, event):
"""
If the ID of ``event._chat_peer`` isn't in the chats set (or it is
but the set is a blacklist) returns ``None``, otherwise the event.
"""
if self.chats is not None:
inside = utils.get_peer_id(event._chat_peer) in self.chats
if inside == self.blacklist_chats:
# If this chat matches but it's a blacklist ignore.
# If it doesn't match but it's a whitelist ignore.
return None
return event
class EventCommon(abc.ABC):
"""Intermediate class with common things to all events"""
_event_name = 'Event'
def __init__(self, chat_peer=None, msg_id=None, broadcast=False):
self._entities = {}
self._client = None
self._chat_peer = chat_peer
self._message_id = msg_id
self._input_chat = None
self._chat = None
self.pattern_match = None
self.original_update = None
self.is_private = isinstance(chat_peer, types.PeerUser)
self.is_group = (
isinstance(chat_peer, (types.PeerChat, types.PeerChannel))
and not broadcast
)
self.is_channel = isinstance(chat_peer, types.PeerChannel)
def _get_entity(self, msg_id, entity_id, chat=None):
"""
Helper function to call :tl:`GetMessages` on the give msg_id and
return the input entity whose ID is the given entity ID.
If ``chat`` is present it must be an :tl:`InputPeer`.
Returns a tuple of ``(entity, input_peer)`` if it was found, or
a tuple of ``(None, None)`` if it couldn't be.
"""
try:
if isinstance(chat, types.InputPeerChannel):
result = self._client(
functions.channels.GetMessagesRequest(chat, [msg_id])
)
else:
result = self._client(
functions.messages.GetMessagesRequest([msg_id])
)
except RPCError:
return None, None
entity = {
utils.get_peer_id(x): x for x in itertools.chain(
getattr(result, 'chats', []),
getattr(result, 'users', []))
}.get(entity_id)
if entity:
return entity, utils.get_input_peer(entity)
else:
return None, None
@property
def input_chat(self):
"""
The (:tl:`InputPeer`) (group, megagroup or channel) on which
the event occurred. This doesn't have the title or anything,
but is useful if you don't need those to avoid further
requests.
Note that this might be ``None`` if the library can't find it.
"""
if self._input_chat is None and self._chat_peer is not None:
try:
self._input_chat = self._client.get_input_entity(
self._chat_peer
)
except (ValueError, TypeError):
# The library hasn't seen this chat, get the message
if not isinstance(self._chat_peer, types.PeerChannel):
# TODO For channels, getDifference? Maybe looking
# in the dialogs (which is already done) is enough.
if self._message_id is not None:
self._chat, self._input_chat = self._get_entity(
self._message_id,
utils.get_peer_id(self._chat_peer)
)
return self._input_chat
@property
def client(self):
return self._client
@property
def chat(self):
"""
The (:tl:`User` | :tl:`Chat` | :tl:`Channel`, optional) on which
the event occurred. This property may make an API call the first time
to get the most up to date version of the chat (mostly when the event
doesn't belong to a channel), so keep that in mind.
"""
if not self.input_chat:
return None
if self._chat is None:
self._chat = self._entities.get(utils.get_peer_id(self._input_chat))
if self._chat is None:
self._chat = self._client.get_entity(self._input_chat)
return self._chat
@property
def chat_id(self):
"""
Returns the marked integer ID of the chat, if any.
"""
if self._chat_peer:
return utils.get_peer_id(self._chat_peer)
def __str__(self):
return TLObject.pretty_format(self.to_dict())
def stringify(self):
return TLObject.pretty_format(self.to_dict(), indent=0)
def to_dict(self):
d = {k: v for k, v in self.__dict__.items() if k[0] != '_'}
d['_'] = self._event_name
return d
def name_inner_event(cls):
"""Decorator to rename cls.Event 'Event' as 'cls.Event'"""
if hasattr(cls, 'Event'):
cls.Event._event_name = '{}.Event'.format(cls.__name__)
else:
        warnings.warn('Class {} does not have an inner Event'.format(cls))
return cls
``` |
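EventBuilder is abstract; concrete builders implement build() and usually route the result through _filter_event so the chats whitelist/blacklist applies. Below is a stripped-down illustrative subclass, not one of Telethon's real event builders, assuming the raw update carries a message with to_id and id attributes.
```python
# Minimal illustrative builder that wraps raw new-message updates.
@name_inner_event
class RawMessage(EventBuilder):
    class Event(EventCommon):
        def __init__(self, message):
            super().__init__(chat_peer=message.to_id, msg_id=message.id)
            self.message = message

    def build(self, update):
        if isinstance(update, types.UpdateNewMessage):
            event = RawMessage.Event(update.message)
            event.original_update = update
            return self._filter_event(event)
```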
{
"source": "josse995/lanaChallenge",
"score": 2
} |
#### File: shop/migrations/0001_initial.py
```python
from django.db import migrations
def load_products(apps, schema_editor):
Product = apps.get_model("core", "Product")
product_to_save = Product(code='PEN', name='Lana Pen', price=5.00)
product_to_save.save()
product_to_save = Product(code='TSHIRT', name='Lana TShirt', price=20.00)
product_to_save.save()
product_to_save = Product(code='MUG', name='Lana Coffee Mug', price=7.50)
product_to_save.save()
def delete_products(apps, schema_editor):
Product = apps.get_model("core", "Product")
Product.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RunPython(load_products, delete_products),
]
```
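Because the migration pairs load_products with delete_products it is reversible like any schema migration. A sketch using Django's management API, assuming the app containing this migration is registered under the label 'shop':
```python
from django.core.management import call_command

call_command("migrate", "shop")          # forward: load_products inserts the three products
call_command("migrate", "shop", "zero")  # backward: delete_products removes them again
```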
#### File: shop/tests/test_product_api.py
```python
from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Product, Basket, BasketItem
from django.urls import reverse
BASKET_URL = reverse('shop:basket-list')
BASKET_URL_ADD = reverse('shop:basket-add')
BASKET_URL_CHECKOUT = '{0}{1}'.format(
reverse('shop:basket-checkout'), '?basket={0}')
def insert_into_basket_pen_product(basket, qty):
basketItem = BasketItem.objects.create(
basket=basket, product=Product.objects.get(code='PEN'))
basketItem.qty = qty
basketItem.save()
def insert_into_basket_tshirt_product(basket, qty):
basketItem = BasketItem.objects.create(
basket=basket, product=Product.objects.get(code='TSHIRT'))
basketItem.qty = qty
basketItem.save()
def insert_into_basket_mug_product(basket, qty):
basketItem = BasketItem.objects.create(
basket=basket, product=Product.objects.get(code='MUG'))
basketItem.qty = qty
basketItem.save()
class ShopApiTests(TestCase):
def setUp(self):
self.client = APIClient()
# Create basket
def test_create_basket(self):
"""Test that checks if a basket has been created properly"""
payload = {}
res = self.client.post(BASKET_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
basket = Basket.objects.get(id=res.data['id'])
self.assertTrue(basket.products)
# Delete basket
def test_delete_basket(self):
"""Test that check if a basket has been deleted properly"""
basket = Basket.objects.create()
self.client.delete('{0}{1}/'.format(BASKET_URL, basket.id))
basket = Basket.objects.filter(id=basket.id).first()
self.assertFalse(basket)
# Add to basket
def test_add_without_parameters(self):
"""Test that executes add without parameters"""
res = self.client.post(BASKET_URL_ADD, {})
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_add_to_non_existing_basket(self):
"""Test that adds a product to a non existing basket"""
payload = {'basket': 0,
'product': 'PEN'}
res = self.client.post(BASKET_URL_ADD, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_add_non_existing_product_to_a_basket(self):
"""Test that adds a non existing product to a basket"""
# Creates the basket
basket = Basket.objects.create()
payload = {'basket': basket.id,
'product': 'NOEXIST'}
res = self.client.post(BASKET_URL_ADD, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_add_product_to_a_basket(self):
"""Test that adds a product to a basket"""
# Creates the basket
basket = Basket.objects.create()
payload = {'basket': basket.id,
'product': 'PEN'}
res = self.client.post(BASKET_URL_ADD, payload)
self.assertEqual(res.status_code, status.HTTP_200_OK)
    def test_add_two_same_products_to_a_basket(self):
        """Test that adds a product twice to a basket"""
# Creates the basket
basket = Basket.objects.create()
payload = {'basket': basket.id,
'product': 'PEN'}
# Add the item to the basket twice
res = self.client.post(BASKET_URL_ADD, payload)
self.assertEqual(res.status_code, status.HTTP_200_OK)
basket = Basket.objects.get(id=basket.id)
basketItems = basket.products.all()
self.assertEqual(basketItems[0].qty, 1)
res = self.client.post(BASKET_URL_ADD, payload)
basket.refresh_from_db()
self.assertEqual(basketItems[0].qty, 2)
# Checkout
def test_checkout_example_1(self):
"""Test that retrieve the total of 1 pen, 1 t-shirt and 1 mug"""
basket = Basket.objects.create()
insert_into_basket_pen_product(basket, 1)
insert_into_basket_tshirt_product(basket, 1)
insert_into_basket_mug_product(basket, 1)
res = self.client.get(BASKET_URL_CHECKOUT.format(basket.id))
self.assertEqual(res.data['total'], "32.50€")
def test_checkout_example_2(self):
"""Test that retrieve the total of 2 pens and 1 t-shirt"""
basket = Basket.objects.create()
insert_into_basket_pen_product(basket, 2)
insert_into_basket_tshirt_product(basket, 1)
res = self.client.get(BASKET_URL_CHECKOUT.format(basket.id))
self.assertEqual(res.data['total'], "25.00€")
def test_checkout_example_3(self):
"""Test that retrieve the total of 4 t-shirts and 1 pen"""
basket = Basket.objects.create()
insert_into_basket_tshirt_product(basket, 4)
insert_into_basket_pen_product(basket, 1)
res = self.client.get(BASKET_URL_CHECKOUT.format(basket.id))
self.assertEqual(res.data['total'], "65.00€")
def test_checkout_example_4(self):
"""Test that retrieve the total of 3 pens, 3 t-shirts and 1 mug"""
basket = Basket.objects.create()
insert_into_basket_pen_product(basket, 3)
insert_into_basket_tshirt_product(basket, 3)
insert_into_basket_mug_product(basket, 1)
res = self.client.get(BASKET_URL_CHECKOUT.format(basket.id))
self.assertEqual(res.data['total'], "62.50€")
``` |
{
"source": "JosseArturo/CarND-Behavioral-Cloning-P3",
"score": 3
} |
#### File: JosseArturo/CarND-Behavioral-Cloning-P3/model.py
```python
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Flatten, Dense, Lambda, MaxPooling2D, Dropout, Convolution2D, Cropping2D
## Pre-process function
def pre_process_image(image):
#Change format from BGR to RGB
colored_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Optionally resize the image to speed up training (currently disabled)
    #resized_image = cv2.resize(colored_image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
return colored_image
## Network Model
def nvidiaModel():
# Model based on Nvidia network
model = Sequential()
# Normalization
model.add(Lambda(lambda x: (x / 255.0) - 0.5 , input_shape=processed_image_shape))
# Crop the image, to used just the important part.
model.add(Cropping2D(cropping=((50,20), (0,0))))
#Network
model.add(Convolution2D(24,5,5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(36,5,5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(48,5,5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(64,3,3, activation='relu'))
model.add(Convolution2D(64,3,3, activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
# Alternative Model
# model = Sequential()
# model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=processed_image_shape))
# #In case to set the shape of the image manually
# #model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(32,80,3)))
# model.add(Conv2D(24, (5,5), activation='relu'))
# #model.add(BatchNormalization())
# model.add(Conv2D(36, (5, 5), activation='relu'))
# #model.add(BatchNormalization())
# model.add(Conv2D(48, (5, 5), activation='relu'))
# #model.add(BatchNormalization())
# model.add(Conv2D(64, (3, 3), activation='relu'))
# #model.add(BatchNormalization())
# model.add(Conv2D(64, (3, 3), activation='relu'))
# #model.add(BatchNormalization())
# model.add(Flatten())
# model.add(Dense(100))
# #model.add(BatchNormalization())
# #model.add(Dropout(0.25))
# model.add(Dense(50))
# #model.add(BatchNormalization())
# #model.add(Dropout(0.25))
# model.add(Dense(10))
# #model.add(BatchNormalization())
# #model.add(Dropout(0.25))
# model.add(Dense(1))
return model
## Gnerator created to define the trainig through batches
def generator(input_data, image_path, batch_size=32, left_image_angle_correction = 0.20, right_image_angle_correction = -0.20):
#Create batch to process
processing_batch_size = int(batch_size)
number_of_entries = len(input_data)
while 1:
        # Loop indefinitely; a Keras generator must never terminate
for offset in range(0, number_of_entries, processing_batch_size):
#Processing each sample of the batch
#Define the batch data
batch_data = input_data[offset:offset + processing_batch_size]
images = []
angles = []
for batch_sample in batch_data:
##Process to each sample of the batch
#First take the center image and its angle
path_center_image = image_path+(batch_sample[0].strip()).split('/')[-1]
angle_for_centre_image = float(batch_sample[3])
center_image = cv2.imread(path_center_image)
#Check if the image is OK
if center_image is not None:
# Pre-process the center image
processed_center_image = pre_process_image(center_image)
#And start to populate the vector of images
images.append(processed_center_image)
angles.append(angle_for_centre_image)
## Flipping the image
images.append(cv2.flip(processed_center_image, 1))
angles.append(-angle_for_centre_image)
                #IMPORTANT - The dataset uses the left, right and center views, as well as the flipped center view
                #If needed, the left and right views could be flipped as well
## Pre-process the left image
left_image_path = image_path + batch_sample[1].split('/')[-1]
left_image = cv2.imread(left_image_path)
if left_image is not None:
images.append(pre_process_image(left_image))
angles.append(angle_for_centre_image + left_image_angle_correction)
# Pre-process the right image
right_image_path = image_path + batch_sample[2].split('/')[-1]
right_image = cv2.imread(right_image_path)
if right_image is not None:
images.append(pre_process_image(right_image))
angles.append(angle_for_centre_image + right_image_angle_correction)
# Shuffling and returning the image data back to the calling function
yield sklearn.utils.shuffle(np.array(images), np.array(angles))
# Define Constants and Paths
data_path = "data/"
image_path = data_path + "IMG/"
csv_data = []
processed_csv_data = []
csvPath = 'data/driving_log.csv'
#Loading Data (CSV reference from the data)
with open(csvPath) as csv_file:
csv_reader = csv.reader(csv_file)
# Skipping the headers
next(csv_reader, None)
for each_line in csv_reader:
csv_data.append(each_line)
# Shuffle the csv entries and split the train and validation dataset
csv_data = sklearn.utils.shuffle(csv_data)
train_samples, validation_samples = train_test_split(csv_data, test_size=0.2)
#Datasets
train_generator = generator(train_samples, image_path)
validation_generator = generator(validation_samples, image_path)
#Get the shape of a pre-processed image
first_img_path = image_path + csv_data[0][0].split('/')[-1]
first_image = cv2.imread(first_img_path)
processed_image_shape = pre_process_image(first_image).shape
print (processed_image_shape)
#Compile the model
model = nvidiaModel()
model.compile(optimizer= 'adam', loss='mse', metrics=['acc'])
# Name of the model to save
file = 'model.h5'
##Define some callbacks to save training time and to keep the best result
#Stop training in case of no improvement
stopper = EarlyStopping(patience=5, verbose=1)
#Save the best model
checkpointer = ModelCheckpoint(file, monitor='val_loss', verbose=1, save_best_only=True)
print("Training")
epoch = 1
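# Each csv line can yield up to four samples from the generator (center image, its
# horizontal flip, and the left and right camera images), which is why
# samples_per_epoch and nb_val_samples below are scaled by 4.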
history_object = model.fit_generator(train_generator,
samples_per_epoch = 4*len(train_samples),
validation_data = validation_generator,
nb_val_samples = 4*len(validation_samples),
nb_epoch=epoch,
verbose=1)
#saving model
print("Saving model")
model.save(file)
print("Model Saved")
# keras method to print the model summary
model.summary()
##Take some information about the training and the final model
print(history_object.history.keys())
print('Loss')
print(history_object.history['loss'])
print('Validation Loss')
print(history_object.history['val_loss'])
#Plot results validation_loss and train_loss
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
#plt.show()
plt.savefig('Train_Valid_Loss.png')
``` |
{
"source": "Jossec101/PartAlertLinkOpener",
"score": 3
} |
#### File: Jossec101/PartAlertLinkOpener/main.py
```python
from telethon import TelegramClient, events, sync
import winsound, webbrowser
import urllib.parse as urlparse
from urllib.parse import parse_qs
import re
from bs4 import BeautifulSoup
import requests
############################### TO BE CHANGED -> check https://core.telegram.org/api/obtaining_api_id ###################################################
api_id = CHANGEME
api_hash = CHANGEME
##########################################################################################################################################################
def open_url(url):
webbrowser.get().open(url)
print(f'Opened {url}')
frequency = 2500 # Set Frequency To 2500 Hertz
    duration = 5000 # Set Duration To 5000 ms == 5 seconds
winsound.Beep(frequency, duration)
def main():
client = TelegramClient('session_name', api_id, api_hash)
client.start()
# Declare your channel id variables here
channel3080Id = None
channel3090Id = None
for dialog in client.iter_dialogs():
########################################################## Search your channels like those examples for RTX 3080 and RTX 3090 ######################################
if '3080' in dialog.name:
print(f"PartAlert 3080 channel id: {dialog.id}")
channel3080Id = dialog.id
if '3090' in dialog.name:
print(f"PartAlert 3090 channel id: {dialog.id}")
channel3090Id = dialog.id
##########################################################################################################################################################
@client.on(events.NewMessage(chats=[channel3080Id,channel3090Id]))
async def my_event_handler(event):
url = event.message.message
url = re.findall(r'(https?://\S+)', url)[0]
        # Change this in case they change the url
if("alert.partalert.net" in url):
req = requests.get(url)
soup = BeautifulSoup(req.text, "html.parser")
for link in soup.find_all('a'):
href = link.get("href")
if("amazon") in href:
url = href.split('?')[0]
open_url(url)
else:
open_url(url)
client.run_until_disconnected()
main()
``` |
{
"source": "jossefaz/async-http-client",
"score": 2
} |
#### File: async-http-client/http_async_client/base.py
```python
from functools import partial
from typing import Union, Dict, Optional
from http_async_client.enums import SupportedProtocols, Methods
import httpx
import re
from dataclasses import dataclass
from httpx._types import RequestContent, URLTypes, RequestData, RequestFiles, QueryParamTypes, HeaderTypes, CookieTypes
from nanoid import generate
import base64
import threading
from httpx import Request
class EndPointRegistry(type):
    """This class is a singleton that inherits from the `type` class, so it can be provided as a metaclass to other classes
    This class is the core of the HTTP client and what distinguishes it from other clients: it allows different
    domains to be managed within the same class
    This is very useful, for example, if you need to send requests to different third-party APIs and you want to follow
    the path of each request under the same request ID.
    With this class you can keep a domain registry. Every new domain will be registered to this class. On each new call,
    it will check if the domain exists in the registry and, if not, it will
    create an entry for it. Afterward it will set this domain as the current domain.
"""
def __init__(cls, *args, **kwargs):
cls.__instance = None
cls._locker = threading.Lock()
cls.endpoints_registry: Dict[bytes, EndPoint] = {}
cls.current = bytes()
super().__init__(*args, **kwargs)
def __call__(cls, *args, **kwargs):
"""
Instantiate the Singleton using the thread library in order to guarantee only one instance !
Arguments:
host: string, the domain's host
port: int : Optional
protocol : string, must be a member of the SupportedProtocol Enum
Returns:
cls.__instance : EndPointRegistry instance
"""
if cls.__instance is None:
with cls._locker:
if cls.__instance is None:
cls.__instance = super().__call__(*args, **kwargs)
# On each call : add to registry (if it is already in the reg, it wont be added but only defined as current)
cls.add_to_reg(**kwargs)
return cls.__instance
    def add_to_reg(cls, **kwargs):
        """Create an EndPoint instance and add it to the registry if its base64-encoded base URL is not already present
        In that way, the same origin used with two different ports results in two different entries in the registry
Arguments:
host: string, the domain's host
port: int : Optional
protocol : string, must be a member of the SupportedProtocol Enum
"""
port = kwargs.get("port", None)
protocol = kwargs.get("protocol", None)
host = kwargs.get("host", None)
end_point = EndPoint(host, port, protocol)
if not end_point.base_url:
raise ValueError("EndPointRegistry error trying to add new client : host is missing")
try:
end_point_key = base64.b64encode(bytes(end_point.base_url, encoding='utf-8'))
if end_point_key not in cls.endpoints_registry:
cls.endpoints_registry[end_point_key] = end_point
cls.current = end_point_key
except TypeError as te:
raise TypeError(f"Cannot encode base url to registry : {str(te)}")
@dataclass
class EndPoint:
host: str
port: int
_protocol: str
@property
    def base_url(self) -> Union[bool, str]:
        """Build the base url from the protocol, the host and the port. Only the host is mandatory; the others are either omitted or given default values.
Returns:
The Base URL following this template "{protocol}://{host}:{port}"
"""
if not self.host:
return False
return f"{self.protocol.value}://{self.host}:{self.port}" if self.port \
else f"{self.protocol.value}://{self.host}"
@property
def protocol(self) -> SupportedProtocols:
"""Get the protocol if the one that was given in constructor is supported, otherwise give the default http protocol
Returns:
Entry of the enum SupportedProtocols
"""
if self._protocol in SupportedProtocols.__members__:
return SupportedProtocols[self._protocol]
return SupportedProtocols.http
class BaseRESTAsyncClient(metaclass=EndPointRegistry):
def __init__(self, *, host, port=None, protocol=None):
self._request_id = None
@classmethod
def get_instance(cls, *, host: str, port: Optional[int] = None,
protocol: Optional[str] = None) -> "partial[BaseRESTAsyncClient]":
"""Will return a factory (as a partial function) in order to always ensure the current endpoint is selected in the endpoints registry
Arguments:
host: domain's host
port: listening port
protocol: Network Protocol (must be a value of the SupportedProtocols Enum)
Returns:
partial function (BaseRESTAsyncClient factory)
Example:
```python
client = BaseRESTAsyncClient.get_instance("example.com", 8080, "https")
```
"""
return partial(BaseRESTAsyncClient, host=host, port=port, protocol=protocol)
@property
def request_id(self) -> str:
"""Getter for the request id
Returns:
nanoid: uid of the current request
"""
if not self._request_id:
return None
return str(self._request_id)
@request_id.setter
def request_id(self, req_id):
"""Setter for the request id
Arguments:
req_id : UID (nanoid) of the request
Todo:
            * Check if there is any pre-existing request ID in the incoming request headers and generate one ONLY IF there is none
"""
self._request_id = generate()
def get_base_url(self) -> str:
return self.endpoints_registry[self.current].base_url
def make_url(self, url: str = ""):
"""Url builder based on the host base url
Arguments:
            url: relative url that will be concatenated with the host base url
Returns:
string: An absolute url including the protocol, the host base url, port (if any) and the relative url if any
"""
        # Collapse any repeated "/" characters into a single one throughout the url
url = re.sub('/+', '/', url)
# remove the first "/" at the beginning
url = re.sub('^/', '', url)
return f"{self.get_base_url()}/{url}"
async def _send_request(self, req: Request):
"""
Arguments:
req: a Request ([httpx](https://www.python-httpx.org/api/#request) type)
Returns:
coroutine: handle the HTTP response by awaiting it
"""
async with httpx.AsyncClient() as client:
return await client.send(req)
async def get(self,
url: URLTypes = "",
*,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None):
"""Prepare an HTTP `GET` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
params: Query string
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
Returns:
            coroutine : result of the `_send_request` method. It needs to be awaited in order to get the HTTP response
"""
request = Request(Methods.get.value, self.make_url(url), params=params, headers=headers, cookies=cookies)
return await self._send_request(request)
async def post(self,
url: URLTypes = "",
*,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
content: RequestContent = None,
data: RequestData = None,
files: RequestFiles = None):
"""Prepare an HTTP `POST` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
data: JSON, Files, Form,
content: All contents that are NOT one of : Form encoded, Multipart files, JSON. Could be use for text or binaries
files: Blob stream
Returns:
            coroutine : result of the `_send_request` method. It needs to be awaited in order to get the HTTP response
"""
request = Request(Methods.post.value, self.make_url(url),
content=content,
data=data,
files=files,
headers=headers,
cookies=cookies)
return await self._send_request(request)
async def put(self,
url: URLTypes = "",
*,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
data: RequestData = None):
"""Prepare an HTTP `PUT` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
data: JSON, Files, Form,
Returns:
            coroutine : result of the `_send_request` method. It needs to be awaited in order to get the HTTP response
"""
request = Request(Methods.put.value, self.make_url(url),
data=data,
headers=headers,
cookies=cookies)
return await self._send_request(request)
async def patch(self,
url: URLTypes = "",
*,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
data: RequestData = None):
"""Prepare an HTTP `PATCH` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
data: JSON, Files, Form,
Returns:
            coroutine : result of the `_send_request` method. It needs to be awaited in order to get the HTTP response
"""
request = Request(Methods.patch.value, self.make_url(url),
data=data,
headers=headers,
cookies=cookies)
return await self._send_request(request)
async def delete(self,
url: URLTypes = "",
*,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None):
"""Prepare an HTTP `DELETE` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
params: Query string
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
Returns:
            coroutine : result of the `_send_request` method. It needs to be awaited in order to get the HTTP response
"""
request = Request(Methods.delete.value, self.make_url(url), params=params, headers=headers, cookies=cookies)
return await self._send_request(request)
def __call__(self, *args, **kwargs):
"""
        Throws an error so that BaseRESTAsyncClient cannot be called directly, forcing use of the get_instance class method instead
"""
raise TypeError("BaseClient cannot be called directly use get_instance class method instead")
async_client_factory = BaseRESTAsyncClient.get_instance
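# Usage sketch (the host names below are illustrative only):
#   client = async_client_factory(host="api.example.com", protocol="https")
#   other = async_client_factory(host="other.example.org")
#   # Both partials resolve to the same singleton; each call re-selects the current
#   # endpoint in the registry before the request is built, e.g.
#   #   response = await client().get("status")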
```
#### File: async-http-client/tests/test_main.py
```python
from typing import Type, Any
import pytest
from http_async_client.base import BaseRESTAsyncClient, EndPoint
@pytest.mark.http_async
@pytest.mark.parametrize("host, port, protocol, rel_url, expected_full_url", [
("example.com", None, None, "/gene/diseases/query",
"http://example.com/gene/diseases/query"),
("example.com", 8080, "https", "/gene/diseases/query",
"https://example.com:8080/gene/diseases/query"),
("example.com", None, None, "/////gene///diseases//query",
"http://example.com/gene/diseases/query"),
("example.com", None, None, "gene///diseases//query",
"http://example.com/gene/diseases/query"),
], ids=["Build a url with no port or protocol",
        "Build a url with a port and protocol",
        "all redundant slashes are removed from url",
        "works without the leading slash"])
def test_make_url(http_client: Type[BaseRESTAsyncClient], host, port, protocol, rel_url, expected_full_url):
rest_client = http_client(host=host, port=port, protocol=protocol)
assert rest_client().make_url(rel_url) == expected_full_url
@pytest.mark.http_async
def test_cannot_instantiate_http_client_if_no_host(http_client: Type[BaseRESTAsyncClient]):
with pytest.raises(ValueError, match="EndPointRegistry error trying to add new client : host is missing"):
rest_client = http_client(host=None, port=None, protocol=None)
rest_client()
@pytest.mark.http_async
@pytest.mark.parametrize("host, host2, url1, url2", [
("first_domain.com", "second_domain.com", "http://first_domain.com", "http://second_domain.com"),
], ids=["Singleton that can change domain on call"])
def test_http_client_singleton(http_client: Type[BaseRESTAsyncClient], host: str, host2: str, url1: str, url2: str):
rest_client1 = http_client(host=host)
rest_client2 = http_client(host=host2)
# assert that addresses are the same (singleton)
assert hex(id(rest_client1())) == hex(id(rest_client2()))
# assert that calling each client base url getter will return the corresponding url from the registry
assert rest_client1().get_base_url() == url1
assert rest_client2().get_base_url() == url2
@pytest.mark.http_async
@pytest.mark.parametrize("host, port, protocol, expected_url", [
("first_domain.com", 8080, "https", "https://first_domain.com:8080"),
], ids=["Url is build with domain, port and protocol"])
def test_end_point_data_class(host: str, port: int, protocol: str,
expected_url: str):
endpoint = EndPoint(host, port, protocol)
assert endpoint.base_url == expected_url
@pytest.mark.http_async
@pytest.mark.asyncio
@pytest.mark.parametrize("host, port, protocol, expected_status_code", [
("localhost", 3000, "http", 200),
], ids=["Async get return expected status code"])
async def test_get_async(http_client, internet_connection, httpx_mock, monkeypatch, host: str, port: int, protocol: str,
expected_status_code: int):
if not internet_connection:
host = "local"
httpx_mock.add_response(status_code=expected_status_code)
rest_client = http_client(host=host, port=port, protocol=protocol)
response = await rest_client().get("content")
assert response.status_code == expected_status_code
@pytest.mark.http_async
@pytest.mark.asyncio
@pytest.mark.parametrize("host, port, protocol, expected_status_code", [
("localhost", 3000, "http", 204),
], ids=["Async delete return expected status code"])
async def test_delete_async(http_client, internet_connection, httpx_mock, monkeypatch, host: str, port: int,
protocol: str,
expected_status_code: int):
if not internet_connection:
host = "local"
httpx_mock.add_response(status_code=expected_status_code)
rest_client = http_client(host=host, port=port, protocol=protocol)
response = await rest_client().delete("content")
assert response.status_code == expected_status_code
@pytest.mark.http_async
@pytest.mark.asyncio
@pytest.mark.parametrize("host, port, protocol, body, headers, expected_status_code", [
("localhost", 3000, "http",
{
"title": 'foo',
"body": 'bar',
"userId": 1,
},
{
'Content-type': 'application/json; charset=UTF-8',
},
201),
], ids=["Async post return expected status code"])
async def test_post_async(http_client, internet_connection, httpx_mock, monkeypatch, host: str, port: int,
protocol: str, body: dict[str, Any], headers: dict[str, str],
expected_status_code: int):
if not internet_connection:
host = "local"
httpx_mock.add_response(status_code=expected_status_code)
rest_client = http_client(host=host, port=port, protocol=protocol)
response = await rest_client().post("content", headers=headers, data=body)
assert response.status_code == expected_status_code
@pytest.mark.http_async
@pytest.mark.asyncio
@pytest.mark.parametrize("host, port, protocol, body, headers, expected_status_code", [
("localhost", 3000, "http",
{
"title": "",
"body": "",
"userId": 1,
},
{
'Content-type': 'application/json; charset=UTF-8',
},
204),
], ids=["Async put return expected status code"])
async def test_put_async(http_client, internet_connection, httpx_mock, monkeypatch, host: str, port: int,
protocol: str, body: dict[str, Any], headers: dict[str, str],
expected_status_code: int):
if not internet_connection:
host = "local"
httpx_mock.add_response(status_code=expected_status_code)
rest_client = http_client(host=host, port=port, protocol=protocol)
response = await rest_client().put("content", headers=headers, data=body)
assert response.status_code == expected_status_code
``` |
{
"source": "jossef/power-scanner",
"score": 2
} |
#### File: jossef/power-scanner/powmap.py
```python
import argparse
import logging
import re
import struct
import uuid
import sys
from struct import *
import sys
import os
import array
import fcntl
import socket
import fcntl
import struct
import array
from multiprocessing.pool import ThreadPool
from powscan_common.banner_grabber import *
from powscan_common.network_mapper import *
from powscan_common.port_helper import *
from powscan_common.port_scanner import *
from powscan_common.networking_helper import *
from prettytable import PrettyTable
__author__ = '<NAME>'
def parse_command_line_args():
parser = argparse.ArgumentParser(description='Powmap - Cyber security cource TASK 2 Building Scanning tool')
parser.add_argument('-iface_ip', dest='interface_ip_address', metavar='IP', help='network ip', required=True)
parser.add_argument('-timeout', dest='timeout', metavar='TIME', type=int, help='timeout on socket connections (milliseconds)', required=True)
args = parser.parse_args()
ip_address_regex = re.compile(
"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$")
interface_ip_address = args.interface_ip_address
if not ip_address_regex.match(interface_ip_address):
print 'Invalid interface ip address: {0}'.format(interface_ip_address)
parser.print_help()
sys.exit(1)
timeout = args.timeout
return interface_ip_address, timeout
def main():
# Verify user root
if not os.geteuid() == 0:
print "root required! please use 'sudo' to run as root"
return 1
# Verify not windows
if sys.platform == 'win32':
print "not supported on windows. linux only"
return 1
interface_ip_address, timeout = parse_command_line_args()
    # Create the mapper
    icmp_mapper = IcmpNetworkMapper(timeout=timeout)
    print 'mapping ... (please be patient)'
    print
    endpoints = icmp_mapper.map(interface_ip_address=interface_ip_address)
    # ------- === ------
    # Print the discovered endpoints
    for endpoint in endpoints:
        print endpoint
if __name__ == "__main__":
main()
```
#### File: power-scanner/powscan_common/banner_helper.py
```python
__author__ = '<NAME>'
def get_ftp_banner_info(banner):
# Lower the banner's case in order to get case insensitive match
banner = banner.lower()
server = None
operating_system = None
if any(hint for hint, os in ftp_servers.iteritems() if hint in banner):
server, operating_system = ((hint, os) for hint, os in ftp_servers.iteritems() if hint in banner).next()
return server, operating_system
def get_smtp_banner_info(banner):
# Lower the banner's case in order to get case insensitive match
banner = banner.lower()
server = None
operating_system = None
if any(hint for hint, os in smtp_servers.iteritems() if hint in banner):
server, operating_system = ((hint, os) for hint, os in smtp_servers.iteritems() if hint in banner).next()
return server, operating_system
def get_http_banner_info(banner):
# Lower the banner's case in order to get case insensitive match
banner = banner.lower()
server = known_banner_web_servers.get(banner, None)
operating_system = None
# If we successfully matched a server
if server:
if any(item in banner for item in windows_hints):
operating_system = 'windows'
elif any(item in banner for item in linux_hints):
            distribution = (item for item in linux_hints if item in banner).next()
operating_system = 'linux ({0})'.format(distribution)
elif any(item in banner for item in mac_os_hints):
operating_system = 'mac os'
# Otherwise, let's try to guess using hints
else:
if any(item in banner for item in hosting_hints):
operating_system = 'filtered (hosting protection)'
server = banner
return server, operating_system
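# Usage sketch (the banner string below is illustrative):
#   get_http_banner_info('Apache/2.2.3 (Debian) PHP/4.4.4-8+etch4')
#   # -> ('apache 2.2.3', 'linux (debian)')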
# -------------------------------------------------------------------
# Static hard-coded data below (in real life should be more dynamic..)
# -- -- -- -- -- -- -- --
# Most info has been scarped from http://www.computec.ch/projekte/httprecon/?s=database&t=head_existing&f=banner
known_banner_web_servers = {
'0w/0.8c': '0w 0.8c',
'webstar/2.0 id/33333': '4d webstar 2.0',
'webstar/2.1.1 id/33333': '4d webstar 2.1.1',
'webstar/3.0.2 id/878810': '4d webstar 3.0.2',
'webstar/4.2(ssl) id/79106': '4d webstar 4.2',
'webstar/4.5(ssl) id/878810': '4d webstar 4.5',
'4d_webstar_s/5.3.1 (macos x)': '4d webstar 5.3.1',
'4d_webstar_s/5.3.3 (macos x)': '4d webstar 5.3.3',
'4d_webstar_s/5.4.0 (macos x)': '4d webstar 5.4.0',
'aidex/1.1 (win32)': 'aidex mini-webserver 1.1',
'naviserver/2.0 aolserver/2.3.3': 'aolserver 2.3.3',
'aolserver/3.3.1+ad13': 'aolserver 3.3.1',
'aolserver 3.4.2': 'aolserver 3.4.2',
'aolserver/3.4.2 sp/1': 'aolserver 3.4.2',
'aolserver/3.5.10': 'aolserver 3.4.2',
'aolserver/3.5.0': 'aolserver 3.5.0',
'aolserver/4.0.10': 'aolserver 4.0.10',
'aolserver/4.0.10a': 'aolserver 4.0.10a',
'aolserver/4.0.11a': 'aolserver 4.0.11a',
'aolserver/4.5.0': 'aolserver 4.5.0',
'abyss/2.0.0.20-x2-win32 abysslib/2.0.0.20': 'abyss 2.0.0.20 x2',
'abyss/2.4.0.3-x2-win32 abysslib/2.4.0.3': 'abyss 2.4.0.3 x2',
'abyss/2.5.0.0-x1-win32 abysslib/2.5.0.0': 'abyss 2.5.0.0 x1',
'abyss/2.5.0.0-x2-linux abysslib/2.5.0.0': 'abyss 2.5.0.0 x2',
'abyss/2.5.0.0-x2-macos x abysslib/2.5.0.0': 'abyss 2.5.0.0 x2',
'abyss/2.5.0.0-x2-win32 abysslib/2.5.0.0': 'abyss 2.5.0.0 x2',
'abyss/2.6.0.0-x2-linux abysslib/2.6.0.0': 'abyss 2.6.0.0 x2',
'allegroserve/1.2.50': 'allegroserve 1.2.50',
'anti-web v3.0.7 (fear and loathing on the www)': 'anti-web httpd 3.0.7',
'antiweb/4.0beta13': 'anti-web httpd 4.0beta13',
'apache/1.2.6': 'apache 1.2.6',
'apache/1.3.12 (unix) php/3.0.14': 'apache 1.3.12',
'apache/1.3.17 (win32)': 'apache 1.3.17',
'apache/1.3.26 (linux/suse) mod_ssl/2.8.10 openssl/0.9.6g php/4.2.2': 'apache 1.3.26',
'apache/1.3.26 (unitedlinux) mod_python/2.7.8 python/2.2.1 php/4.2.2': 'apache 1.3.26',
'apache/1.3.26 (unix)': 'apache 1.3.26',
'apache/1.3.26 (unix) debian gnu/linux php/4.1.2': 'apache 1.3.26',
'apache/1.3.26 (unix) debian gnu/linux mod_ssl/2.8.9 openssl/0.9.6g': 'apache 1.3.26',
'mit web server apache/1.3.26 mark/1.5 (unix) mod_ssl/2.8.9': 'apache 1.3.26',
'apache/1.3.27 (linux/suse) mod_ssl/2.8.12 openssl/0.9.6i php/4.3.1': 'apache 1.3.27',
'apache/1.3.27 (turbolinux) mod_throttle/3.1.2 mod_ruby/0.9.7 ruby/1.6.4': 'apache 1.3.27',
'apache/1.3.27 (unix) (red-hat/linux)': 'apache 1.3.27',
'apache/1.3.27 (unix) (red-hat/linux) mod_python/2.7.8 python/1.5.2': 'apache 1.3.27',
'apache/1.3.27 (unix) (red-hat/linux) mod_ssl/2.8.12 openssl/0.9.6b': 'apache 1.3.27',
'apache/1.3.27 (unix) php/4.3.1': 'apache 1.3.27',
'apache/1.3.27 (unix) mod_perl/1.27': 'apache 1.3.27',
'apache/1.3.27 (win32)': 'apache 1.3.27',
'apache/1.3.28 (unix) mod_perl/1.27 php/4.3.3': 'apache 1.3.28',
'apache/1.3.29 (debian gnu/linux) mod_perl/1.29': 'apache 1.3.29',
'apache/1.3.29 (unix)': 'apache 1.3.29',
'apache/1.3.31 (unix)': 'apache 1.3.31',
'anu_webapp': 'apache 1.3.33',
'apache/1.3.33 (darwin) php/5.2.1': 'apache 1.3.33',
'apache/1.3.33 (darwin) mod_ssl/2.8.24 openssl/0.9.7l mod_jk/1.2.25': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) php/4.3.10-20 mod_perl/1.29': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) php/4.3.8-9 mod_ssl/2.8.22': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) mod_gzip/1.3.26.1a php/4.3.10-22': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) mod_python/2.7.10 python/2.3.4': 'apache 1.3.33',
'apache/1.3.33 (openpkg/2.4) mod_gzip/1.3.26.1a php/4.3.11 mod_watch/3.17': 'apache 1.3.33',
'apache/1.3.33 (unix) php/4.3.10 frontpage/5.0.2.2510': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_auth_passthrough/1.8 mod_bwlimited/1.4': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_fastcgi/2.4.2 mod_gzip/1.3.26.1a mod_ssl/2.8.22': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_perl/1.29': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_ssl/2.8.22 openssl/0.9.7d php/4.3.10': 'apache 1.3.33',
'apache/1.3.34': 'apache 1.3.34',
'apache/1.3.34 (debian) authmysql/4.3.9-2 mod_ssl/2.8.25 openssl/0.9.8c': 'apache 1.3.34',
'apache/1.3.34 (debian) php/4.4.4-8+etch4': 'apache 1.3.34',
'apache/1.3.34 (debian) php/5.2.0-8+etch7 mod_ssl/2.8.25 openssl/0.9.8c': 'apache 1.3.34',
'apache/1.3.34 (unix) (gentoo) mod_fastcgi/2.4.2': 'apache 1.3.34',
'apache/1.3.34 (unix) (gentoo) mod_perl/1.30': 'apache 1.3.34',
'apache/1.3.34 (unix) (gentoo) mod_ssl/2.8.25 openssl/0.9.7e': 'apache 1.3.34',
'apache/1.3.34 (unix) php/4.4.2 mod_perl/1.29 dav/1.0.3 mod_ssl/2.8.25': 'apache 1.3.34',
'apache/1.3.34 (unix) mod_jk/1.2.15 mod_perl/1.29 mod_gzip/1.3.26.1a': 'apache 1.3.34',
'apache/1.3.35 (unix)': 'apache 1.3.35',
'apache/1.3.27 (unix) (red-hat/linux) mod_perl/1.26 php/4.3.3': 'apache 1.3.37',
'apache/1.3.37 (unix) frontpage/5.0.2.2635 mod_ssl/2.8.28 openssl/0.9.7m': 'apache 1.3.37',
'apache/1.3.37 (unix) php/4.3.11': 'apache 1.3.37',
'apache/1.3.37 (unix) php/4.4.7 mod_throttle/3.1.2 frontpage/5.0.2.2635': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.1.2': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.2.0': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.2.1': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.2.3 mod_auth_passthrough/1.8': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_auth_passthrough/1.8 mod_log_bytes/1.2': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_perl/1.29': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_perl/1.30 mod_ssl/2.8.28 openssl/0.9.7e-p1': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_ssl/2.8.28 openssl/0.9.7d': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_ssl/2.8.28 openssl/0.9.8d': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_throttle/3.1.2 dav/1.0.3 mod_fastcgi/2.4.2': 'apache 1.3.37',
'apache/1.3.37 ben-ssl/1.57 (unix) mod_gzip/1.3.26.1a mod_fastcgi/2.4.2': 'apache 1.3.37',
'apache/1.3.37.fb1': 'apache 1.3.37',
'apache/1.3.39 (unix) dav/1.0.3 mod_auth_passthrough/1.8': 'apache 1.3.39',
'apache/1.3.39 (unix) php/4.4.7': 'apache 1.3.39',
'apache/1.3.39 (unix) php/5.2.3 mod_bwlimited/1.4': 'apache 1.3.39',
'apache/1.3.39 (unix) php/5.2.5 dav/1.0.3 mod_ssl/2.8.30 openssl/0.9.7c': 'apache 1.3.39',
'apache/1.3.39 (unix) mod_auth_passthrough/1.8 mod_log_bytes/1.2': 'apache 1.3.39',
'apache/1.3.39 (unix) mod_fastcgi/2.4.2 mod_auth_passthrough/1.8': 'apache 1.3.39',
'apache/1.3.39 ben-ssl/1.57 (unix) mod_perl/1.30 frontpage/5.0.2.2624': 'apache 1.3.39',
'apache/1.3.41 (unix) php/5.2.8': 'apache 1.3.41',
'apache/2.0.45 (unix) mod_jk2/2.0.3-dev': 'apache 2.0.45',
'apache/2.0.45 (unix) mod_perl/1.99_09-dev perl/v5.6.1 covalent_auth/2.3': 'apache 2.0.45',
'apache/2.0.46 (centos)': 'apache 2.0.46',
'apache/2.0.46 (red hat)': 'apache 2.0.46',
'apache/2.0.46 (white box)': 'apache 2.0.46',
'apache/2.0.48 (redhat 9/server4you)': 'apache 2.0.48',
'apache/2.0.49 (linux/suse)': 'apache 2.0.49',
'apache/2.0.49 (unix) php/4.3.9': 'apache 2.0.49',
'apache/2.0.50 (linux/suse)': 'apache 2.0.50',
'apache/2.0.50 (ydl)': 'apache 2.0.50',
'apache/2.0.51 (fedora)': 'apache 2.0.51',
'apache/2.0.52 (centos)': 'apache 2.0.52',
'apache/2.0.52 (fedora)': 'apache 2.0.52',
'apache/2.0.52 (red hat)': 'apache 2.0.52',
'apache/2.0.52 (unix)': 'apache 2.0.52',
'apache/2.0.52 (unix) dav/2 php/4.4.1': 'apache 2.0.52',
'apache/2.0.52 (win32)': 'apache 2.0.52',
'apache/2.0.52 (win32) mod_ssl/2.0.52 openssl/0.9.7e mod_auth_sspi/1.0.1': 'apache 2.0.52',
'apache/2.0.53 (linux/suse)': 'apache 2.0.53',
'apache/2.0.54 (debian gnu/linux) dav/2 svn/1.1.4': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) php/4.3.10-18': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) php/4.3.10-22 mod_ssl/2.0.54': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) php/5.1.2': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) mod_jk/1.2.14 php/5.2.4-0.dotdeb.0 with': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) mod_ssl/2.0.54 openssl/0.9.7e php/4.4.6': 'apache 2.0.54',
'apache/2.0.54 (fedora)': 'apache 2.0.54',
'apache/2.0.54 (linux/suse)': 'apache 2.0.54',
'apache/2.0.54 (netware) mod_jk/1.2.14': 'apache 2.0.54',
'apache/2.0.54 (unix) php/4.4.7 mod_ssl/2.0.54 openssl/0.9.7e': 'apache 2.0.54',
'apache/2.0.55': 'apache 2.0.55',
'apache/2.0.55 (freebsd) php/5.2.3 with suhosin-patch': 'apache 2.0.55',
'apache/2.0.55 (ubuntu) dav/2 php/4.4.2-1.1 mod_ssl/2.0.55 openssl/0.9.8b': 'apache 2.0.55',
'apache/2.0.55 (ubuntu) php/5.1.2': 'apache 2.0.55',
'apache/2.0.55 (unix) dav/2 mod_ssl/2.0.55 openssl/0.9.8a php/4.4.4': 'apache 2.0.55',
'apache/2.0.55 (unix) mod_ssl/2.0.55 openssl/0.9.7i mod_jk/1.2.15': 'apache 2.0.55',
'apache/2.0.55 (unix) mod_ssl/2.0.55 openssl/0.9.8a jrun/4.0': 'apache 2.0.55',
'apache/2.0.58 (unix)': 'apache 2.0.58',
'apache/2.0.58 (win32) php/5.1.4': 'apache 2.0.58',
'apache/2.0.58 (win32) php/5.1.5': 'apache 2.0.58',
'apache/2.0.59 (freebsd) dav/2 php/5.2.1 with suhosin-patch': 'apache 2.0.59',
'apache/2.0.59 (freebsd) mod_fastcgi/2.4.2 php/4.4.4 with suhosin-patch': 'apache 2.0.59',
'apache/2.0.59 (netware) mod_jk/1.2.15': 'apache 2.0.59',
'apache/2.0.59 (unix) mod_ssl/2.0.59 openssl/0.9.7e mod_jk/1.2.15': 'apache 2.0.59',
'apache/2.0.59 (unix) mod_ssl/2.0.59 openssl/0.9.8d mod_fastcgi/2.4.2': 'apache 2.0.59',
'apache/2.0.63 (red hat)': 'apache 2.0.63',
'apache/2.2.0 (freebsd) mod_ssl/2.2.0 openssl/0.9.7e-p1 dav/2 php/5.1.2': 'apache 2.2.0',
'apache/2.2.11 (freebsd)': 'apache 2.2.11',
'apache/2.2.2 (fedora)': 'apache 2.2.2',
'apache/2.2.3 (centos)': 'apache 2.2.3',
'apache/2.2.3 (debian) dav/2 svn/1.4.2 mod_python/3.2.10 python/2.4.4': 'apache 2.2.3',
'apache/2.2.3 (debian) php/4.4.4-8+etch4': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch7': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch7 mod_ssl/2.2.3 openssl/0.9.8c': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch7 mod_ssl/2.2.3 openssl/0.9.8e': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch9': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_fastcgi/2.4.2 php/5.2.0-8+etch7 mod_ssl/2.2.3': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_jk/1.2.18 php/5.2.0-8+etch5~pu1 mod_ssl/2.2.3': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_jk/1.2.18 php/5.2.0-8+etch7': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_ssl/2.2.3 openssl/0.9.8c php/5.2.4': 'apache 2.2.3',
'apache/2.2.3 (linux/suse)': 'apache 2.2.3',
'apache/2.2.3 (mandriva linux/prefork-1mdv2007.0)': 'apache 2.2.3',
'apache/2.2.3 (red hat)': 'apache 2.2.3',
'apache/2.2.3 (unix) php/5.2.1': 'apache 2.2.3',
'apache/2.2.4 (debian) php/4.4.4-9+lenny1 mod_ssl/2.2.4 openssl/0.9.8e': 'apache 2.2.4',
'apache/2.2.4 (fedora)': 'apache 2.2.4',
'apache/2.2.4 (fedora) mod_ssl/2.2.4 openssl/0.9.8b dav/2': 'apache 2.2.4',
'apache/2.2.4 (freebsd)': 'apache 2.2.4',
'apache/2.2.4 (unix) dav/2 php/5.2.1rc3-dev mod_ruby/1.2.5': 'apache 2.2.4',
'apache/2.2.4 (unix) mod_ssl/2.2.4 openssl/0.9.7e dav/2 svn/1.4.2': 'apache 2.2.4',
'apache/2.2.4 (win32)': 'apache 2.2.4',
'apache/2.2.6 (debian) dav/2 php/4.4.4-9 mod_ssl/2.2.6 openssl/0.9.8g': 'apache 2.2.6',
'apache/2.2.6 (debian) dav/2 svn/1.4.4 mod_python/3.3.1 python/2.4.4': 'apache 2.2.6',
'apache/2.2.6 (debian) php/5.2.4-2 with suhosin-patch mod_ssl/2.2.6': 'apache 2.2.6',
'apache/2.2.6 (freebsd) mod_ssl/2.2.6 openssl/0.9.8e dav/2': 'apache 2.2.6',
'apache/2.2.6 (unix) mod_ssl/2.2.6 openssl/0.9.7a dav/2 mod_mono/1.2.4': 'apache 2.2.6',
'apache/2.2.6 (unix) mod_ssl/2.2.6 openssl/0.9.7a mod_jk/1.2.25': 'apache 2.2.6',
'apache/2.2.6 (unix) mod_ssl/2.2.6 openssl/0.9.8b dav/2 php/5.2.5 with': 'apache 2.2.6',
'apache': 'apache 2.2.8',
'apache/2.2.8 (freebsd) mod_ssl/2.2.8 openssl/0.9.8g dav/2 php/5.2.5': 'apache 2.2.8',
'apache/2.2.8 (unix) mod_ssl/2.2.8 openssl/0.9.8g': 'apache 2.2.8',
'apache/2.2.8 (unix)': 'apache 2.2.9',
'apache/2.3.0-dev (unix)': 'apache 2.3.0',
'araneida/0.84': 'araneida 0.84',
'\'s webserver': 'ashleys webserver',
'badblue/2.4': 'badblue 2.4',
'badblue/2.5': 'badblue 2.5',
'badblue/2.6': 'badblue 2.6',
'badblue/2.7': 'badblue 2.7',
'barracudaserver.com (posix)': 'barracudadrive 3.9.1',
'basehttp/0.3 python/2.4.4': 'basehttpserver 0.3',
'boa/0.92o': 'boa 0.92o',
'boa/0.93.15': 'boa 0.93.15',
'boa/0.94.14rc21': 'boa 0.94.14rc21',
'cl-http/70.216 (lispworks': 'cl-http 70.216',
'caudium/1.4.9 stable': 'caudium 1.4.9',
'cherokee': 'cherokee 0.6.0',
'cherokee/0.99': 'cherokee 0.99',
'virata-emweb/r6_0_1': 'cisco vpn 3000 concentrator virata emweb r6.2.0',
'virata-emweb/r6_2_0': 'cisco vpn 3000 concentrator virata emweb r6.2.0',
'compaqhttpserver/5.2': 'compaq http server 5.2',
'compaqhttpserver/5.7': 'compaq http server 5.7',
'compaqhttpserver/5.91': 'compaq http server 5.91',
'compaqhttpserver/5.94': 'compaq http server 5.94',
'compaqhttpserver/9.9 hp system management homepage/192.168.3.11': 'compaq http server 9.9',
'cougar/9.5.6001.6264': 'cougar 9.5.6001.6264',
'goahead-webs': 'flexwatch fw-3440-b',
'gatling/0.10': 'gatling 0.10',
'gatling/0.9': 'gatling 0.9',
'globalscape-secure server/3.3': 'globalscape secure server 3.3',
'gws': 'google web server 2.1',
'mfe': 'google web server 2.1',
'sffe': 'google web server 2.1',
'httpi/1.5.2 (demonic/aix)': 'httpi 1.5.2',
'httpi/1.6.1 (demonic/aix)': 'httpi 1.6.1',
'hiawatha v6.11': 'hiawatha 6.11',
'hiawatha/6.2 mod_gwbasic/1.7.3 openxor/0.3.1a': 'hiawatha 6.2',
'ibm_http_server/192.168.127.12 apache/2.0.47 (unix)': 'ibm http server 192.168.127.12',
'ibm_http_server/172.16.17.32 apache/2.0.47 (unix)': 'ibm http server 172.16.17.32',
'ibm_http_server/172.16.58.39 apache/2.0.47 (unix) dav/2': 'ibm http server 172.16.17.32',
'ibm_http_server': 'ibm http server 172.16.31.10',
'ipc@chip': 'ipc@chip 1.04',
'icewarp/8.3': 'icewarp 8.3.0',
'indy/9.00.10': 'indy idhttpserver 9.00.10',
'jana-server/172.16.58.3': 'jana-server 172.16.58.3',
'jetty/5.1.10 (linux/2.6.12 i386 java/1.5.0_05': 'jetty 5.1.10',
'jetty/5.1.1 (linux/2.6.9-5.elsmp i386 java/1.5.0_09': 'jetty 5.1.1',
'jetty(6.1.1)': 'jetty 6.1.1',
'jigsaw/2.2.5': 'jigsaw 2.2.5',
'jigsaw/2.2.6': 'jigsaw 2.2.6',
'jigsaw/2.3.0-beta1': 'jigsaw 2.3.0-beta1',
'kget': 'kget web interface 2.1.3',
'klone/2.1.0rc1': 'klone 2.1.0rc1',
'allegro-software-rompager/2.00': 'konica ip-421/7020 allegro rompager 2.00',
'boa/0.94.13': 'linksys wvc54gc boa 0.94.13',
'listmanagerweb/8.8c (based on tcl-webserver/3.4.2)': 'listmanagerweb 8.8c',
'litespeed': 'litespeed web server 3.3',
'domino-go-webserver/4.6.2.5': 'lotus domino go webserver 192.168.3.11',
'mathopd/1.5p6': 'mathopd 1.5p6',
'microsoft-iis/5.0': 'microsoft iis 5.0',
'microsoft-iis/5.1': 'microsoft iis 5.1',
'microsoft-iis/6.0': 'microsoft iis 6.0',
'microsoft-iis/6.0.0': 'microsoft iis 6.0',
'microsoft-iis/7.0': 'microsoft iis 7.0',
'mongrel 1.0': 'mongrel 1.0',
'aegis_nanoweb/2.2.10-dev (linux': 'nanoweb 2.2.10',
'rapid logic/1.1': 'net2phone rapid logic 1.1',
'thttpd/2.25b 29dec2003': 'netbotz 500 thttpd 2.25b',
'netware-enterprise-web-server/5.1': 'netware enterprise web server 5.1',
'zyxel-rompager/3.02': 'netgear rp114 3.26',
'allegro-software-rompager/2.10': 'netopia router allegro rompager 2.10',
'netscape-enterprise/2.01': 'netscape enterprise server 2.01',
'netscape-enterprise/3.5.1': 'netscape enterprise server 3.5.1',
'netscape-enterprise/3.5.1g': 'netscape enterprise server 3.5.1g',
'netscape-enterprise/4.1': 'netscape enterprise server 4.1',
'netscape-enterprise/6.0': 'netscape enterprise server 6.0',
'netscape-fasttrack/3.02': 'netscape fasttrack 3.02a',
'osu/3.12alpha': 'osu 3.12alpha',
'osu/3.9': 'osu 3.9',
'omnihttpd/2.06': 'omnihttpd 2.06',
'omnihttpd/2.09': 'omnihttpd 2.09',
'omnihttpd/2.10': 'omnihttpd 2.10',
'opensa/1.0.1 / apache/1.3.23 (win32) php/4.1.1 dav/1.0.2': 'opensa 1.0.1',
'opensa/1.0.3 / apache/1.3.26 (win32) mod_ssl/2.8.9 openssl/0.9.6g': 'opensa 1.0.3',
'opensa/1.0.4 / apache/1.3.27 (win32) php/4.2.2 mod_gzip/1.3.19.1a': 'opensa 1.0.4',
'opensa/1.0.5 / apache/1.3.27 (win32) (using ihtml/2.20.500)': 'opensa 1.0.5',
'oracle-application-server-10g oracleas-web-cache-10g/10.1.2.0.0 (n': 'oracle application server 10g 10.1.2.0.0',
'oracle-application-server-10g/10.1.2.0.0 oracle-http-server': 'oracle application server 10g 10.1.2.0.0',
'oracle-application-server-10g/10.1.2.0.2 oracle-http-server': 'oracle application server 10g 10.1.2.0.2',
'oracle-application-server-10g oracleas-web-cache-10g/10.1.2.2.0 (tn': 'oracle application server 10g 10.1.2.2.0',
'oracle-application-server-10g/10.1.2.2.0 oracle-http-server': 'oracle application server 10g 10.1.2.2.0',
'oracle-application-server-10g/10.1.3.0.0 oracle-http-server': 'oracle application server 10g 10.1.3.0.0',
'oracle-application-server-10g/10.1.3.1.0 oracle-http-server': 'oracle application server 10g 10.1.3.1.0',
'oracle-application-server-10g/9.0.4.0.0 oracle-http-server': 'oracle application server 10g 9.0.4.0.0',
'oracle-application-server-10g/9.0.4.1.0 oracle-http-server': 'oracle application server 10g 9.0.4.1.0',
'oracle-application-server-10g/9.0.4.2.0 oracle-http-server': 'oracle application server 10g 9.0.4.2.0',
'oracle-application-server-10g/9.0.4.3.0 oracle-http-server': 'oracle application server 10g 9.0.4.3.0',
'oracle9ias/9.0.2.3.0 oracle http server': 'oracle application server 9i 9.0.2.3.0',
'oracle9ias/9.0.2 oracle http server': 'oracle application server 9i 9.0.2',
'oracle9ias/192.168.3.11 oracle http server': 'oracle application server 9i 192.168.3.11',
'orion/2.0.7': 'orion 2.0.7',
'oversee webserver v1.3.18': 'oversee webserver 1.3.18',
'httpd/1.00': 'packetshaper httpd 1.00',
'wg_httpd/1.0(based boa/0.92q)': 'philips netcam 1.4.8 wg_httpd 1.0',
'thttpd/2.20b 10oct00': 'qnap nas-4100 2.26.0517',
'http server 1.0': 'qnap ts-411u 1.2.0.0531',
'resin/3.0.23': 'resin 3.0.23',
'resin/3.0.6': 'resin 3.0.6',
'web-server/3.0': 'ricoh aficio 6002 3.53.3 web-server 3.0',
'roxen/2.2.213': 'roxen 2.2.213',
'roxen/4.5.111-release2': 'roxen 4.5.111',
'roxen/4.5.145-rc2': 'roxen 4.5.145',
'snap appliances, inc./3.1.603': 'snap appliance 3.1.603',
'snap appliance, inc./3.4.803': 'snap appliance 3.4.803',
'snap appliance, inc./3.4.805': 'snap appliance 3.4.805',
'snap appliance, inc./4.0.830': 'snap appliance 4.0.830',
'snap appliance, inc./4.0.854': 'snap appliance 4.0.854',
'snap appliance, inc./4.0.860': 'snap appliance 4.0.860',
'snapstream': 'snapstream digital video recorder',
'netevi/1.09': 'sony snc-rz30 netevi 1.09',
'netevi/2.05': 'sony snc-rz30 netevi 2.05',
'netevi/2.05g': 'sony snc-rz30 netevi 2.05g',
'netevi/2.06': 'sony snc-rz30 netevi 2.06',
'netevi/2.13': 'sony snc-rz30 netevi 2.13',
'netevi/2.14': 'sony snc-rz30 netevi 2.14',
'netevi/2.24': 'sony snc-rz30 netevi 2.24',
'netevi/3.01': 'sony snc-rz30 netevi 3.01',
'netevi/3.02': 'sony snc-rz30 netevi 3.02',
'netevi/3.03': 'sony snc-rz30 netevi 3.03',
'netevi/3.10': 'sony snc-rz30 netevi 3.10',
'netevi/3.10a': 'sony snc-rz30 netevi 3.10a',
'netevi/3.14': 'sony snc-rz30 netevi 3.14',
'netzoom/1.00': 'sony snc-z20 netzoom 1.00',
'squid/2.5.stable5': 'squid 2.5.stable5',
'squid/2.5.stable6': 'squid 2.5.stable6',
'squid/2.5.stable9': 'squid 2.5.stable9',
'squid/2.6.stable13': 'squid 2.6.stable13',
'squid/2.6.stable4': 'squid 2.6.stable4',
'squid/2.6.stable7': 'squid 2.6.stable7',
'stweb/1.3.27 (unix) authmysql/3.1 mod_jk/1.1.0 php/3.0.18 php/4.2.3 with': 'stweb 1.3.27',
'sun-java-system-web-server/6.1': 'sun java system web server 6.1',
'sun-java-system-web-server/7.0': 'sun java system web server 7.0',
'sun-one-web-server/6.1': 'sun one web server 6.1',
'smssmtphttp': 'symantec mail security for smtp',
'tcl-webserver/3.5.1 may 27, 2004': 'tclhttpd 3.5.1',
'theserver/2.21l': 'theserver 2.21l',
'userland frontier/9.0.1-winnt': 'userland frontier 9.0.1',
'userland frontier/9.5-winnt': 'userland frontier 9.5',
'realvnc/4.0': 'vnc server enterprise edition e4.2.5',
'vswebserver/01.00 index/01.02.01': 'vs web server 01.00.00',
'virtuoso/05.00.3021 (linux) i686-generic-linux-glibc23-32 vdb': 'virtuoso 5.0.3',
'wdaemon/9.6.1': 'wdaemon 9.6.1',
'webrick/1.3.1 (ruby/1.9.0/2006-07-13)': 'webrick 1.3.1',
'wn/2.4.7': 'wn server 2.4.7',
'allegro-software-rompager/3.06b1': 'xerox docuprint n4025 allegro rompager 3.06b1',
'spyglass_microserver/2.01fc1': 'xerox phaser 6200',
'yaws/1.65 yet another web server': 'yaws 1.65',
'yaws/1.68 yet another web server': 'yaws 1.68',
'yaws/1.72 yet another web server': 'yaws 1.72',
'yaws/sys_6.0.5 yet another web server': 'yaws 6.0.5',
'zeus/4.3': 'zeus 4.3',
'zeus/4.41': 'zeus 4.41',
'unknown/0.0 upnp/1.0 conexant-emweb/r6_1_0': 'zoom adsl',
'zope/(zope 2.10.4-final, python 2.4.4, linux2) zserver/1.1 plone/3.0.1': 'zope 2.10.4',
'zope/(zope 2.5.0 (binary release, python 2.1, linux2-x86), python 2.1.2,': 'zope 2.5.0',
'zope/(zope 2.5.1 (source release, python 2.1, linux2), python 2.1.3,': 'zope 2.5.1',
'zope/(zope 2.6.0 (binary release, python 2.1, linux2-x86), python 2.1.3,': 'zope 2.6.0',
'zope/(zope 2.6.1 (source release, python 2.1, linux2), python 2.2.3,': 'zope 2.6.1',
'zope/(zope 2.6.4 (source release, python 2.1, linux2), python 2.2.3,': 'zope 2.6.4',
'zope/(zope 2.7.4-0, python 2.3.5, linux2) zserver/1.1': 'zope 2.7.4',
'squid/2.5.stable12': 'zope 2.7.4',
'zope/(zope 2.7.5-final, python 2.3.4, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.5',
'zope/(zope 2.7.5-final, python 2.3.5, linux2) zserver/1.1': 'zope 2.7.5',
'zope/(zope 2.7.6-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.6',
'zope/(zope 2.7.6-final, python 2.4.0, linux2) zserver/1.1': 'zope 2.7.6',
'zope/(zope 2.7.7-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.7',
'zope/(zope 2.7.7-final, python 2.4.4, linux2) zserver/1.1': 'zope 2.7.7',
'zope/(zope 2.7.8-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.8',
'zope/(zope 2.7.9-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.4': 'zope 2.7.9',
'zope/(zope 2.8.0-a0, python 2.3.4, linux2) zserver/1.1 plone/2.0-rc3': 'zope 2.8.0',
'zope/(zope 2.8.2-final, python 2.3.5, linux2) zserver/1.1 plone/unknown': 'zope 2.8.2',
'zope/(zope 2.8.4-final, python 2.3.5, linux2) zserver/1.1 plone/unknown': 'zope 2.8.4',
'zope/(zope 2.8.6-final, python 2.3.5, linux2) zserver/1.1 plone/unknown': 'zope 2.8.6',
'zope/(zope 2.8.6-final, python 2.4.4, linux2) zserver/1.1 plone/unknown': 'zope 2.8.6',
'zope/(zope 2.8.7-final, python 2.4.4, linux2) zserver/1.1 plone/unknown': 'zope 2.8.7',
'zope/(zope 2.9.2-, python 2.4.3, linux2) zserver/1.1 plone/unknown': 'zope 2.9.2',
'zope/(zope 2.9.3-, python 2.4.0, linux2) zserver/1.1': 'zope 2.9.3',
'zope/(zope 2.9.3-, python 2.4.2, linux2) zserver/1.1 plone/2.5': 'zope 2.9.3',
'zope/(zope 2.9.5-final, python 2.4.3, linux2) zserver/1.1 plone/2.5.1': 'zope 2.9.5',
'zope/(zope 2.9.6-final, python 2.4.3, linux2) zserver/1.1 plone/2.5.1': 'zope 2.9.6',
'zope/(zope 2.9.6-final, python 2.4.3, linux2) zserver/1.1 plone/2.5.2': 'zope 2.9.6',
'zope/(zope 2.9.7-final, python 2.4.4, linux2) zserver/1.1': 'zope 2.9.7',
'zope/(zope 2.9.8-final, python 2.4.4, linux2) zserver/1.1': 'zope 2.9.8',
'rompager/4.07 upnp/1.0': 'zyxel zywall 10w rompager 4.07',
'and-httpd/0.99.11': 'and-httpd 0.99.11',
'bozohttpd/20060517': 'bozohttpd 20060517',
'bozohttpd/20080303': 'bozohttpd 20080303',
'dwhttpd/4.0.2a7a (inso': 'dwhttpd 4.0.2a7a',
'dwhttpd/4.1a6 (inso': 'dwhttpd 4.1a6',
'dwhttpd/4.2a7 (inso': 'dwhttpd 4.2a7',
'emule': 'emule 0.48a',
'ns-firecat/1.0.x': 'firecat 1.0.0 beta',
'fnord/1.8a': 'fnord 1.8a',
'lighttpd/1.4.13': 'lighttpd 1.4.13',
'lighttpd/1.4.16': 'lighttpd 1.4.16',
'lighttpd/1.4.18': 'lighttpd 1.4.18',
'lighttpd/1.4.19': 'lighttpd 1.4.19',
'lighttpd/1.4.22': 'lighttpd 1.4.22',
'lighttpd/1.5.0': 'lighttpd 1.5.0',
'nginx/0.5.19': 'nginx 0.5.19',
'nginx/0.5.30': 'nginx 0.5.30',
'nginx/0.5.31': 'nginx 0.5.31',
'nginx/0.5.32': 'nginx 0.5.32',
'nginx/0.5.33': 'nginx 0.5.33',
'nginx/0.5.35': 'nginx 0.5.35',
'nginx/0.6.13': 'nginx 0.6.13',
'nginx/0.6.16': 'nginx 0.6.16',
'nginx/0.6.20': 'nginx 0.6.20',
'nginx/0.6.31': 'nginx 0.6.26',
'nostromo 1.9.1': 'nostromo 1.9.1',
'publicfile': 'publicfile',
'thttpd/2.19-mx apr 25 2002': 'thttpd 2.19-mx',
'thttpd/2.19-mx dec 2 2002': 'thttpd 2.19-mx',
'thttpd/2.19-mx jan 24 2006': 'thttpd 2.19-mx',
'thttpd/2.19-mx oct 20 2003': 'thttpd 2.19-mx',
'thttpd/2.23beta1 26may2002': 'thttpd 2.23beta1',
'thttpd/2.24 26oct2003': 'thttpd 2.24',
'thttpd/2.26 ??apr2004': 'thttpd 2.26',
'vqserver/1.9.56 the world\'s most friendly web server': 'vqserver 1.9.56',
'webcamxp': 'webcamxp pro 2007 3.96.000 beta',
}
windows_hints = ['microsoft', 'windows', 'win32']
mac_os_hints = ['macos']
linux_hints = ['suse', 'linux', 'debian', 'solaris', 'red hat', 'unix', 'ubuntu', 'centos']
hosting_hints = ['host', 'hosting']
ftp_servers = {
'crushftp': '*',
'glftpd': 'unix',
'goanywhere ': 'unix',
'proftpd': '*',
'pro-ftpd ': '*',
'pure-ftpd': 'unix',
'pureftpd': 'unix',
'slimftpd ': 'windows',
'slim-ftpd ': 'windows',
'vsftpd ': 'unix',
'wu-ftpd': 'unix',
'wuftpd ': 'unix',
'alftp': 'windows',
'cerberus ': 'windows',
'completeftp': 'windows',
'filezilla': '*',
'logicaldoc': '*',
'iis': 'windows',
'naslite': 'unix',
'syncplify': 'windows',
'sysax': 'windows',
'war ftp': 'windows',
'ws ftp': 'windows',
'ncftpd': 'unix',
}
smtp_servers = {
'gws': 'google web services',
'ncftpd': 'unix',
'agorum': 'unix',
'atmail': 'unix',
'axigen': 'unix',
'bongo': 'unix',
'citadel': 'unix',
'contactoffice': 'unix',
'communigate': 'unix',
'courier': 'unix',
'critical path': 'unix',
'imail': 'unix',
'eudora': 'unix',
'evo': 'unix',
'exim': 'unix',
'firstclass': 'unix',
'gammadyne': 'unix',
'gordano': 'unix',
'haraka': 'unix',
'hmailserver': 'unix',
'ibm lotus domino': 'unix',
'icewarp': 'unix',
'ipswitch': 'unix',
'ironport': 'unix',
'james': 'unix',
'kerio': 'unix',
'magicmail': 'unix',
'mailenable': 'unix',
'mailtraq': 'unix',
'mdaemon': 'windows',
'mercury': 'unix',
'meta1': 'unix',
'microsoft': 'windows',
'exchange': 'windows',
'mmdf': 'unix',
'momentum': 'unix',
'groupwise': 'unix',
'netmail': 'unix',
'opensmtpd': 'unix',
'openwave': 'unix',
'open-xchange': 'unix',
'beehive': 'unix',
'oracle': 'unix',
'port25': 'unix',
'postfix': 'unix',
'postmaster': 'unix',
'qmail': 'unix',
'qpsmtpd': 'unix',
'scalix': 'unix',
'sendmail': 'unix',
'slmail pro': 'unix',
'smail': 'unix',
'sparkengine': 'unix',
'smtp proxy': 'unix',
'strongmail': 'unix',
'sun java system': 'unix',
'synovel collabsuite': 'unix',
'wingate': 'windows',
'xmail': 'unix',
'xms': 'unix',
'zarafa': 'unix',
'zimbra': 'unix',
'zmailer': 'unix',
}
```
#### File: power-scanner/powscan_common/packet_helper.py
```python
import time
__author__ = '<NAME>'
import os
import struct
import array
import struct
from socket import htons, ntohs
from powscan_common.networking_helper import *
import abc
class Packet(object):
__metaclass__ = abc.ABCMeta
def _checksum(self, msg):
s = 0
if len(msg) % 2 != 0:
msg += chr(0)
# loop taking 2 characters at a time
for i in range(0, len(msg), 2):
w = ord(msg[i]) + (ord(msg[i + 1]) << 8 )
s = s + w
s = (s >> 16) + (s & 0xffff);
s = s + (s >> 16);
#complement and mask to 4 byte short
s = ~s & 0xffff
return s
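    # The method above implements the standard Internet checksum (RFC 1071) used by
    # ICMP and IP: the message is summed as 16-bit words (low byte first), the carries
    # are folded back into the low 16 bits, and the one's complement is returned.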
def serialize(self):
"""
Serializes the current packet instance into a raw packet bytes
returns packet bytes
"""
return self._serialize()
@abc.abstractmethod
def _serialize(self):
"""
Abstract method -
Serializes the current packet instance into a raw packet bytes
returns packet bytes
"""
pass
def deserialize(self, raw_packet_bytes):
"""
Deserializes the raw_packet_bytes into an instance of the inheritor
"""
self._deserialize(raw_packet_bytes)
@abc.abstractmethod
def _deserialize(self, raw_packet_bytes):
"""
Abstract method -
Deserializes the raw_packet_bytes into an instance of the inheritor
"""
pass
# --==-- ==--== ==---== --==-- ==--== ==---== --==-- ==--== ==---== --==-- ==--== ==---==
# Internet Control Message Protocol (ICMP) :
# -- == --
# Enum IcmpPacketType
class IcmpType(object):
minlen = 8
masklen = 12
echoreply = 0
unreach = 3
unreach_net = 0
unreach_host = 1
unreach_protocol = 2
unreach_port = 3
unreach_needfrag = 4
unreach_srcfail = 5
sourcequench = 4
redirect = 5
redirect_net = 0
redirect_host = 1
redirect_tosnet = 2
redirect_toshost = 3
echo = 8
timxceed = 11
timxceed_intrans = 0
timxceed_reass = 1
paramprob = 12
tstamp = 13
tstampreply = 14
ireq = 15
ireqreply = 16
maskreq = 17
maskreply = 18
# -- == --
# IcmpPacket
# ICMP Header Format http://en.wikipedia.org/wiki/Internet_Control_Message_Protocol
# |type(8)|code(8)|checksum(16)|id(16)|sequence(16)|data(variable)|
class IcmpPacket(Packet):
def __init__(self,
type=IcmpType.echo,
code=0,
id=None,
sequence=0,
checksum=0,
payload='Power Scanner ICMP'):
self.type = type
self.code = code
# id not initialized
if not id:
# Treat the hosting process's pid as id
self.id = os.getpid() & 0xFFFF
else:
self.id = id
self.sequence = sequence
self.checksum = checksum
self.payload = payload
def _serialize(self):
# icmp request :
        # |type(8)|code(8)|checksum(16)|id(16)|sequence(16)|data(variable)|
# Q - 8 bytes
# L - 4 bytes
# H - 2 bytes
# B - 1 byte
type = struct.pack('!B', self.type)
code = struct.pack('!B', self.code)
checksum_result = struct.pack('H', 0)
id = struct.pack('!H', self.id)
sequence = struct.pack('!H', self.sequence)
packet_without_checksum = type + \
code + \
checksum_result + \
id + \
sequence + \
self.payload
checksum_result = self._checksum(packet_without_checksum)
checksum_result = struct.pack('H', checksum_result)
packet = type + \
code + \
checksum_result + \
id + \
sequence + \
self.payload
return packet
def _deserialize(self, raw_packet_bytes):
self.type = struct.unpack('!B', raw_packet_bytes[0:1])[0]
self.code = struct.unpack('!B', raw_packet_bytes[1:2])[0]
self.checksum = struct.unpack('H', raw_packet_bytes[2:4])[0]
self.id = struct.unpack('!H', raw_packet_bytes[4:6])[0]
self.sequence = struct.unpack('!H', raw_packet_bytes[6:8])[0]
self.payload = raw_packet_bytes[8:]
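    # Round-trip sketch of the two methods above:
    #   request = IcmpPacket(sequence=1, payload='ping')
    #   raw = request.serialize()
    #   reply = IcmpPacket()
    #   reply.deserialize(raw)    # reply.sequence == 1 and reply.payload == 'ping'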
# --==-- ==--== ==---== --==-- ==--== ==---== --==-- ==--== ==---== --==-- ==--== ==---==
# Internet Protocol (IP) :
class IpServiceType(object):
lowdelay = 0x10
throughput = 0x08
reliability = 0x04
netcontrol = 0xe0
internetcontrol = 0xc0
critic_ecp = 0xa0
flashoverride = 0x80
flash = 0x60
immediate = 0x40
priority = 0x20
routine = 0x00
class IpFlags(object):
dont_fragment = int('010', 2)
more_fragment = int('001', 2)
fragment_if_necessary = int('000', 2)
class IpProtocol(object):
hopopt = 0 # ipv6 hop_by_hop option
icmp = 1 # internet control message protocol
igmp = 2 # internet group management protocol
ggp = 3 # gateway_to_gateway protocol
ip_in_ip = 4 # ip_within_ip (encapsulation)
st = 5 # internet stream protocol
tcp = 6 # transmission control protocol
cbt = 7 # core_based trees
egp = 8 # exterior gateway protocol
igp = 9 # interior gateway protocol (any private interior gateway (used by cisco for their igrp))
bbn_rcc_mon = 10 # bbn rcc monitoring
nvp_ii = 11 # network voice protocol
pup = 12 # xerox pup
argus = 13 # argus
emcon = 14 # emcon
xnet = 15 # cross net debugger
chaos = 16 # chaos
udp = 17 # user datagram protocol
mux = 18 # multiplexing
dcn_meas = 19 # dcn measurement subsystems
hmp = 20 # host monitoring protocol
prm = 21 # packet radio measurement
xns_idp = 22 # xerox ns idp
trunk_1 = 23 # trunk_1
trunk_2 = 24 # trunk_2
leaf_1 = 25 # leaf_1
leaf_2 = 26 # leaf_2
rdp = 27 # reliable datagram protocol
irtp = 28 # internet reliable transaction protocol
iso_tp4 = 29 # iso transport protocol class 4
netblt = 30 # bulk data transfer protocol
mfe_nsp = 31 # mfe network services protocol
merit_inp = 32 # merit internodal protocol
dccp = 33 # datagram congestion control protocol
_3pc = 34 # third party connect protocol
idpr = 35 # inter_domain policy routing protocol
xtp = 36 # xpress transport protocol
ddp = 37 # datagram delivery protocol
idpr_cmtp = 38 # idpr control message transport protocol
tp_plus_plus = 39 # tp++ transport protocol
il = 40 # il transport protocol
ipv6 = 41 # ipv6 encapsulation
sdrp = 42 # source demand routing protocol
ipv6_route = 43 # routing header for ipv6
ipv6_frag = 44 # fragment header for ipv6
idrp = 45 # inter_domain routing protocol
rsvp = 46 # resource reservation protocol
gre = 47 # generic routing encapsulation
mhrp = 48 # mobile host routing protocol
bna = 49 # bna
esp = 50 # encapsulating security payload
ah = 51 # authentication header
i_nlsp = 52 # integrated net layer security protocol
swipe = 53 # swipe
narp = 54 # nbma address resolution protocol
mobile = 55 # ip mobility (min encap)
tlsp = 56 # transport layer security protocol (using kryptonet key management)
skip = 57 # simple key_management for internet protocol
ipv6_icmp = 58 # icmp for ipv6
ipv6_nonxt = 59 # no next header for ipv6
ipv6_opts = 60 # destination options for ipv6
internal = 61 # any host internal protocol
cftp = 62 # cftp
local = 63 # any local network
sat_expak = 64 # satnet and backroom expak
kryptolan = 65 # kryptolan
rvd = 66 # mit remote virtual disk protocol
ippc = 67 # internet pluribus packet core
distributed_file_system = 68 # any distributed file system
sat_mon = 69 # satnet monitoring
visa = 70 # visa protocol
ipcv = 71 # internet packet core utility
cpnx = 72 # computer protocol network executive
cphb = 73 # computer protocol heart beat
wsn = 74 # wang span network
pvp = 75 # packet video protocol
br_sat_mon = 76 # backroom satnet monitoring
sun_nd = 77 # sun nd protocol_temporary
wb_mon = 78 # wideband monitoring
wb_expak = 79 # wideband expak
iso_ip = 80 # international organization for standardization internet protocol
vmtp = 81 # versatile message transaction protocol
secure_vmtp = 82 # secure versatile message transaction protocol
vines = 83 # vines
ttp = 84 # ttp
iptm = 84 # internet protocol traffic manager
nsfnet_igp = 85 # nsfnet_igp
dgp = 86 # dissimilar gateway protocol
tcf = 87 # tcf
eigrp = 88 # eigrp
ospf = 89 # open shortest path first
sprite_rpc = 90 # sprite rpc protocol
larp = 91 # locus address resolution protocol
mtp = 92 # multicast transport protocol
ax_25 = 93 # ax.25
ipip = 94 # ip_within_ip encapsulation protocol
micp = 95 # mobile internetworking control protocol
scc_sp = 96 # semaphore communications sec. pro
etherip = 97 # ethernet_within_ip encapsulation
encap = 98 # encapsulation header
private_encryption = 99 # any private encryption scheme
gmtp = 100 # gmtp
ifmp = 101 # ipsilon flow management protocol
pnni = 102 # pnni over ip
pim = 103 # protocol independent multicast
aris = 104 # ibm's aris (aggregate route ip switching) protocol
scps = 105 # scps (space communications protocol standards)
qnx = 106 # qnx
a_n = 107 # active networks
ipcomp = 108 # ip payload compression protocol
snp = 109 # sitara networks protocol
compaq_peer = 110 # compaq peer protocol
ipx_in_ip = 111 # ipx in ip
vrrp = 112 # virtual router redundancy protocol, common address redundancy protocol (not iana assigned)
pgm = 113 # pgm reliable transport protocol
_0_hop = 114 # any 0_hop protocol
l2tp = 115 # layer two tunneling protocol version 3
ddx = 116 # d_ii data exchange (ddx)
iatp = 117 # interactive agent transfer protocol
stp = 118 # schedule transfer protocol
srp = 119 # spectralink radio protocol
uti = 120 # universal transport interface protocol
smp = 121 # simple message protocol
sm = 122 # simple multicast protocol
ptp = 123 # performance transparency protocol
is_is_over_ipv4 = 124 # intermediate system to intermediate system (is_is) protocol over ipv4
fire = 125 # flexible intra_as routing environment
crtp = 126 # combat radio transport protocol
crudp = 127 # combat radio user datagram
sscopmce = 128 # service_specific connection_oriented protocol in a multilink and connectionless environment
iplt = 129 #
sps = 130 # secure packet shield
pipe = 131 # private ip encapsulation within ip
sctp = 132 # stream control transmission protocol
fc = 133 # fibre channel
rsvp_e2e_ignore = 134 # reservation protocol (rsvp) end_to_end ignore
mobility_header = 135 # mobility extension header for ipv6
udplite = 136 # lightweight user datagram protocol
mpls_in_ip = 137 # multiprotocol label switching encapsulated in ip
manet = 138 # manet protocols
hip = 139 # host identity protocol
shim6 = 140 # site multihoming by ipv6 intermediation
wesp = 141 # wrapped encapsulating security payload
rohc = 142 # robust header compression
# unassigned 143-252 #
# testing 253 - 254 # rfc 3692
# reserved 255 #
class IpTimeToLive(object):
# data is from http://www.howtogeek.com/104337/hacker-geek-os-fingerprinting-with-ttl-and-tcp-window-sizes/
linux = 64
windows = 128
    ios = 255  # cisco ios / network gear
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| IHL |Type of Service| Total Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Identification |Flags| Fragment Offset |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Time to Live | Protocol | Header Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Destination Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Options | Padding |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class IpPacket(Packet):
def __init__(self,
version=4,
header_length=5,
type_of_service=IpServiceType.routine,
total_length=None,
identification=0,
flags=0,
fragment_offset=0,
ttl=IpTimeToLive.linux,
protocol=IpProtocol.icmp,
checksum=0,
source_ip=0,
destination_ip=0,
payload=''):
"""
IP Packet
"""
self.version = version
self.header_length = header_length
self.type_of_service = type_of_service
self.total_length = total_length
self.identification = identification
self.flags = flags
self.fragment_offset = fragment_offset
self.ttl = ttl
self.protocol = protocol
self.checksum = checksum
self.source_ip = source_ip
self.destination_ip = destination_ip
self.payload = payload
def _serialize(self):
# IP Packet Structure http://www.networksorcery.com/enp/protocol/ip.htm
# If the total_length left blank, let's calculate it
if not self.total_length:
self.total_length = self.header_length * 4 + len(self.payload)
version_and_header_length = struct.pack('!B', (self.version << 4) | self.header_length)
type_of_service = struct.pack('!B', self.type_of_service)
total_length = struct.pack('!H', self.total_length)
identification = struct.pack('!H', self.identification)
flags = struct.pack('!H', (self.flags << 13) | self.fragment_offset)
ttl = struct.pack('!B', self.ttl)
protocol = struct.pack('!B', self.protocol)
checksum = struct.pack('H', 0)
source_ip = convert_v4_address_string_to_hex(self.source_ip)
destination_ip = convert_v4_address_string_to_hex(self.destination_ip)
# Data is not included in the checksum
packet_without_checksum = version_and_header_length + \
type_of_service + \
total_length + \
identification + \
flags + \
ttl + \
protocol + \
checksum + \
source_ip + \
destination_ip
checksum = self._checksum(packet_without_checksum)
checksum = struct.pack('H', checksum)
payload = self.payload
packet = version_and_header_length + \
type_of_service + \
total_length + \
identification + \
flags + \
ttl + \
protocol + \
checksum + \
source_ip + \
destination_ip + \
payload
return packet
def _deserialize(self, raw_packet_bytes):
version_and_header_length = struct.unpack('B', raw_packet_bytes[0:1])[0]
self.version = (version_and_header_length & int('11110000', 2)) >> 4
self.header_length = (version_and_header_length & int('00001111', 2))
self.type_of_service = struct.unpack('B', raw_packet_bytes[1:2])[0]
self.total_length = struct.unpack('!H', raw_packet_bytes[2:4])[0]
self.identification = struct.unpack('!H', raw_packet_bytes[4:6])[0]
flags_and_fragment_offset = struct.unpack('!H', raw_packet_bytes[6:8])[0]
self.flags = (flags_and_fragment_offset & int('1110000000000000', 2)) >> 13
self.fragment_offset = (flags_and_fragment_offset & int('0001111111111111', 2))
self.ttl = struct.unpack('B', raw_packet_bytes[8:9])[0]
self.protocol = struct.unpack('B', raw_packet_bytes[9:10])[0]
# Remember that checksum is not big-endian
self.checksum = struct.unpack('H', raw_packet_bytes[10:12])[0]
source_ip_hex = struct.unpack('!I', raw_packet_bytes[12:16])[0]
self.source_ip = convert_v4_address_hex_to_string(source_ip_hex)
destination_ip_hex = struct.unpack('!I', raw_packet_bytes[16:20])[0]
self.destination_ip = convert_v4_address_hex_to_string(destination_ip_hex)
self.payload = raw_packet_bytes[20:]
# --==-- ==--== ==---== --==-- ==--== ==---== --==-- ==--== ==---== --==-- ==--== ==---==
# Transmission Control Protocol (TCP) :
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Port | Destination Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Sequence Number |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Acknowledgment Number |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Data | |U|A|P|R|S|F| |
# | Offset| Reserved |R|C|S|S|Y|I| Window |
# | | |G|K|H|T|N|N| |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Checksum | Urgent Pointer |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Options | Padding |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class TcpPacket(Packet):
def __init__(self,
source_ip=None,
destination_ip=None,
protocol=None,
source_port=0,
destination_port=0,
sequence_number=123,
ack_number=0,
data_offset=5,
fin=False,
syn=False,
rst=False,
psh=False,
ack=False,
urg=False,
window_size=53270,
checksum=0,
urgent_pointer=0,
options=None,
payload=''):
self.source_ip = source_ip
self.destination_ip = destination_ip
self.protocol = protocol
self.source_port = source_port
self.destination_port = destination_port
self.sequence_number = sequence_number
self.ack_number = ack_number
self.data_offset = data_offset
self.fin = fin
self.syn = syn
self.rst = rst
self.psh = psh
self.ack = ack
self.urg = urg
self.window_size = window_size
self.checksum = checksum
self.urgent_pointer = urgent_pointer
self.options = options
self.payload = payload
def _serialize(self):
# TCP Packet Structure http://en.wikipedia.org/wiki/Transmission_Control_Protocol
# Why pseudo header for checksum calculation?
# Read more http://www.tcpipguide.com/free/t_TCPChecksumCalculationandtheTCPPseudoHeader-2.htm
# --==-- --==--
# Options
if not self.options:
# Create the default options
self.options = \
struct.pack('!BBH', 2, 4, 1460) + \
struct.pack('!BB', 4, 2) + \
struct.pack('!BBII', 8, 10, 63022427, 0) + \
struct.pack('!B', 1) + \
struct.pack('!BBB', 3, 3, 7)
options_length_in_bytes = len(self.options)
options_length_with_padding_in_bytes = (i for i in range(0, 140, 4) if i >= options_length_in_bytes).next()
if options_length_with_padding_in_bytes > options_length_in_bytes:
self.options += '\000' * (options_length_with_padding_in_bytes - options_length_in_bytes)
options_length_with_padding_in_words = options_length_with_padding_in_bytes / 4
data_offset = (((self.data_offset + options_length_with_padding_in_words) << 4) | 0)
# --==-- --==--
# Pseudo Header
flags = self.fin + (self.syn << 1) + (self.rst << 2) + (self.psh << 3) + (self.ack << 4) + (self.urg << 5)
tcp_header = struct.pack('!HHIIBBHHH',
self.source_port,
self.destination_port,
self.sequence_number,
self.ack_number,
data_offset,
flags,
self.window_size,
0,
self.urgent_pointer) + self.options
tcp_header_length = len(tcp_header) + len(self.payload)
pseudo_header = convert_v4_address_string_to_hex(self.source_ip) + \
convert_v4_address_string_to_hex(self.destination_ip) + \
struct.pack('!BBH', 0, self.protocol, tcp_header_length)
packet_to_checksum = pseudo_header + tcp_header + self.payload
# --==-- --==--
# The actual Packet
checksum = self._checksum(packet_to_checksum)
packet_with_checksum = struct.pack('!HHIIBBH',
self.source_port,
self.destination_port,
self.sequence_number,
self.ack_number,
data_offset,
flags,
self.window_size) + \
struct.pack('H', checksum) + \
struct.pack('!H', self.urgent_pointer) + self.options
packet = packet_with_checksum + self.payload
return packet
def _deserialize(self, raw_packet_bytes):
self.source_port = struct.unpack('!H', raw_packet_bytes[0:2])[0]
self.destination_port = struct.unpack('!H', raw_packet_bytes[2:4])[0]
self.sequence_number = struct.unpack('!I', raw_packet_bytes[4:8])[0]
self.ack_number = struct.unpack('!I', raw_packet_bytes[8:12])[0]
self.data_offset = (struct.unpack('!B', raw_packet_bytes[12:13])[0] & int('11110000', 2)) >> 4
flags = struct.unpack('!B', raw_packet_bytes[13:14])[0]
self.fin = (flags & int('00000001', 2)) != 0
self.syn = (flags & int('00000010', 2)) != 0
self.rst = (flags & int('00000100', 2)) != 0
self.psh = (flags & int('00001000', 2)) != 0
self.ack = (flags & int('00010000', 2)) != 0
self.urg = (flags & int('00100000', 2)) != 0
self.window_size = struct.unpack('!H', raw_packet_bytes[14:16])[0]
# Checksum is little endian (no !)
self.checksum = struct.unpack('H', raw_packet_bytes[16:18])[0]
self.urgent_pointer = struct.unpack('!H', raw_packet_bytes[18:20])[0]
if self.data_offset > 5:
options_start = 20
options_end = self.data_offset * 4
self.options = raw_packet_bytes[options_start:options_end]
self.payload = raw_packet_bytes[options_end:]
else:
self.payload = raw_packet_bytes[20:]
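# --==-- Usage sketch (illustrative, not part of the original module) --==--
# A plausible way to build a TCP SYN segment with the class above. The
# addresses and ports are made-up values, and IpProtocol.tcp (6) is assumed to
# be defined in the IpProtocol enum along with the numbers listed earlier; the
# pseudo-header needed for the checksum is derived from these fields inside
# _serialize().
def _example_build_tcp_syn():
    syn = TcpPacket(source_ip='10.0.0.2',
                    destination_ip='10.0.0.1',
                    protocol=IpProtocol.tcp,
                    source_port=44321,
                    destination_port=80,
                    syn=True)
    return syn._serialize()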
# --==-- ==--== ==---== --==-- ==--== ==---== --==-- ==--== ==---== --==-- ==--== ==---==
# User Datagram Protocol (UDP) :
# 0 7 8 15 16 23 24 31
# +--------+--------+--------+--------+
# | Source | Destination |
# | Port | Port |
# +--------+--------+--------+--------+
# | | |
# | Length | Checksum |
# +--------+--------+--------+--------+
# |
# | data octets ...
# +---------------- ...
class UdpPacket(Packet):
def __init__(self,
source_ip=None,
destination_ip=None,
protocol=None,
source_port=0,
destination_port=0,
length=0,
checksum=0,
payload=''):
self.source_port = source_port
self.destination_port = destination_port
self.length = length
self.checksum = checksum
self.source_ip = source_ip
self.destination_ip = destination_ip
self.protocol = protocol
        self.payload = payload
def _serialize(self):
self.length = 8 + len(self.payload)
pseudo_header = convert_v4_address_string_to_hex(self.source_ip) + \
convert_v4_address_string_to_hex(self.destination_ip) + \
struct.pack('!BBH',
0,
self.protocol,
self.length)
udp_header = struct.pack('!HHHH',
self.source_port,
self.destination_port,
self.length,
0)
packet_to_checksum = pseudo_header + udp_header
self.checksum = self._checksum(packet_to_checksum)
packet = struct.pack('!HHH',
self.source_port,
self.destination_port,
self.length) + \
struct.pack('H', self.checksum) + \
self.payload
return packet
def _deserialize(self, raw_packet_bytes):
self.source_port = struct.unpack('!H', raw_packet_bytes[0:2])[0]
self.destination_port = struct.unpack('!H', raw_packet_bytes[2:4])[0]
self.length = struct.unpack('!H', raw_packet_bytes[4:6])[0]
self.checksum = struct.unpack('!H', raw_packet_bytes[6:8])[0]
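# --==-- Usage sketch (illustrative, not part of the original module) --==--
# Composing the classes above: the UDP datagram is built first, because its
# checksum needs the IP addresses and protocol number for the pseudo-header,
# and the serialized bytes then become the payload of an IpPacket. Addresses
# and ports are made-up values; the internal _serialize() methods are called
# directly because no public wrapper is shown in this file.
def _example_build_udp_over_ip():
    udp = UdpPacket(source_ip='192.168.1.10',
                    destination_ip='192.168.1.1',
                    protocol=IpProtocol.udp,
                    source_port=40000,
                    destination_port=53)
    ip = IpPacket(protocol=IpProtocol.udp,
                  source_ip='192.168.1.10',
                  destination_ip='192.168.1.1',
                  payload=udp._serialize())
    return ip._serialize()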
``` |
{
"source": "jossef/python-vmware-client",
"score": 2
} |
#### File: python-vmware-client/examples/replace-license.py
```python
from vmwc import VMWareClient
def main():
host = '192.168.1.1'
username = '<username>'
password = '<password>'
license = 'XXXXX-XXXXX-XXXXX-XXXXX-XXXXX'
with VMWareClient(host, username, password) as client:
client.replace_license(license)
if __name__ == '__main__':
main()
```
#### File: python-vmware-client/examples/virtual-machines-reconfigure-bios.py
```python
from vmwc import VMWareClient
def main():
host = '192.168.1.1'
username = '<username>'
password = '<password>'
with VMWareClient(host, username, password) as client:
for vm in client.get_virtual_machines():
vm.power_off()
# Good configuration for PXE boot from lan (you'd like that the network will prioritize pre-disk, the boot delay)
vm.configure_bios(boot_delay=5000, boot_order=['network', 'disk'])
if __name__ == '__main__':
main()
``` |
{
"source": "jossehblanco/ProgramacionVisual",
"score": 2
} |
#### File: teoserver/api/serializers.py
```python
from rest_framework import serializers
# Assumed import: create() below references a Params model that is not
# imported anywhere in this file; adjust the module path to wherever it lives.
from .models import Params
class ParamsSerializer(serializers.Serializer):
MAXLINEA = serializers.CharField()
MAXDIGIT = serializers.CharField()
MAXID = serializers.CharField()
def create(self, validated_data):
return Params.objects.create(**validated_data)
```
#### File: teoserver/IDE/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index (request):
return render(request, "IDE/ide.html",{})
``` |
{
"source": "josselinauguste/python-monads",
"score": 3
} |
#### File: python-monads/monads/future.py
```python
import threading
from functools import reduce
from monads.maybe import Just, Nothing
from monads.either import Either, Right, Left
class Future:
# __init__ :: ((Either err a -> void) -> void) -> Future (Either err a)
def __init__(self, f):
self.subscribers = []
self.cache = Nothing()
self.semaphore = threading.BoundedSemaphore(1)
f(self.__callback)
# pure :: a -> Future a
@staticmethod
def pure(value):
return Future(lambda cb: cb(Either.pure(value)))
@staticmethod
def __exec(f, cb):
try:
data = f()
cb(Right(data))
except Exception as err:
cb(Left(err))
@staticmethod
def __exec_on_thread(f, cb):
t = threading.Thread(target=Future.__exec, args=[f, cb])
t.start()
@staticmethod
def do_async(f):
return Future(lambda cb: Future.__exec_on_thread(f, cb))
# map :: (a -> b) -> Future b
def map(self, f):
return self.bind(lambda x: self.pure(f(x)))
# bind :: (a -> Future b) -> Future b
def bind(self, f):
return Future(
lambda cb: self.__subscribe(
lambda value: cb(value)
                if isinstance(value, Left)
else f(value.value).__subscribe(cb)
)
)
# traverse :: [a] -> (a -> Future b) -> Future [b]
@staticmethod
def traverse(arr):
return lambda f: reduce(
lambda acc, elem: acc.bind(
lambda values: f(elem).map(lambda value: values + [value])
),
arr,
Future.pure([]),
)
# callback :: Either err a -> void
def __callback(self, value):
self.semaphore.acquire()
self.cache = Just(value)
while len(self.subscribers) > 0:
sub = self.subscribers.pop(0)
t = threading.Thread(target=sub, args=[value])
t.start()
self.semaphore.release()
# subscribe :: (Either err a -> void) -> void
def __subscribe(self, subscriber):
self.semaphore.acquire()
if isinstance(self.cache, Just):
self.semaphore.release()
subscriber(self.cache.value)
else:
self.subscribers.append(subscriber)
self.semaphore.release()
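# Usage sketch (illustrative only, not part of the original module): do_async
# runs a plain function on a worker thread and wraps its outcome in an Either
# (Right on success, Left on a raised exception); map and bind then chain
# further work without blocking the caller. All names below are made up.
def _example_future_pipeline():
    def slow_answer():
        return 20 + 21
    future = (Future.do_async(slow_answer)
              .map(lambda n: n + 1)
              .bind(lambda n: Future.pure(n * 2)))
    # Once the worker thread and its continuations finish, the resulting
    # Right(84) is cached inside `future`; callers normally keep composing
    # with map/bind rather than reading the cache directly.
    return future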
``` |
{
"source": "JosselinLuneau/BachelorDIM-Lectures-Algorithms-2019",
"score": 3
} |
#### File: scripts/S4/s4_tools.py
```python
import os
import pika
import config
queueName = 'post'
def InitConnection(timeout = 5):
    # Read the CLOUDAMQP_URL environment variable (fall back to the URL in config)
    url = os.environ.get('CLOUDAMQP_URL', config.CLOUD_AMQP_URL)
params = pika.URLParameters(url)
params.socket_timeout = timeout
connection = pika.BlockingConnection(params) # Connect to CloudAMQP
return connection
```
#### File: BachelorDIM-Lectures-Algorithms-2019/test_unit/test_S2.py
```python
import pytest
import numpy as np
import cv2
import scripts.S1_aglotools as algo
# INIT UTILS VARIABLES #
array=[1,5,8,7,0,-5,4-2,0,10,9,7]
# Exercise 1
def test_averrage_above_zero():
assert algo.average_above_zero(array) == 6.125
# Errors
def test_averrage_TypeError_list():
with pytest.raises(TypeError):
algo.average_above_zero(8)
def test_averrage_valueError_empty_list():
with pytest.raises(ValueError):
algo.average_above_zero([])
def test_averrage_divide_by_zero():
with pytest.raises(ZeroDivisionError):
algo.average_above_zero([0,0,0,0])
def test_averrage_negative_array_values():
with pytest.raises(ZeroDivisionError):
algo.average_above_zero([-2,-3,-8,-7])
def test_averrage_char_list():
with pytest.raises(TypeError):
algo.average_above_zero(['1', 'e'])
# Exercise 2
def test_max_value():
assert algo.max_value(array) == (10, 8)
# Errors
def test_max_value_TypeError_list():
with pytest.raises(TypeError):
algo.max_value(8)
def test_max_value_ValueError_empty_list():
with pytest.raises(ValueError):
algo.max_value([])
def test_max_value_char_list():
with pytest.raises(TypeError):
algo.max_value(['1', 'e'])
# Exercise 3
def test_reverse_table():
answer=[7, 9, 10, 0, 2, -5, 0, 7, 8, 5, 1]
assert algo.reverse_table(array) == answer
# Errors
def test_reverse_table_TypeError_list():
with pytest.raises(TypeError):
algo.reverse_table(8)
def test_reverse_table_ValueError_empty_list():
with pytest.raises(ValueError):
algo.reverse_table([])
# Exercise 4
def test_roi_bbox():
img=cv2.imread('img.png', 0)
answer=np.array([[16, 19], [16, 702], [19, 468], [702, 468]])
assert (algo.roi_bbox(img) == answer).prod()
# Errors
def test_roi_bboxe_TypeError_list():
with pytest.raises(TypeError):
algo.roi_bbox(8)
def test_roi_bbox_valueError_empty_list():
with pytest.raises(ValueError):
algo.roi_bbox([])
# Exercise 5
def test_random_fill_sparse():
fill_value='/'
np_array=algo.random_fill_sparse(algo.char_array, 3, fill_value)
count=len(np.argwhere(np_array==fill_value))
assert 3 == count
# Errors
def test_random_fill_sparse_TypeError_list():
with pytest.raises(TypeError):
algo.random_fill_sparse(8, 1)
def test_random_fill_sparse_valueError_empty_list():
with pytest.raises(ValueError):
algo.random_fill_sparse([], 1)
def test_random_fill_sparse_TypeError_int():
with pytest.raises(TypeError):
algo.random_fill_sparse(array, 'a')
```
#### File: BachelorDIM-Lectures-Algorithms-2019/test_unit/test_S3.py
```python
import pytest
import scripts.S3_imgproc_tools as S3
import numpy as np
def test_invert_colors_manual_slow():
img=np.array([[[255,255,255], [255, 255, 255], [255, 255, 255]]])
assert (S3.invert_colors_manual_slow(img) == np.array([[[0,0,0], [0,0,0], [0, 0, 0]]])).prod()
def test_invert_colors_manual_fast():
img=np.array([[255,255,255], [255, 255, 255]])
assert (S3.invert_colors_manual_fast(img) == np.array([[0,0,0], [0,0,0]])).prod()
``` |
{
"source": "JosselinSomervilleRoberts/SpeechBubbleSubtitles",
"score": 4
} |
#### File: SpeechBubbleSubtitles/bubbleLibrary/bubbleClass.py
```python
from bubbleLibrary.utils_cv2 import rounded_rectangle, dist
import numpy as np
import cv2
class Bubble:
def __init__(self):
self.computed = False # if the bubble was computed
self.center = (0, 0) # Center of bubble
self.width = 0 # Width
self.height = 0 # Height
self.lines = [] # Lines of text to display
self.font_scale = 0.5 # Font scale
self.attach_mouth = (0, 0) # Coordinates of the attach point (mouth)
        self.attach_bubble = [(0,0), (0,0)] # Coordinates of the two points where the tail attaches to the bubble
        self.perso = None # Character
        self.frame_end = -1 # Frame at which the bubble stops being displayed (-1 if not set)
self.display_attach = False
def initiateAttachBubble(self):
"""compute the attach of the tail on the bubble. The two points should belong to :
- an edge of the bubble rectangle
- the lines passing through the attach on the mouth and the center of the rectangle +/- a fraction of its height (called up and down in the code)
-> we compute their equation as y=a_up*x+b_up and y=a_down*x+b_down
"""
center_up = (self.center[0], self.center[1] - self.height/5.)
center_down = (self.center[0], self.center[1] + self.height/5.)
delta_x = self.center[0] - self.attach_mouth[0]
if delta_x == 0:
delta_x = 1e-5
a_up = (center_up[1] - self.attach_mouth[1]) / delta_x
b_up = center_up[1] - a_up*center_up[0]
a_down = (center_down[1] - self.attach_mouth[1]) / delta_x
b_down = center_down[1] - a_down*center_down[0]
#compute the x coordinate on an edge of the rectangle depending on the mouth's position
if self.attach_mouth[0] < self.center[0] - self.width/2.: # left edge
#print(" left")
x_up = self.center[0] - self.width/2.
y_up = a_up*x_up + b_up
x_down = self.center[0] - self.width/2.
y_down = a_down*x_down + b_down
elif self.attach_mouth[0] > self.center[0] + self.width/2.: # right edge
#print(" right")
x_up = self.center[0] + self.width/2.
y_up = a_up*x_up + b_up
x_down = self.center[0] + self.width/2.
y_down = a_down*x_down + b_down
elif self.attach_mouth[1] > self.center[1]: # bottom edge
#print(" bottom")
y_up = self.center[1] + self.height/2.
x_up = (y_up - b_up) / a_up
y_down = self.center[1] + self.height/2.
x_down = (y_down - b_down) / a_down
else: #upper edge
#print(" up")
y_up = self.center[1] - self.height/2.
x_up = (y_up - b_up) / a_up
y_down = self.center[1] - self.height/2.
x_down = (y_down - b_down) / a_down
#y_up and y_down may be outside of the rectangle's range, so crop them if that's the case
y_up = max(y_up, self.center[1] - self.height/5.)
y_up = min(y_up, self.center[1] + self.height/5.)
y_down = max(y_down, self.center[1] - self.height/5.)
y_down = min(y_down, self.center[1] + self.height/5.)
#self.attach_bubble[0] = (int(self.center[0]), int(self.center[1] - self.height/5.))
#self.attach_bubble[1] = (int(self.center[0]), int(self.center[1] + self.height/5.))
self.attach_bubble[0] = (int(x_up), int(y_up))
self.attach_bubble[1] = (int(x_down), int(y_down))
def initiate(self, center, width, height, lines, attach_mouth, display_attach, frame_end = -1, perso = None):
self.center = center
self.width = width
self.height = height
self.lines = lines
self.attach_mouth = attach_mouth
self.frame_end = frame_end
self.perso = perso
self.display_attach = display_attach
self.initiateAttachBubble()
#-------
#Getters
#-------
def getWidth(self):
return self.width
def getHeight(self):
return self.height
#-------
#Setters
#-------
def setAttachMouth(self, new_attach_mouth):
self.attach_mouth = new_attach_mouth
def setWidthAndHeight(self, new_width, new_height):
self.width = new_width
self.height = new_height
#---------
#Functions
#---------
#-----------------------------------
#Determine the bubble's optimal area
#-----------------------------------
def estimateOptimalBubbleArea(self, bubble_width = 200):
#find a good area for the bubble depending on the text
#we take the approximate area that would be necessary to fit the text into a bubble of width bubble_width and add a 25% margin
text_size = cv2.getTextSize(self.lines, fontFace = cv2.FONT_HERSHEY_SIMPLEX, fontScale = self.font_scale, thickness = 2)[0]
nb_lines = text_size[0] // bubble_width + 1
bubble_height = 2 * text_size[1] * nb_lines
return 1.25 * bubble_width * bubble_height
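    # Worked example of the heuristic above (illustrative numbers): if the text
    # measures 750 px wide and 12 px tall at this font scale, then
    # nb_lines = 750 // 200 + 1 = 4, bubble_height = 2 * 12 * 4 = 96, and the
    # suggested area is 1.25 * 200 * 96 = 24000 px^2.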
#---------------
#Draw the bubble
#---------------
def drawBubble(self, frame, draw_tail = True):
"""
Draws a bubble
input:
frame: opencv image
center: center of the bubble (tuple of ints)
width, height: width and height of the bubble
attach: position of the end of the bubble tail (tuple)
"""
top_left = (self.center[0] - int(self.width/2.), self.center[1] - int(self.height/2.))
bottom_right = (self.center[0] + int(self.width/2.), self.center[1] + int(self.height/2.))
# Parameters for the bubble
radius = 0.5
outline_color = (255, 0, 0 )
fill_color = (255, 255, 255)
thickness = 2
line_type = cv2.LINE_AA
shapes = np.zeros_like(frame, np.uint8)
#Draw the tail of the bubble
if self.display_attach:
#it is the intersection of the triangle (mouth-attach_bubble[0]-attach_bubble[1]) and one of the sides of the rectangle
self.initiateAttachBubble()
attach_points = np.array([self.attach_bubble[0], self.attach_bubble[1], self.attach_mouth])
cv2.drawContours(shapes, [attach_points], 0, fill_color, -1)
cv2.drawContours(shapes, [attach_points], 0, outline_color, thickness)
#Draw the rectangle for the bubble
rounded_rectangle(shapes,
top_left,
(bottom_right[1], bottom_right[0]),
radius = radius,
outline_color = outline_color,
fill_color = fill_color,
thickness = thickness,
line_type = line_type)
#Add the bubble to the frame
alpha = 0.5
mask = shapes.astype(bool)
frame[mask] = cv2.addWeighted(frame, alpha, shapes, 1 - alpha, 0)[mask]
#-------------
#Draw the text
#-------------
def cutLinesIntoWords(self):
"""Returns the list of words in self.lines. eg if self.lines = "Cool cool cool", returns ["Cool", "Cool", "Cool"]"""
list_of_words = [""]
lines_index = 0
while lines_index < len(self.lines):
#if a \n or \N is found (raw or normal), transform it into the \n word. We assume that whenever there is a \ in the text it is followed by n or N.
if "\\" in r"%r" % self.lines[lines_index:lines_index+1]:
list_of_words.append("\n")
list_of_words.append("")
lines_index +=2
elif "\\n" in r"%r" % self.lines[lines_index:lines_index+1] or "\\N" in r"%r" % self.lines[lines_index:lines_index+1]:
list_of_words.append("\n")
lines_index += 1
#else, just add the character to the current word unless it is a space
else:
if self.lines[lines_index] == ' ':
list_of_words.append("")
else:
list_of_words[-1] += self.lines[lines_index]
lines_index += 1
return list_of_words
def newLinePosition(word):
"""Finds the position (if it exists) of a new line. Should work for strings and raw strings"""
for i in range(len(word)-1):
if "\\" in r"%r" % word[i:i+1] or "\\n" in r"%r" % word[i:i+1] or "\\N" in r"%r" % word[i:i+1]:
return i
return -1
def drawText(self, frame):
"""Draws the text inside the bubble"""
text_per_line = [""] #text we are going to display on each line
list_of_words = self.cutLinesIntoWords()
size_space = cv2.getTextSize(" ", fontFace = cv2.FONT_HERSHEY_SIMPLEX, fontScale = self.font_scale, thickness = 2)[0][0]
margin = 20 #margin for text to leave some space at the boundary of the bubble
empty_space_on_line = self.width - margin #size of empty space at the end of the line that can be filled with words
for word in list_of_words:
#if word is a \n, just change line
if word == "\n":
text_per_line.append("")
empty_space_on_line = self.width - margin
else:
height_word = cv2.getTextSize(word, fontFace = cv2.FONT_HERSHEY_SIMPLEX, fontScale = self.font_scale, thickness = 2)[0][1]
size_word = cv2.getTextSize(word, fontFace = cv2.FONT_HERSHEY_SIMPLEX, fontScale = self.font_scale, thickness = 2)[0][0]
#if there is not enough space on the current line, change line and reset the space left on the line
if size_word > empty_space_on_line:
text_per_line.append("")
empty_space_on_line = self.width - margin
#add current word to the right line
text_per_line[-1] += word + " "
empty_space_on_line -= size_word + size_space
#display the right text on each line
nb_lines = len(text_per_line)
for index_line in range(nb_lines):
size_text = cv2.getTextSize(text_per_line[index_line], fontFace = cv2.FONT_HERSHEY_SIMPLEX, fontScale = self.font_scale, thickness = 2)[0][0]
y_org = int(self.center[1] - (nb_lines-1)*0.5*(self.height - margin) / nb_lines + index_line*(self.height - margin) / nb_lines)
#y_org = int(self.center[0] + 2 * 12 * index_line)
cv2.putText(frame, text_per_line[index_line], org = (int(self.center[0]-0.5*size_text), y_org),
fontFace = cv2.FONT_HERSHEY_SIMPLEX, fontScale = self.font_scale, color = (0, 0, 0), thickness = 2)
#---------------
#Draw everything
#---------------
    ### find a minimum acceptable size for the bubble depending on the text it contains
def draw(self, frame, draw_tail = True):
"""Draw the bubble and the text inside it"""
#Draw bubble
self.drawBubble(frame, draw_tail)
#Draw text
self.drawText(frame)
```
#### File: SpeechBubbleSubtitles/recognizer/position.py
```python
from recognizer.interpolable import Interpolable
class Position:
def __init__(self):
self.x = Interpolable()
self.y = Interpolable()
def add(self, value, frame_index):
self.x.add(value[0], frame_index)
self.y.add(value[1], frame_index)
def get(self, frame_index):
return (self.x.get(frame_index), self.y.get(frame_index))
def cleanup(self, frame_index):
self.x.cleanup(frame_index)
self.y.cleanup(frame_index)
def merge(self, other):
self.x.merge(other.x)
self.y.merge(other.y)
```
#### File: SpeechBubbleSubtitles/recognizer/utils.py
```python
from math import sqrt
def dist(x1, x2):
return sqrt((x1[0]-x2[0])**2 + (x1[1]-x2[1])**2)
def getBoxFromLandmark(landmark, frame_width, frame_height):
cx_min = frame_width
cy_min = frame_height
cx_max = cy_max = 0
for id, lm in enumerate(landmark):
cx, cy = int(lm.x * frame_width), int(lm.y * frame_height)
if cx < cx_min:
cx_min = cx
if cy < cy_min:
cy_min = cy
if cx > cx_max:
cx_max = cx
if cy > cy_max:
cy_max = cy
# From top-left/bottom-right ------> To Center/width-height
w = cx_max - cx_min
h = cy_max - cy_min
x = int((cx_min + cx_max) / 2.)
y = int((cy_min + cy_max) / 2.)
return (x,y,w,h)
``` |
{
"source": "JossendalDevelopment/Virtual-game-controller",
"score": 2
} |
#### File: Virtual-game-controller/server/main.py
```python
from network_setup import Window
from flask_socketio import SocketIO
from flask_cors import CORS
from flask import Flask, request, jsonify, url_for, Response
from ctypes_init import keyboard_stream, SendInput
import asyncio
import ssl
import secrets
import json
import keyboard
import time
import win32api
import eventlet
eventlet.monkey_patch()
ip = Window.get_users_ip()
if ip == "":
ip = "0.0.0.0"
# Create the Flask app
app = Flask(__name__)
# enable CORS
CORS(app, resources={r'/*': {'origins': '*'}})
# This is a properly random 32 bit secret key generated via:
# "".join([chr(secrets.randbits(8)) for x in range(32)]).encode()
app.config['SECRET_KEY'] = "".join(
[chr(secrets.randbits(8)) for x in range(32)]).encode()
# app.secret_key = b"\<KEY>" \
# b"\xc2\x9d\xc3\xb5\xc2\x86L^0}\x12,\\\x01\xc2\xa8P\xc3\xb2" \
# b"\xc2\xber@\xc3\xaf\x02(\xc2\xa8\t"
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='eventlet')
def message_received(methods=['GET', 'POST']):
print('message was received!!!')
@app.route('/')
def index():
print("Route '/' loaded")
return 'Loaded'
@app.route('/get_key_mapping', methods=['GET'])
def get_key_mapping():
with open('keyMappings.json') as json_file:
data = json.load(json_file)
return jsonify(data)
@app.route('/set_key_mapping', methods=['POST'])
def set_key_mapping():
data = request.get_json()
new_json = {}
new_json['keyMappings'] = data
with open('./keyMappings.json', 'w') as json_file:
json.dump(new_json, json_file, indent=2)
return jsonify(new_json)
@socketio.on('message')
def handle_message(message):
    print('received message: ', message)
@socketio.on('json')
def handle_json(json):
print('received json: ' + str(json))
@socketio.on('keypress')
def handle_keypress(json_data, methods=['GET', 'POST']):
time.sleep(3)
try:
print('received keypress: ', json_data['key'])
# keyboard.press_and_release(json_data['key'])
# keyboard_stream(json_data['key'])
for event in json_data['key']:
win32api.keybd_event(int(event, base=16), 0, 0, 0)
time.sleep(0.1)
# for event in json_data['key']:
# win32api.keybd_event(event, 0, 0, 0)
# # win32api.keybd_event(0x010, 0, 0, 0)
# # SendInput(event)
# time.sleep(0.1)
socketio.emit('keypress-response', json_data,
callback=message_received)
except ValueError:
print("ERROR FOUND")
error_response = {"status": 500, "msg": "Unrecognized key value"}
socketio.emit('keypress-response', error_response)
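# Client-side sketch (illustrative, not part of this server): one way to poke
# the 'keypress' handler above using the python-socketio client package that
# flask_socketio builds on. The address and the virtual-key code are made-up
# values; adjust them to the host/port chosen at startup.
def example_keypress_client():
    import socketio as sio  # local import, avoids clashing with the SocketIO() instance above
    client = sio.Client()
    client.on('keypress-response', lambda data: print('server replied:', data))
    client.connect('http://192.168.1.100:5000')
    client.emit('keypress', {'key': ['0x41']})  # '0x41' is the virtual-key code for 'A'
    client.sleep(5)  # the handler sleeps 3 s before replaying the keystroke
    client.disconnect()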
if __name__ == "__main__":
print("START SERVER WITH IP", ip)
socketio.run(app, debug=True, port=5000, host=ip)
```
#### File: Virtual-game-controller/server/network_setup.py
```python
from tkinter import *
import socket
ip_addr = ""
class Window(Frame):
def __init__(self, parent=None):
Frame.__init__(self, parent)
self.parent = parent
self.user_input = Entry(self)
self.init_window()
@staticmethod
def get_users_ip():
return ip_addr
def init_window(self):
self.pack(fill=BOTH, expand=1)
current_ip_addr = socket.gethostbyname(socket.gethostname())
Label(self, text="Please enter your local ip address").grid(
row=0, columnspan=3, pady=20, padx=20)
self.user_input.grid(row=1, columnspan=3)
self.user_input.insert(0, current_ip_addr)
self.user_input.focus_set()
quit_button = Button(self, text="Quit", command=self.client_exit)
quit_button.grid(row=2, column=1)
accept_button = Button(self, text="Connect",
command=self.connect)
accept_button.grid(row=2, column=2, padx=10, pady=10)
def client_exit(self):
exit()
def connect(self):
global ip_addr
ip_addr = self.user_input.get()
root.destroy()
def center(win):
"""
centers a tkinter window
:param win: the root or Toplevel window to center
"""
win.update_idletasks()
width = win.winfo_width()
frm_width = win.winfo_rootx() - win.winfo_x()
win_width = width + 2 * frm_width
height = win.winfo_height()
titlebar_height = win.winfo_rooty() - win.winfo_y()
win_height = height + titlebar_height + frm_width
x = win.winfo_screenwidth() // 2 - win_width // 2
y = win.winfo_screenheight() // 2 - win_height // 2
win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
# win.geometry('{}x{}'.format(x, y))
win.deiconify()
root = Tk()
# initial window dimensions
# root.geometry("400x200")
root.title("Setup")
app = Window(root)
center(root)
root.mainloop()
``` |
{
"source": "jossepio/jos",
"score": 3
} |
#### File: jos/cogs/game.py
```python
import discord
from discord.ext import commands
class Game(commands.Cog):
def __init__(self,client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("Game Cog has been loaded\n-----")
@commands.group(help="Base command")
async def game(self,ctx):
if ctx.invoked_subcommand is None:
await ctx.message.delete()
await ctx.send("`Lütfen oynamak istediğiniz oyunu belirtin.`",delete_after=5)
@game.group(help="PUBG oyuncularına duyuruluyor...")
async def pubg(self,ctx,member= discord.Member):
await ctx.send(f"Tamamdır {ctx.author.name}, PUBG oynamak istediğini tüm üyelere söylüyorum!\n@everyone bi bakalım buraya...")
@game.group(help="LoL oyuncularına duyuruluyor...")
async def lol(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, LoL oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Valorant oyuncularına duyuruluyor...")
async def valorant(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Valorant oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="CS:GO oyuncularına duyuruluyor...")
async def csgo(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, CS:GO oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Apex oyuncularına duyuruluyor...")
async def apex(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Apex oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="RS6 oyuncularına duyuruluyor...",aliases=["rainbow"])
async def rs6(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, RS6 oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="TFT oyuncularına duyuruluyor...")
async def tft(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, TFT oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Minecraft oyuncularına duyuruluyor...",aliases=['mc'])
async def minecraft(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Minecraft oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="COD: Warzone oyuncularına duyuruluyor...")
async def warzone(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, COD: Warzone oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Raft oyuncularına duyuruluyor...")
async def raft(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name},Raft oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="HOI oyuncularına duyuruluyor...",aliases=["hoi"])
async def hoi4(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, HOI4 oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Roblox oyuncularına duyuruluyor...")
async def roblox(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Roblox oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Rust oyuncularına duyuruluyor...")
async def rust(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Rust oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="COD: Warzone oyuncularına duyuruluyor...",aliases=["gtav","gta5"])
async def gta(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, GTA V oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Forest oyuncularına duyuruluyor...")
async def forest(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Forest oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Warcraft oyuncularına duyuruluyor...")
async def warcraft(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Warcraft oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
def setup(client):
client.add_cog(Game(client))
``` |
{
"source": "jossets/jekyll-toc",
"score": 3
} |
#### File: jossets/jekyll-toc/tests.py
```python
import os
import re
import unittest
import xml.etree.ElementTree as ET
class TestSequense(unittest.TestCase):
pass
def test_generator(a, b):
def test(self):
self.assertEqual(a, b)
return test
def normalize_xml(xml):
tree = ET.fromstring(xml)
return re.sub('\\n\s+', '', ET.tostring(tree))
if __name__ == '__main__':
test_path = os.path.join(os.getcwd(), '_site', 'tests')
for test_file in os.listdir(test_path):
path = os.path.join(test_path, test_file)
with open(path, 'r') as file:
actual, expected = file.read().split('<!-- /// -->')
actual = normalize_xml(actual)
expected = normalize_xml(expected)
# Add the unit test to our TestSequense
test_name = 'test_{}'.format(test_file)
test = test_generator(actual, expected)
setattr(TestSequense, test_name, test)
unittest.main()
``` |
{
"source": "jossiebk/tytus",
"score": 2
} |
#### File: team28/controllers/procedures.py
```python
import copy
from views.data_window import DataWindow
from utils.decorators import singleton
from controllers.symbol_table import SymbolTable
from controllers.error_controller import ErrorController
@singleton
class Procedures(object):
def __init__(self):
self.__storedProcedure = {}
def __searchProcedure(self, name: str):
"""
Method to search a stored procedure in the structure
:param name: The name of stored procedure
        :return: True if the stored procedure exists, False otherwise
"""
if name in self.__storedProcedure:
return True
return False
def saveProcedure(self, name, tac, line, column):
"""
Method to create a stored procedure in the structure
:param name: Stored procedure name
:param tac: Three-address code of procedure
:param line: The instruction line
:param column: The instruction column
:return: Returns nothing
"""
db = SymbolTable().useDatabase
# if not db:
# desc = f": Database not selected"
# ErrorController().add(4, 'Execution', desc,
# line, column)
# return
key = f"{name}"
if self.__searchProcedure(key):
desc = f": Function {name} already exists"
ErrorController().add(38, 'Execution', desc, line, column)
return False
newTac = copy.deepcopy(tac)
self.__storedProcedure[key] = {
'name': name,
'database': db,
'tac': newTac,
'line': line,
'column': column
}
return True
def getProcedure(self, name, params, line, column):
db = SymbolTable().useDatabase
# if not db:
# desc = f": Database not selected"
# ErrorController().add(4, 'Execution', desc,
# line, column)
# return
# key = f"{name}{db}"
key = f"{name}"
if key in self.__storedProcedure:
if params == len(self.__storedProcedure[key]['tac'].params):
sp = copy.deepcopy(self.__storedProcedure[key]['tac'])
return sp.print(sp.environment)
desc = f": Function {name} does not exist"
ErrorController().add(39, 'Execution', desc, line, column)
return None
def getParams(self, name):
db = SymbolTable().useDatabase
key = f"{name}{db}"
if key in self.__storedProcedure:
return self.__storedProcedure[key]['tac'].params
return []
def dropProcedure(self, name, line, column):
# db = SymbolTable().useDatabase
# if not db:
# desc = f": Database not selected"
# ErrorController().add(4, 'Execution', desc,
# line, column)
# return
# key = f"{name}{db}"
key = f"{name}"
if key in self.__storedProcedure:
DataWindow().consoleText('Query returned successfully: Function deleted')
return self.__storedProcedure.pop(key)
desc = f": Function {name} does not exist"
ErrorController().add(39, 'Execution', desc, line, column)
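# Usage sketch (illustrative only): this singleton is driven by the SQL
# interpreter elsewhere in the project, roughly along the lines of
#   Procedures().saveProcedure('fn_sum', tac_object, line, column)
#   Procedures().getProcedure('fn_sum', 2, line, column)  # 2 = argument count at the call site
# where tac_object is the three-address-code object built by the parser.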
```
#### File: models/Indexes/indexes.py
```python
from controllers.three_address_code import ThreeAddressCode
from views.data_window import DataWindow
from controllers.data_controller import DataController
from controllers.type_checker import TypeChecker
from controllers.error_controller import ErrorController
from controllers.symbol_table import SymbolTable
from models.objects.index import Index
from models.instructions.shared import Instruction
class Indexes(Instruction):
def __init__(self, type_index, table, variable, mode, list_column_reference, where_optional, line, column):
self.type_index = type_index
self.table = table
self.variable = variable
self.mode = mode
self.list_column_reference = list_column_reference
self.where_optional = where_optional
self.alias = table
self.line = line
self.column = column
self._tac = ''
def __repr__(self):
return str(vars(self))
def process(self, instruccion):
lista_sort = []
lista_id = []
type_index = self.type_index
table = self.table
variable = self.variable
mode = self.mode
lista_valores = self.list_column_reference
where_clause = self.where_optional
if isinstance(lista_valores, list):
for index, data in enumerate(lista_valores):
if isinstance(data, bool):
if data == True:
lista_sort.append('DESC')
else:
lista_sort.append('ASC')
else:
lista_id.append(data)
if len(lista_id) > len(lista_sort):
if index == len(lista_valores) - 1:
if len(lista_valores) == 1:
lista_sort.append("Not Sort")
elif isinstance(data, str):
lista_sort.append("Not Sort")
else:
pass
elif isinstance(lista_valores[index+1], bool):
pass
else:
lista_sort.append("Not Sort")
isTabla = self.searchTableIndex(table, self.line, self.column)
isDuplicate = self.searchDuplicateIndex(self.variable)
if isTabla and not isDuplicate:
if type_index.lower() == 'index': # Normal
if mode == None:
SymbolTable().add(Index(type_index, table, variable, 'BTREE', lista_id, lista_sort), variable,'Index', None, table, self.line, self.column)
DataWindow().consoleText('Query returned successfully: Create Index')
elif mode.upper() == 'BTREE':
SymbolTable().add(Index(type_index, table, variable, 'BTREE', lista_id, lista_sort), variable,'Index', None, table, self.line, self.column)
DataWindow().consoleText('Query returned successfully: Create Index')
else: # HASH
SymbolTable().add(Index(type_index, table, variable, 'HASH', lista_id, lista_sort), variable,'Index', None, table, self.line, self.column)
DataWindow().consoleText('Query returned successfully: Create Index')
else: # Unique
if mode == None:
SymbolTable().add(Index(type_index, table, variable, 'BTREE', lista_id, lista_sort), variable,'Index', None, table, self.line, self.column)
DataWindow().consoleText('Query returned successfully: Create Index')
elif mode.upper() == 'BTREE':
SymbolTable().add(Index(type_index, table, variable, 'BTREE', lista_id, lista_sort), variable,'Index', None, table, self.line, self.column)
DataWindow().consoleText('Query returned successfully: Create Index')
else: # HASH
SymbolTable().add(Index(type_index, table, variable, 'HASH', lista_id, lista_sort), variable,'Index', None, table, self.line, self.column)
DataWindow().consoleText('Query returned successfully: Create Index')
else:
desc = "FATAL ERROR, la tabla no existe para crear el index o el index es repetido"
ErrorController().add(34, 'Execution', desc, self.line, self.column)
def compile(self, enviroment):
temp = ThreeAddressCode().newTemp()
c3d = ThreeAddressCode().addCode(f"{temp} = '{self._tac}'")
def searchTableIndex(self, tabla, linea, column):
database_id = SymbolTable().useDatabase
lista = []
if not database_id:
desc = f": Database not selected"
ErrorController().add(4, 'Execution', desc, linea,column)#manejar linea y columna
return False
        # The database exists --> get the table
table_tp = TypeChecker().searchTable(database_id, tabla)
if not table_tp:
desc = f": Table does not exists"
ErrorController().add(4, 'Execution', desc, linea , column)#manejar linea y columna
return False
table_cont = DataController().extractTable(tabla,linea,column)
headers = TypeChecker().searchColumnHeadings(table_tp)
return True
def searchDuplicateIndex(self, name):
for index, c in enumerate(SymbolTable().getList()):
if c.value == name and c.dataType == 'Index':
return True
return False
class DropIndex(Instruction):
def __init__(self, name_index, line, column):
self.name_index = name_index
self.line = line
self.column = column
self._tac = ''
def __repr__(self):
return str(vars(self))
def process(self, enviroment):
for name in self.name_index:
isDropIndex = self.search_index(name)
if isDropIndex:
DataWindow().consoleText('Query returned successfully: Drop Index')
else:
desc = f": Name of Index not Exists"
ErrorController().add(4, 'Execution', desc, self.line , self.column)#manejar linea y columna
def compile(self, enviroment):
temp = ThreeAddressCode().newTemp()
c3d = ThreeAddressCode().addCode(f"{temp} = '{self._tac}'")
try:
pass
except:
desc = f": Name of Index not Exists"
ErrorController().add(4, 'Execution', desc, self.line , self.column)#manejar linea y columna
def search_index(self, name):
for index, c in enumerate(SymbolTable().getList()):
if c.value == name and c.dataType == 'Index':
# print('Entro')
del SymbolTable().getList()[index]
return True
return False
class AlterIndex(Instruction):
def __init__(self, name_index, new_name, line, column, isColumn, index_specific):
self.name_index = name_index
self.new_name = new_name
self.index_specific = index_specific
self.line = line
self.column = column
self.isColumn = isColumn
self._tac = ''
def __repr__(self):
return str(vars(self))
def process(self, enviroment):
if self.isColumn:
isChangeName = self.rename_column(self.name_index, self.new_name, self.index_specific)
if isChangeName:
DataWindow().consoleText('Query returned successfully: Alter Index')
else:
desc = f": Name of Index not Exists"
ErrorController().add(4, 'Execution', desc, self.line , self.column)#manejar linea y columna
else:
isChangeName = self.search_index(self.name_index, self.new_name)
if isChangeName:
DataWindow().consoleText('Query returned successfully: Alter Index')
else:
desc = f": Name of Index not Exists"
ErrorController().add(4, 'Execution', desc, self.line , self.column)#manejar linea y columna
def compile(self, enviroment):
temp = ThreeAddressCode().newTemp()
c3d = ThreeAddressCode().addCode(f"{temp} = '{self._tac}'")
try:
pass
except:
desc = f": Name of Index not Exists"
ErrorController().add(4, 'Execution', desc, self.line , self.column) #manejar linea y columna
def search_index(self, name, new_name):
for index, c in enumerate(SymbolTable().getList()):
if c.value == name and c.dataType == 'Index' and new_name != None:
# print('Entro')
SymbolTable().getList()[index].name.variable = new_name
SymbolTable().getList()[index].value = new_name
return True
return False
def rename_column(self, name, column, position):
for index, c in enumerate(SymbolTable().getList()):
if c.value == name and c.dataType == 'Index' and column != None:
if isinstance(position, int):
if position-1 > len(SymbolTable().getList())-1:
return False
SymbolTable().getList()[index].name.list_column_reference[position-1] = column
return True
else:
if not position in SymbolTable().getList()[index].name.list_column_reference:
return False
position = SymbolTable().getList()[index].name.list_column_reference.index(position)
SymbolTable().getList()[index].name.list_column_reference[position] = column
return True
return False
```
#### File: models/Other/funcion.py
```python
from models.instructions.shared import Instruction
from models.Other.ambito import Ambito
from controllers.three_address_code import ThreeAddressCode
from controllers.procedures import Procedures
from models.instructions.Expression.expression import DATA_TYPE, PrimitiveData
class Parametro(Instruction):
def __init__(self, id, data_type, line, column):
self.id = id
self.data_type = data_type
self.line = line
self.column = column
self._tac = ''
def compile(self):
pass
def process(self, environment):
pass
def __repr__(self):
return str(vars(self))
class Funcion(Instruction):
def __init__(self, id, params, body, val_return, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.val_return = val_return
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
temporal = None
if self.isNew:
self.environment = environment # TODO verificar
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
temporal = self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
temporal = self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
temporal = self.setVariables(fun['variables'], environment)
return temporal
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the function's exit label
lbl_exit = ThreeAddressCode().newLabel()
newAmbito.lbl_return = lbl_exit
        # Add the function body
self.body.compile(newAmbito)
        # Add the exit label
ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Prints the first declared variable, NOT a parameter
# ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
temp = ThreeAddressCode().newTemp()
            # Call the function
ThreeAddressCode().addCode(f"{self.id}()")
            # Get the function's return value
ThreeAddressCode().addCode("#Obteniendo valor de retorno--------")
ThreeAddressCode().addCode(f"{temp} = Stack[P]")
return temp
return None
class DropFuncion(Instruction):
def __init__(self, id, params, line, column):
self.id = id
self.params = params
self.line = line
self.column = column
class ProcedimientoAlmacenado(Instruction):
def __init__(self, id, params, body, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
if self.isNew:
self.environment = environment # TODO verificar
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
self.setVariables(fun['variables'], environment)
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the function's exit label
lbl_exit = ThreeAddressCode().newLabel()
newAmbito.lbl_return = lbl_exit
        # Add the function body
self.body.compile(newAmbito)
        # Add the exit label
ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Print the first declared variable, NOT a parameter
ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
            # Call the function
ThreeAddressCode().addCode(f"{self.id}()")
            # A stored procedure does NOT return anything
``` |
{
"source": "JossinetLab/CAAGLE",
"score": 3
} |
#### File: CAAGLE/scripts/search_interactants.py
```python
import sys, os, urllib, urllib2
from zipfile import ZipFile
from pymongo import MongoClient
from bs4 import BeautifulSoup
def download_data():
dirname = os.path.abspath(os.path.dirname(__file__))+'/../data'
if not os.path.exists(dirname):
os.makedirs(dirname)
### BIOGRID DATA ###
url = "http://thebiogrid.org/downloads/archives/Release%20Archive/BIOGRID-3.4.138/BIOGRID-ORGANISM-3.4.138.tab2.zip"
biogrid_path = dirname+'/BioGrid'
if not os.path.exists(biogrid_path):
os.makedirs(biogrid_path)
zip_response = urllib.urlopen(url)
tempory_file = url.split('/')[-1]
with open("%s/%s"%(biogrid_path, tempory_file), "wb") as fh:
fh.write(zip_response.read())
zip_response.close()
zf = ZipFile("%s/%s"%(biogrid_path, tempory_file))
zf.extractall(path=biogrid_path)
zf.close()
os.remove("%s/%s"%(biogrid_path, tempory_file))
biogrid_file_name = None
for file_name in os.listdir(biogrid_path):#we upload a directory
if not 'Saccharomyces_cerevisiae_S288c' in file_name:
os.remove("%s/%s"%(biogrid_path, file_name))
else:
biogrid_file_name = file_name
### SGD DATA ###
url = "http://downloads.yeastgenome.org/curation/chromosomal_feature/SGD_features.tab"
sgd_path = dirname+'/SGD'
if not os.path.exists(sgd_path):
os.makedirs(sgd_path)
response = urllib.urlopen(url)
outfile_content = str(response.read())
response.close()
sgd_file_name = url.split('/')[-1]
with open("%s/%s"%(sgd_path, sgd_file_name), 'w') as fh:
fh.write(outfile_content)
return biogrid_path+"/"+biogrid_file_name, sgd_path+"/"+sgd_file_name
def parse_biogrid_data(data_path):
genetic_interactions = {}
physical_interactions = {}
with open(data_path, 'r') as fh:
lines = fh.readlines()
for line in lines:
if not line.startswith('#'):
tokens = line.strip().split('\t')
interactorA = tokens[5]#BioGrid and SGD Systematic name
interactorB = tokens[6]#BioGrid and SGD Systematic name
interaction_type = tokens[12]#'genetic' or 'physical'
interactorA_organism = tokens[15]
interactorB_organism = tokens[16]
if interactorA_organism == '559292' and interactorB_organism == '559292':#NCBI taxonomy ID of 'Saccharomyces_cerevisiae_S288c'
biogrid_dict = genetic_interactions if interaction_type == 'genetic' else physical_interactions
intB_list = biogrid_dict.get(interactorA)
if not intB_list:
biogrid_dict[interactorA] = [interactorB]
else:
if interactorB not in intB_list:#we remove redundancy
intB_list.append(interactorB)
intA_list = biogrid_dict.get(interactorB)
if not intA_list:
biogrid_dict[interactorB] = [interactorA]
else:
if interactorA not in intA_list:#we remove redundancy
intA_list.append(interactorA)
return genetic_interactions, physical_interactions
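# Both returned dictionaries map an SGD systematic name to the list of names it
# interacts with, e.g. (hypothetical values) {'YAL001C': ['YBR123W', ...], ...}.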
def parse_sgd_data(data_path):
correlation_id_name = {}
with open(data_path, 'r') as fh:
lines = fh.readlines()
for line in lines:
tokens = line.strip().split('\t')
correlation_id_name[tokens[0].strip()] = tokens[3].strip()
return correlation_id_name
def search_interactants(genetic_interactions, physical_interactions, correlation_id_name):
"""
For each C. glabrata/Cagl annotation:
- retrieve the SGD link of the S. cerevisiae/Sace ortholog (if it exists ; several? currently NOT)
- convert the SGD ID, contained in the SGD link, into SGD systematic name
- get Sace GENETIC and PHYSICAL BioGrid interactors
For each Sace interactor:
- convert the SGD systematic name of this interactor into SGD ID
- from the SGD ID, contained in the SGD link, search the entire Cagl MongoDB for the absence or the presence of one or several Cagl orthologs
If no Cagl ortholog corresponds to the Sace interactor => keep the SGD link of the Sace interactor
If one or several Cagl ortholog(s) => keep the MongoDB ID of the Cagl annotation(s) ; for example: "5774cd8f9ae2c726d247b7cf@Candida_glabrata_CBS_138"
If interactors, update the C. glabrata MongoDB table named 'annotations':
'interactors':
'source': list of source(s) "db:biogrid:sgd_id"
'genetic': list of the Cagl ortholog ID of the Sace GENETIC interactors or, if no Cagl ortholog exists, the SGD link of the Sace interactor
'physical': list of the Cagl ortholog ID of the Sace PHYSICAL interactors or, if no Cagl ortholog exists, the SGD link of the Sace interactor
"""
client = MongoClient()
db = client['Candida_glabrata_CBS_138']
sgd_url ="http://www.yeastgenome.org/cgi-bin/locus.pl?dbid="
ids_to_update = []
for annotation in db['annotations'].find({'orthologs_in_non_CGD_species':{'$regex':"S. cerevisiae:"}}, no_cursor_timeout = True):
if annotation.get('interactors'):
print "%s, interactors already stored"%annotation['locus_tag']
else:
print annotation['locus_tag']
genetic_cagl_interactors = []
physical_cagl_interactors = []
source = []
for ortholog in annotation['orthologs_in_non_CGD_species']:
if ortholog.split(':')[0] == 'S. cerevisiae':
sgd_id = ':'.join(ortholog.split(':')[1:]).split('dbid=')[1]
sgd_systematic_name = correlation_id_name.get(sgd_id)
if sgd_systematic_name:
### Search C. glabrata orthologs of S. cerevisiae GENETIC interactors ###
genetic_interactors = genetic_interactions.get(sgd_systematic_name)
if genetic_interactors:
for genetic_interactor in genetic_interactors:#genetic_interactor = SGD Standard Name
for genetic_item in correlation_id_name.iteritems():
if genetic_item[1] == genetic_interactor:
if db['annotations'].find({'orthologs_in_non_CGD_species': {'$regex': "dbid=%s"%genetic_item[0]}}).count():
for genetic_annotation in db['annotations'].find({'orthologs_in_non_CGD_species': {'$regex': "dbid=%s"%genetic_item[0]}}, no_cursor_timeout = True):
genetic_cagl_interactors.append("%s@Candida_glabrata_CBS_138"%genetic_annotation['_id'])
else:# NO Cagl ortholog of the Sace genetic interactor
genetic_cagl_interactors.append(sgd_url+genetic_item[0])
### Search C. glabrata orthologs of S. cerevisiae PHYSICAL interactors ###
physical_interactors = physical_interactions.get(sgd_systematic_name)
if physical_interactors:
for physical_interactor in physical_interactors:#physical_interactor = SGD Standard Name
for physical_item in correlation_id_name.iteritems():
if physical_item[1] == physical_interactor:
if db['annotations'].find({'orthologs_in_non_CGD_species': {'$regex': "dbid=%s"%physical_item[0]}}).count():
for physical_annotation in db['annotations'].find({'orthologs_in_non_CGD_species': {'$regex': "dbid=%s"%physical_item[0]}}, no_cursor_timeout = True):
physical_cagl_interactors.append("%s@Candida_glabrata_CBS_138"%physical_annotation['_id'])
else:# NO Cagl ortholog of the Sace physical interactor
physical_cagl_interactors.append(sgd_url+physical_item[0])
if genetic_interactors or physical_interactors:
source.append("db:biogrid:%s"%sgd_id)
if genetic_cagl_interactors or physical_cagl_interactors:
interactors = {'source': source}
if genetic_cagl_interactors:
interactors ['genetic'] = list(set(genetic_cagl_interactors))#we remove redundancy
if physical_cagl_interactors:
interactors ['physical'] = list(set(physical_cagl_interactors))#we remove redundancy
ids_to_update.append((annotation['_id'], interactors))
### Update C. glabrata MongoDB ###
for _id in ids_to_update:
db['annotations'].find_one_and_update({'_id':_id[0]},{'$set':{'interactors':_id[1]}}, upsert=False)
client.close()
if __name__ == '__main__':
biogrid_path, sgd_path = download_data()
genetic_interactions, physical_interactions = parse_biogrid_data(biogrid_path)
#genetic_interactions, physical_interactions = parse_biogrid_data("/Users/laurence/CAAGLE/scripts/../data/BioGrid/BIOGRID-ORGANISM-Saccharomyces_cerevisiae_S288c-3.4.138.tab2.txt")
correlation_id_name = parse_sgd_data(sgd_path)
#correlation_id_name = parse_sgd_data("/Users/laurence/CAAGLE/scripts/../data/SGD/SGD_features.tab")
# search_interactants(parse_biogrid_data(biogrid_path), parse_sgd_data(sgd_path)) #FUNCTION search_interactants() REQUEST 3 ARGUMENTS !!!!!!!!! genetic_interactions, physical_interactions, correlation_id_name
search_interactants(genetic_interactions, physical_interactions, correlation_id_name)
``` |
{
"source": "josslei/Gender-Detection",
"score": 3
} |
#### File: Gender-Detection/script/Pt2Onnx.py
```python
import torch
import sys
def pt2onnx(pt_path, onnx_path, device, input_size=(3, 200, 200)):
model = torch.load(pt_path, map_location=device)
model.eval()
tracer_input = torch.randn(1, *input_size).to(device)
torch.onnx.export(model, tracer_input, onnx_path)
if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Please specify the input file (.pt file to be converted).')
        print('Please specify where to save the output file (.onnx file).')
        exit(-1)
# converting
print('Converting...')
pt2onnx(sys.argv[1], sys.argv[2], torch.device('cpu'))
print('Succeeded!')
```
#### File: josslei/Gender-Detection/utils.py
```python
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import os
import sys
def export_sample_images(amount:int, export_dir:str, dataset, shuffle=True):
os.makedirs(export_dir, exist_ok=True)
loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=amount, shuffle=shuffle)
for images, _ in loader:
for i, img in enumerate(images):
img = img.squeeze(0)
img = transforms.ToPILImage()(img)
img.save(os.path.join(export_dir, str(i)) + '.png')
break
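# Minimal usage sketch for export_sample_images (paths below are placeholders):
#   dataset = datasets.ImageFolder(root='./data/train', transform=transforms.ToTensor())
#   export_sample_images(16, './samples', dataset)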
def getStat(train_data):
print('Compute mean and variance for training data.')
print(len(train_data))
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=1, shuffle=False, num_workers=0,
pin_memory=True)
mean = torch.zeros(3)
std = torch.zeros(3)
for X, _ in train_loader:
for d in range(3):
mean[d] += X[:, d, :, :].mean()
std[d] += X[:, d, :, :].std()
mean.div_(len(train_data))
std.div_(len(train_data))
return list(mean.numpy()), list(std.numpy())
if __name__ == '__main__':
    if input('Are you sure to start calculating mean and std? [y/n] ') != 'y':
exit()
if len(sys.argv) != 2:
print('Please specify the path of the dataset')
exit(-1)
transform = transforms.Compose([
transforms.Resize((200, 200)),
transforms.ToTensor()
])
    train_dataset = datasets.ImageFolder(root=sys.argv[1], transform=transform)  # use the dataset path given on the command line
mean, std = getStat(train_dataset)
print('mean = ', mean)
print('std = ', std)
``` |
{
"source": "jossM/manga_scraping",
"score": 2
} |
#### File: manga_scraping/lambda/page_marks_db.py
```python
from datetime import datetime
import inspect
import pytz
from typing import List, Dict, Union, Iterable, Tuple
import dateutil.parser
import boto3
from botocore.exceptions import ClientError, ValidationError
from config import AWS_REGION
from global_types import Chapter, Serializable
from logs import logger
dynamodb = boto3.resource('dynamodb', region_name=AWS_REGION)
DYNAMO_TABLE = dynamodb.Table('manga_page_marks')
class CorruptedDynamoDbBase(UserWarning):
pass
class PageMark(Serializable):
""" ORM for table manga_page_marks on dynamodb"""
def __init__(self,
serie_id: str, # bakaupdate serie id
serie_name: Union[str, None]=None, # name of the serie to be displayed
# when the series was updated for the last time (to prioritise scrapping)
latest_update: Union[datetime, None]=None,
chapter_marks: Union[List[Chapter], Tuple[Chapter]]= tuple()): # all chapters for the serie
self._serie_id = serie_id
self.serie_name = serie_name
self.latest_update = latest_update
self.chapter_marks: List[Chapter] = sorted(chapter_marks, reverse=True)
@property
def serie_id(self) -> str:
""" serie id is immutable """
return self._serie_id
def __hash__(self) -> int:
return hash(self._serie_id)
def __contains__(self, item: Chapter) -> bool:
return item in self.chapter_marks
def __iter__(self):
for chapter in self.chapter_marks:
yield chapter
def __repr__(self) -> str:
header_sep = '\n\t'
return (f'{str(type(self))}<'
+ header_sep.join([f'serie: {self.serie_id}',
f'name: {self.serie_name}',]
+ [f'\tchapter {chapter}' for chapter in self.chapter_marks]) +
'>')
def extend(self, chapters: Iterable[Chapter]) -> 'PageMark':
""" offers easy implementation to add chapters to chapter marks """
new_chapter_marks = [chapter for chapter in chapters if chapter not in self]
self.chapter_marks = sorted(new_chapter_marks + self.chapter_marks, reverse=True)
return self
def serialize(self) -> Dict:
serialized_mark = dict(serie_id=self.serie_id)
if self.serie_name is not None:
serialized_mark['serie_name'] = self.serie_name
if self.latest_update is not None:
serialized_mark['latest_update'] = self.latest_update.astimezone(pytz.utc).isoformat()
if self.chapter_marks:
serialized_mark['chapter_marks'] = [chapter.serialize() for chapter in self.chapter_marks]
return serialized_mark
@classmethod
def deserialize(cls, dict_data: Dict) -> 'PageMark':
"""
        transforms a dict into an object.
may trigger warnings if object does not have the correct format
"""
deserialized_page_mark = cls(serie_id=dict_data['serie_id'])
warning_message_elem = []
if 'serie_name' not in dict_data:
warning_message_elem.append('"serie_name" attribute is missing.')
else:
deserialized_page_mark.serie_name = dict_data['serie_name']
if 'latest_update' in dict_data:
try:
deserialized_page_mark.latest_update = dateutil.parser.parse(dict_data['latest_update'])
except KeyError:
pass
except ValueError as e:
warning_message_elem.append(f'"latest_update" attribute has unrecognized format. Parsing error {e}. '
f'Value was : {dict_data.get("latest_update")}')
chapter_marks = list()
for index_position, mark in enumerate(dict_data.get('chapter_marks', [])):
try:
chapter = Chapter.deserialize(mark)
if not chapter.is_valid():
warning_message_elem.append(f'chapter_mark" attribute is invalid at position. {index_position},'
f' with key values "{str(mark)}"')
chapter_marks.append(chapter)
except TypeError:
warning_message_elem.append(f' chapter_mark" attribute is invalid at position. {index_position},'
f' with key values "{str(mark)}"')
deserialized_page_mark.chapter_marks = sorted(chapter_marks, reverse=True)
if warning_message_elem:
warning_message = f'Corrupted PageMark document for serie id {deserialized_page_mark.serie_id}, ' \
f'and serie name {dict_data.get("serie_name", "")} ' + '\n'.join(warning_message_elem)
logger.warning(warning_message)
return deserialized_page_mark
def get_all() -> List[PageMark]:
""" Get all page coming from db. """
attributes = list(inspect.signature(PageMark.__init__).parameters.keys())
attributes.remove('self')
response = DYNAMO_TABLE.scan(ProjectionExpression=', '.join(attributes))
return [PageMark.deserialize(page_mark_elem) for page_mark_elem in response['Items']]
def get(serie_id: str) -> Union[None, PageMark]:
""" Retrieves a page mark object from db or returns None if no matching key is found. """
try:
item = DYNAMO_TABLE.get_item(Key=dict(serie_id=serie_id))['Item']
except ClientError:
logger.error('failed to get ', exc_info=True)
return None
except KeyError:
return None
return PageMark.deserialize(item)
def batch_put(page_marks: Iterable[PageMark]) -> None:
""" writes all page marks on dynamodb table"""
with DYNAMO_TABLE.batch_writer() as batch:
for page_mark in page_marks:
batch.put_item(Item=page_mark.serialize())
def put(page_mark: PageMark) -> None:
""" writes on dynamodb table"""
DYNAMO_TABLE.put_item(Item=page_mark.serialize())
def delete(page_mark_serie_id: str) -> None:
""" delete the record in dynamodb """
try:
DYNAMO_TABLE.delete_item(
Key=dict(serie_id=page_mark_serie_id),
Exists=True,
)
except ValidationError as e:
logger.error(f"serie {page_mark_serie_id} does not exists in db.")
raise e
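
if __name__ == "__main__":
    # Illustrative serialisation round trip with made-up values; this sketch only
    # exercises the PageMark class and performs no DynamoDB calls.
    example = PageMark(serie_id="12345", serie_name="Example serie")
    roundtrip = PageMark.deserialize(example.serialize())
    print(roundtrip.serie_id, roundtrip.serie_name)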
```
#### File: manga_scraping/lambda/release_formating.py
```python
from typing import Union, Iterable
from urllib.parse import urlunparse, urlencode
from page_marks_db import PageMark
from skraper.types import ScrappedChapterRelease, ScrappedReleases
from img_hosting import build_serie_img_viewer_url
class FormattingWarning(Warning):
""" Any issue during formatting of release will have this type """
pass
class FormattedScrappedChapterRelease(ScrappedChapterRelease):
""" data to be displayed for a single release """
def __init__(self,
scraped_chapter_release: ScrappedChapterRelease,
top: bool = False,
url_release_link: Union[None, str] = None):
super(FormattedScrappedChapterRelease, self).__init__(**{var: getattr(scraped_chapter_release, var)
for var in vars(scraped_chapter_release)})
self.link = url_release_link
self.top = top
class FormattedScrappedReleases(ScrappedReleases):
""" data to be displayed for a serie """
def __init__(self,
serie_id: str,
serie_title: str,
serie_img_link: str,
chapters_releases: Iterable[FormattedScrappedChapterRelease]):
super(FormattedScrappedReleases, self).__init__(serie_id, chapters_releases)
self.serie_title = serie_title
self.serie_img_link = serie_img_link
def add_likely_link(
serie_name: str,
release: Union[ScrappedChapterRelease, FormattedScrappedChapterRelease])\
-> FormattedScrappedChapterRelease:
if release.volume:
query = f' v.{release.volume} '
else:
query = ""
query += f'"{release.chapter}" manga {serie_name} {release.group} -site:mangaupdates.com -site:play.google.com'
google_url = urlunparse(('https',
'www.google.com',
'/search',
None,
urlencode(dict(
q=query,
safe='images', # remove safe search
btnG="Search",
lr="lang_en",)),
None))
release.link = google_url
return release
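# Example of the generated link (hypothetical values): for serie_name="Example"
# and a release with volume=2, chapter="10", group="Group", the query sent to
# https://www.google.com/search contains
#   ' v.2 "10" manga Example Group -site:mangaupdates.com -site:play.google.com'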
def format_new_releases(scrapped_releases: ScrappedReleases,
serie_page_mark: PageMark,
top_chapter_lim: int= 5) -> FormattedScrappedReleases:
""" returns new releases with links and information of whether they are top chapters as defined by
top_chapter_limit"""
new_releases = sorted([release for release in scrapped_releases if release not in serie_page_mark.chapter_marks],
reverse=True)
chapters_page_mark = sorted(serie_page_mark.chapter_marks, reverse=True)
if chapters_page_mark:
limiting_chapter = chapters_page_mark[-min(len(chapters_page_mark), top_chapter_lim)]
def is_top(release):
return release > limiting_chapter
else:
def is_top(_):
return True
formatted_scrapped_new_chapter_release = []
for release in new_releases:
formatted_release = add_likely_link(serie_page_mark.serie_name, release)
formatted_release.top = is_top(release)
formatted_scrapped_new_chapter_release.append(formatted_release)
return FormattedScrappedReleases(
serie_id=serie_page_mark.serie_id,
serie_title=serie_page_mark.serie_name,
serie_img_link=build_serie_img_viewer_url(serie_page_mark.serie_id),
chapters_releases=formatted_scrapped_new_chapter_release)
```
#### File: lambda/skraper/types.py
```python
from typing import Union, Iterable, NamedTuple
from global_types import Chapter
class ScrappingWarning(UserWarning):
""" Any issue during scraping will have this type """
pass
class ScrappedSerie(NamedTuple):
serie_id: str # bakaupdate serie id
serie_name: str # Name of the serie on the page
img_file: str # Path to the file
def as_dict(self):
return self._asdict()
class ScrappedChapterRelease(Chapter):
""" data on a given chapter """
def __init__(self, group: str, chapter: str, volume: Union[int, None]= None):
super(ScrappedChapterRelease, self).__init__(chapter, volume)
self.group = group
class ScrappedReleases:
""" data returned from scrapping """
def __init__(self,
serie_id: str,
chapters_releases: Iterable[ScrappedChapterRelease]):
self.serie_id = serie_id
self.releases = sorted(chapters_releases, reverse=True)
def __iter__(self):
for chapter_release in self.releases:
yield chapter_release
def __repr__(self) -> str:
rep = f"Available releases for serie {self.serie_id}:"
releases = '\n'.join(f"{release} \tby group {release.group}" for release in self.releases)
if releases:
rep += '\n' + releases
return rep
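
if __name__ == "__main__":
    # Tiny illustration with made-up values; it assumes global_types.Chapter
    # supports ordering, which the sorted() call above already relies on.
    releases = ScrappedReleases("12345", [ScrappedChapterRelease("GroupA", "10"),
                                          ScrappedChapterRelease("GroupB", "11", volume=2)])
    print(releases)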
``` |
{
"source": "JossMoff/ptl",
"score": 3
} |
#### File: ptl/ptl/__main__.py
```python
import psutil
import time
import math
import gitlab
import os
from argparse import ArgumentParser
import configparser
MODULE_NAME = "ptl: Automatically log time in GitLab issue tracker for COMP23311 at UoM."
__version__ = "0.1.0"
def print_config(token, project_id, issue_id):
print("--:CONFIG:--\n" + "🎫 TOKEN:" + token + "\n🆔 PROJECT-ID:" + project_id + "\n🆔 ISSUE-ID:" + issue_id)
def record_time(token, project_id, issue_id, ide="eclipse"):
eclipse_id = -1
# Iterate over all running process
for proc in psutil.process_iter():
try:
# Get process name & pid from process object.
proc_name = proc.name()
proc_id = proc.pid
if ide in proc_name:
eclipse_id = proc_id
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
if eclipse_id != -1:
# Get start_time
print("⏱️ Recording elapsed worktime for " + ide)
start_time = time.time()
while psutil.pid_exists(eclipse_id):
time.sleep(1)
end_time = time.time()
elapsed_time = (end_time - start_time) / 3600
elapsed_time = (math.ceil(elapsed_time))
# private token or personal token authentication
gl = gitlab.Gitlab('https://gitlab.cs.man.ac.uk', private_token=token)
# Make an API request and authenticate in order to add issue.
gl.auth()
project = gl.projects.get(project_id)
issue = project.issues.get(issue_id)
issue.add_spent_time(str(elapsed_time)+'h')
print("⏱️ " + str(elapsed_time) + "h of time recorded.")
else:
print("❌ IDE not running yet!")
def set_config(token, project_id, issue_id, config):
config['SETTINGS']['token'] = token
config['SETTINGS']['project_id'] = project_id
config['SETTINGS']['issue_id'] = issue_id
with open('config.ini', 'w') as configfile:
config.write(configfile)
return config
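# set_config() persists the settings to config.ini; the written file looks like
# this (illustrative values):
#   [SETTINGS]
#   token = <private-token>
#   project_id = 1234
#   issue_id = 5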
def main():
config = configparser.ConfigParser()
config.read('config.ini')
if len(config) == 1 and 'DEFAULT' in config:
config['SETTINGS'] = {}
config = set_config("0", "0", "0", config)
parser = ArgumentParser(description=MODULE_NAME)
parser.add_argument('-c','--config',
action="store_true", dest="config", default=False,
help="Shows config of current ptl settings")
parser.add_argument('-t','--token',
type=str, dest="token", default=config['SETTINGS']['token'],
help="Sets private token for GitLab user")
parser.add_argument('-p','--projectid',
type=str, dest="project_id", default=config['SETTINGS']['project_id'],
help="Sets project id for a GitLab repository")
parser.add_argument('-i','--issueid',
type=str, dest="issue_id", default=config['SETTINGS']['issue_id'],
help="Sets issue id for an issue in a GitLab repository")
parser.add_argument('-s','--start',
action="store_true", dest="time", default=False,
help="Start timing IDE open time.")
args = parser.parse_args()
if args.config:
        print_config(args.token, args.project_id, args.issue_id)
else:
if (args.token != config['SETTINGS']['token'] or args.project_id != config['SETTINGS']['project_id']
or args.issue_id != config['SETTINGS']['issue_id']):
config = set_config(args.token, args.project_id, args.issue_id, config)
elif args.time:
record_time(args.token, int(args.project_id), int(args.issue_id))
if __name__ == "__main__":
main()
``` |
{
"source": "JossMoff/SetClipper",
"score": 3
} |
#### File: JossMoff/SetClipper/omc.py
```python
import youtube_dl
import subprocess
import os.path
import shutil
import os
import json
class OnlineMediaConverter(youtube_dl.YoutubeDL):
def __init__(self, options, extension, cache_limit):
super().__init__(options)
self.options = options
self.id = ""
self.extension = '.' + extension
self.cache_limit = cache_limit
def download_from_url(self, url, filename, clip_times=()):
video_id = 'yt-' + self._get_id(url)
input_path = './download.wav'
if not self._check_cache(video_id):
self.download([url])
self._convert_to_pcm(input_path, filename, clip_times)
# Move one file to cache and other to media
self._move_to_folder("./download.wav", ".cache/" + video_id + self.extension)
else:
input_path = '.cache/'+ video_id + self.extension
# TODO MAKE _convert_to_pcm take a location to convert from
self._convert_to_pcm(input_path, filename, clip_times)
def _get_id(self, url):
if "youtube.com/watch?v=" in url:
return url.replace('https://www.youtube.com/watch?v=', '')
else:
raise Exception('Currently only Youtube is supported.')
def _convert_to_pcm(self, input_path, output_filename, clip_times):
if isinstance(clip_times, tuple):
if len(clip_times) == 0:
                output = subprocess.getoutput(' '.join(['ffmpeg', '-i', input_path,
                                                        output_filename]))
return output
elif len(clip_times) == 2:
start_time = clip_times[0]
end_time = clip_times[1]
return self._convert_to_pcm_clip(input_path, output_filename, start_time, end_time)
else:
raise TypeError('Clip times must be in the form (start_time, end_time)')
else:
raise TypeError('Clip times must be in the form (start_time, end_time)')
def _convert_to_pcm_clip(self, input_path, output_filename, start_time, end_time):
        output = subprocess.getoutput(' '.join(['ffmpeg', '-i', input_path,
                                                '-ss', str(start_time), '-to',
                                                str(end_time), 'media/' + output_filename]))
return output
def _move_to_folder(self, current_path, output_path):
if os.path.isfile(current_path) and not os.path.isfile(output_path) :
shutil.move(current_path, output_path)
else:
raise Exception('''Please enter valid current_path and move_path
and ensure move_path does not already exist''')
def _check_cache(self, video_id):
return(os.path.isfile('.cache/'+ video_id + self.extension))
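
if __name__ == "__main__":
    # Usage sketch only: the youtube_dl options, URL and cache size below are
    # placeholders, and a real run needs network access plus ffmpeg on the PATH.
    ydl_options = {"format": "bestaudio", "outtmpl": "download.%(ext)s"}
    converter = OnlineMediaConverter(ydl_options, "wav", cache_limit=10)
    converter.download_from_url("https://www.youtube.com/watch?v=XXXXXXXXXXX",
                                "clip.wav", clip_times=(0, 30))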
``` |
{
"source": "jossM/streamflow",
"score": 3
} |
#### File: backend/graph/changes.py
```python
from typing import List, Dict
from model.db import DbTasksChange, DbTask
from graph.utils import is_task_in_dag
from model.task_model import TasksChange
def build_db_changes(change: TasksChange, current_tasks: Dict[str, DbTask]) -> DbTasksChange:
"""Evaluate all changes that must be performed on task to apply the requested changes"""
deleted_tasks_ids = (
{task_id for task_id in current_tasks.keys() if is_task_in_dag(task_id, change.dags)}
- {task.id for task in change.tasks}
)
new_tasks_mixed = change.tasks + [task for task in current_tasks.values()
if not is_task_in_dag(task.id, change.dags)]
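    # next_tasks_ids is rebuilt by inverting previous_tasks_ids over the merged
    # task list, so downstream links stay consistent after the change.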
new_tasks_db = [
DbTask(
**task.dict(exclude={"next_tasks_ids"}, exclude_unset=True),
next_tasks_ids=sorted(other_task.id for other_task in new_tasks_mixed
if task.id in other_task.previous_tasks_ids)
)
for task in new_tasks_mixed
]
return DbTasksChange(
        tasks_to_update=sorted([task_db for task_db in new_tasks_db if task_db != current_tasks.get(task_db.id)],
key=lambda task: task.id),
ids_to_remove=deleted_tasks_ids,)
def build_new_tasks_graph(change: DbTasksChange, current_tasks: Dict[str, DbTask]) -> List[DbTask]:
"""Create the new list of tasks effective after the DbChanges has been performed"""
all_tasks: Dict[str, DbTask] = {task.id: task for task in current_tasks.values()
if task.id not in change.ids_to_remove}
all_tasks.update({task.id: task for task in change.tasks_to_update})
return sorted(all_tasks.values(), key=lambda task: task.id)
```
#### File: tests/db/mocked_tasks.py
```python
from typing import Optional, List
from unittest.mock import AsyncMock, call
import pytest
from db.tasks import TASK_KEY_PREFIX
from model.db import DbTask
@pytest.fixture
def scan_table_mock(mocker):
async_mock = AsyncMock()
mocker.patch("db.tasks._scan_table", side_effect=async_mock)
return async_mock
def make_scan_table_response(results: Optional[List[DbTask]] = None, next_key: Optional[str] = None) -> dict:
if results is None:
results = []
return {'Items': [dict(id=TASK_KEY_PREFIX+t.id, **t.dict(exclude_defaults=True, exclude={"id"})) for t in results],
"LastEvaluatedKey": next_key}
@pytest.fixture
def mock_task_lock(mocker):
lock_mock = AsyncMock()
acquire_lock_mock = AsyncMock()
lock_mock.attach_mock(acquire_lock_mock, 'acquire_lock')
release_lock_mock = AsyncMock()
lock_mock.attach_mock(release_lock_mock, 'release_lock')
mocker.patch("db.tasks.acquire_lock", side_effect=lock_mock.acquire_lock)
mocker.patch("db.tasks.release_lock", side_effect=lock_mock.release_lock)
return lock_mock
EXPECT_MOCK_CALLS = [call.acquire_lock(), call.release_lock()]
@pytest.fixture
def mock_update_db(mocker):
update_mock = AsyncMock()
mocker.patch('db.tasks.update_db', side_effect=update_mock)
return update_mock
```
#### File: tests/model/test_task_model.py
```python
from pydantic import ValidationError
from pytest import raises
from model.task_model import CallTask
from model.db import DbTask
from tests.model.utils import make_task_dict
def test_db_task_validation_can_pass():
task_data = make_task_dict()
DbTask(**task_data)
def test_task_can_only_contain_one_template():
task_data = make_task_dict(
pod_template="template",
call_templates=[CallTask(url_template="url", method="POST")]
)
with raises(ValidationError):
DbTask(**task_data)
def test_task_must_contain_at_most_one_template():
task_data = make_task_dict(call_templates="template", pod_template="template")
with raises(ValidationError):
DbTask(**task_data)
def test_task_id_must_contain_dag():
task_data = make_task_dict(id="no_separator_indicating_dage_of_task")
with raises(ValidationError):
DbTask(**task_data)
``` |
{
"source": "jossthomas/Enigma-Machine",
"score": 3
} |
#### File: Enigma-Machine/components/Enigma_Machine.py
```python
from string import ascii_uppercase
from random import randrange #used for manual setup - allowing human choice of rotor positions creates bias
from .Enigma_Components import rotor, entry_wheel, reflector, rotor_array, plugboard
from .Default_Settings import reflector_sequences, rotor_sequences, ETW, cat_sort, numeral_sort
class enigma:
"""Broad container class for all enigma components and setup proceedures, acts to interface between the frontend and the subcomponents"""
def __init__(self):
self.main_entry_wheel = entry_wheel()
self.rotors = rotor_array()
self.main_reflector = reflector()
self.main_plugboard = plugboard()
def default_setup(self):
"""Set of default rotors, reflectors and plugs, useful for testing"""
pairs = "" #No point having a plugboard really, commercial Enigmas didn't have them.
if pairs != "": #Incase anyone really wants one
pairs = pairs.split(" ")
self.main_plugboard.set(pairs)
self.main_entry_wheel.set(ETW['STANDARD'])
rotor_spec = rotor_sequences['I']
rotor_spec2 = rotor_sequences['II']
rotor_spec3 = rotor_sequences['III']
self.rotors.add_rotor(rotor_spec[0], 0, rotor_spec[1])
self.rotors.add_rotor(rotor_spec2[0], 0, rotor_spec2[1])
self.rotors.add_rotor(rotor_spec3[0], 0, rotor_spec3[1])
self.main_reflector.set(reflector_sequences['A'])
def manual_setup(self):
"""Allows the user to manually define all components of the device"""
self.choose_entry_wheel()
self.choose_rotors()
self.choose_reflector()
self.set_rotor_positions()
self.configure_plugboard()
print("\nCurrent Enigma Setup: \n")
self.print_setup()
def choose_entry_wheel(self):
"""Acts as part of the key in the naval version only"""
available_ETW = list(ETW.keys())
ETW_choice = None
print("Available Entry Wheels: ", ', '.join(available_ETW))
while ETW_choice not in available_ETW:
ETW_choice = input("Choose Entry Wheel.\n> ").upper()
self.main_entry_wheel.set(ETW[ETW_choice])
def choose_rotors(self):
"""Choose rotors from the set of historic rotors in default_settings.py"""
rotor_choice = None
remaining_rotors = list(rotor_sequences.keys())
remaining_rotors.sort(key=lambda x: (cat_sort(x), numeral_sort(x))) #Sort the rotors into three categories, then by numeral within categories
#Choose number of rotor
#limited to 4 for historic reasons and because additional rotors lose functionality due to rarity of turning
Num_Rotors = input("Enter Desired Number of Rotors (up to 4).\n> ")
while Num_Rotors not in ["1", "2", "3", "4"]:
Num_Rotors = input("Entry Must be a number between 1 and 4.\n> ")
for i in range(1, int(Num_Rotors) + 1):
print("Available rotors: ", ', '.join(remaining_rotors))
while rotor_choice not in remaining_rotors: #Only allow user to select one of each rotor
rotor_choice = input("Enter Rotor {} name.\n> ".format(i)).upper()
self.rotors.add_rotor(rotor_sequences[rotor_choice][0], 0, rotor_sequences[rotor_choice][1]) #add the desired rotor
remaining_rotors.remove(rotor_choice)
def set_rotor_positions(self):
"""The starting rotations of the rotors acts as a key for the the code"""
rotor_position_start = None
randomise = None
while randomise not in ('manual', 'random'):
            randomise = input('Enter \'manual\' for custom positions or \'random\' for randomly generated positions:\n> ').lower() #non random rotor positions made cryptanalysis easier historically
for index, rotor in enumerate(self.rotors.rotors):
if randomise == 'manual':
while rotor_position_start not in map(str, list(range(1,27))): #Check a valid number was entered
rotor_position_start = input('Please enter starting position for rotor {} between 1 and 26.\n> '.format(index + 1))
elif randomise == 'random':
rotor_position_start = randrange(1,27)
rotor.set_position(int(rotor_position_start) - 1) #Subtract 1 from the index due to 0 indexing of rotors
rotor_position_start = None #reset this so it works next time
def choose_reflector(self):
"""Essentially acts as another part of the key"""
available_reflectors = list(reflector_sequences.keys())
reflector_choice = None
print("Available Reflectors: ", ', '.join(available_reflectors))
while reflector_choice not in available_reflectors:
reflector_choice = input("Choose reflector.\n> ").upper()
self.main_reflector.set(reflector_sequences[reflector_choice])
def configure_plugboard(self):
"""Allows for letter swapping hence greatly increases entropy"""
plugs = None
plugs_to_add = []
used_letters = []
letters = list(ascii_uppercase)
#Choose number of plugs, 26 letter so 13 possible connections
while plugs not in map(str, list(range(0,14))):
plugs = input("Enter number of connections, Must be a number between 0 and 13.\n> ").upper()
if plugs != '0':
for i in range(int(plugs)):
pair = 'aa'
while pair[0] not in letters or pair[1] not in letters:
pair = input("Enter plug pair, should be in the format AB. \nAvailable plugs: {}.\n> ".format(''.join(letters))).upper()
letters.remove(pair[0])
letters.remove(pair[1])
plugs_to_add.append(pair.upper())
self.main_plugboard.set(plugs_to_add)
def run(self, message):
"""Take a string and split it before feeding it through enigma element wise"""
output = ''.join(list(map(self.encode, list(message))))
return output
def encode(self, letter):
"""Encoding a single letter, note that the process is mirrored before and after the reflector hence reversible"""
        self.rotors.rotate_rotors() #Historically occurred before the letter is encoded
letter = self.main_plugboard.substitute(letter)
letter = self.main_entry_wheel.forwards_encode(letter)
letter = self.rotors.encode(letter)
letter = self.main_reflector.reflect(letter) #No letter can ever encode itself
letter = self.rotors.reverse_encode(letter)
letter = self.main_entry_wheel.backwards_encode(letter)
letter = self.main_plugboard.substitute(letter)
return letter
def print_setup(self):
"""Print the current enigma component settings"""
print(self.main_entry_wheel)
for i, i_rotor in enumerate(self.rotors.rotors):
print("Rotor ", i + 1)
print(i_rotor)
print(self.main_reflector)
print(self.main_plugboard)
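
# Usage sketch (this module uses relative imports, so it is normally driven from
# the package front end rather than run directly):
#   machine = enigma()
#   machine.default_setup()
#   ciphertext = machine.run("HELLOWORLD")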
``` |
{
"source": "jostbr/GEF4510-Projects",
"score": 4
} |
#### File: GEF4510-Projects/project1/visualize_results.py
```python
import os
import sys
import numpy as np
from matplotlib import pyplot
def visualize_analytical(n_value):
analytical_solution = atmosphere_analytical
H = 270.0; kappa = 30.0; j_max = 27; K = 0.45; psi_0 = 10.0 # Defining parameters
delta_z = H / (j_max - 1.0) # Space step distance
delta_t = (K * (delta_z ** 2)) / kappa # Time step distance
t = n_value * delta_t # t-value corresponding to n
z = np.linspace(0, H, j_max) # Array of z-values
psi = analytical_solution(z, t, psi_0, kappa, H) # Array of psi-values
pyplot.plot(psi / psi_0, z / H, label = "n = {}".format(n_value)) # Plot dim-less psi
def atmosphere_analytical(z, t, psi_0, kappa, H):
return psi_0 * np.exp(-(((np.pi ** 2) * kappa) / (H ** 2)) * t) * np.sin((np.pi * z) / H) # Analytical solution
def ocean_analytical(z, t, psi_0, kappa, D):
    return psi_0 * (z / D + 1) # Steady-state solution
if (__name__ == "__main__"):
    # If the operating system successfully compiles the Fortran file.
if (not os.system("gfortran -o {0}.exe {0}.f90".format(sys.argv[1]))):
os.system("{0}.exe {1}".format(sys.argv[1], sys.argv[2])) # Execute the program to generate .dat-file with results
if (sys.argv[1] == "atmosphere_application"):
length_scale = "H" # Length scale for labeling x-axis
y_start = 0; y_stop = 1; # Start and stop values for dim-less height
elif (sys.argv[1] == "ocean_application"):
length_scale = "(-D)" # Length scale for labeling x-axis
y_start = -1; y_stop = 0; # Start and stop values for dim-less height
n_values = [] # List to store n-values for analytical solution
z = np.linspace(y_start, y_stop, 27) # Create dimensionless height array
# Loop over all lines in file where each line is a psi array for a given n-value.
with open(sys.argv[2], "r") as data_file:
for line_num, current_line in enumerate(data_file):
list_of_current_values = current_line.split() # Put each value on current_line in a list
n_value = str(int(float(list_of_current_values[0]))) # Value of n for this line of psi-values
n_values.append(int(n_value)) # Update list with current n_value
psi_given_n = [float(psi_string) for psi_string in list_of_current_values[1:]] # Convert psi-values from string to float
pyplot.plot(np.array(psi_given_n), z, label = "n = {}".format(n_value)) # Plot current psi-array with label
# Plot steady-state solution in the same figure as the numerical solution.
if (sys.argv[1] == "ocean_application" and line_num == 7):
pyplot.plot(ocean_analytical(np.linspace(-30, 0, 27), None, 10.0, None, 30) / 10.0, z, label = "SS-solution")
pyplot.hold("on")
pyplot.xlabel("Dimension-less temperature $\psi/\psi_0$") # Set label on the x-axis of the figure
pyplot.ylabel("Dimension-less height $z/{0}$".format(length_scale)) # Set label on the y-axis of the figure
pyplot.title("Numerical solution for the {0}".format(sys.argv[1].split("_")[0])) # Set title of the figure
pyplot.legend() # Show labels on all plotted curves
if (sys.argv[1] == "atmosphere_application"):
pyplot.figure() # Generate new figure to plot in
for current_n in n_values:
visualize_analytical(current_n) # Plot analytical solution for same n's
pyplot.hold("on") # Hold plot for more curves coming later
pyplot.xlabel("Dimension-less temperature $\psi/\psi_0$") # Set label on the x-axis of the figure
pyplot.ylabel("Dimension-less height $z/{0}$".format(length_scale)) # Set label on the y-axis of the figure
pyplot.title("Analytical solution for the {0}".format(sys.argv[1].split("_")[0])) # Set title of the figure
pyplot.legend() # Show labels on all plotted curves
pyplot.show()
else:
print("\n---------- Compilation of '{0}' failed ----------\n".format(sys.argv[1])) # Give error message if compilations fails
``` |
{
"source": "jostbr/romsviz",
"score": 3
} |
#### File: romsviz/romsviz/outvar.py
```python
class OutVar(object):
"""Class representing a output variable generated in the NetcdfOut.get_var() method.
When extracting a variable from a netcdf file, an instance of this class will be
created and all relevant info on the extracted variable is stored as attributes
to that instance. Although, the class is meant as support to the NetcdfOut class,
one might find uses outside of that too. Various methods working on the attributes
are defined below as well.
"""
def __init__(self):
"""Constructor setting all attributes to None. They are expected
to be modified externally (e.g. by the NetcdfOut class)."""
self.name = None
self.meta = None
self.lims = None
self.bounds = None
self.time_dist = None
self.use_files = None
self.dim_names = None
self.data = None
self.time_name = None
self.time = None
def get_lim(self, dim_name):
"""
Method that extracts the index limits for a dimension.
Args:
            dim_name (str) : Name of dimension to get index limits for
Returns:
lims (tuple) : (start, end) index of requested dim
"""
for i in range(len(self.dim_names)):
if self.dim_names[i] == dim_name:
return self.lims[i]
raise ValueError("{} is not a dimension of {}!".format(dim_name, self.var_name))
def get_bound(self, dim_name):
"""
Method that extracts the index bounds for a dimension.
Args:
            dim_name (str) : Name of dimension to get index bounds for
Returns:
lims (tuple) : (start, end) bound of requested dim
"""
for i in range(len(self.dim_names)):
if self.dim_names[i] == dim_name:
return self.bounds[i]
raise ValueError("{} is not a dimension of {}!".format(dim_name, self.var_name))
def identify_dim(self, suggestions):
"""
Method that checks if variable has one of the suggested dimensions.
Args:
suggestions (list) : List of name sggestions for dimensions
Returns:
dim_name (str) : Name of dim first matching with a suggestion
"""
for dim_name in self.dim_names:
for d in suggestions:
if dim_name == d:
return dim_name
raise ValueError("No dimension of {} are in <suggestions>".format(self.name))
def get_range_dims(self, enforce=1):
"""
Method that finds the dimensions spanning over a range of indices,
i.e. the dimensions which are not one in length for the instance self.
Returns:
range_dims (list) : List fo dimension names
"""
range_dims = list()
print(self.bounds)
for dim_name, (l0, l1) in zip(self.dim_names, self.lims):
l0 = 0 if l0 is None else l0
l1 = self.get_bound(dim_name) if l1 is None else l1
if l1 - l0 > 0:
range_dims.append(dim_name)
if len(range_dims) != enforce:
raise ValueError("Must have exactly {} range dims, has {}!".format(enforce, len(range_dims)))
return range_dims
def attr_to_string(self, obj, attr):
"""
Method that gives the string value of the requested attribute.
Args:
            obj (type(obj)) : Some object whose string attribute to get
attr (str, list) : Attribute(s) names to get string value for
Returns:
attr_string (str) : String value of requested attribute
"""
if type(attr) is str:
attr = [attr] # need to be list below
for a in attr:
val = getattr(obj, a, None)
if val:
return val.encode("utf8").capitalize()
return "N/A"
def lims_to_str(self, exclude=list()):
"""
Method that converts self's lims to a string
Args:
            exclude (list) : List of dim names to exclude in the string
Returns:
lims_str (str) : String representation of self's lims
"""
lims_str = "("
for d_name, lim in zip(self.dim_names, self.lims):
if d_name not in exclude:
if lim[0] == lim[1]:
lims_str += "{}: {}".format(d_name, lim[0])
else:
lims_str += "{}: {}".format(d_name, lim)
if d_name != self.dim_names[-1]:
lims_str += ", "
return lims_str + ")"
def __getitem__(self, indices):
"""
Method to support instance indexing/slicing.
Args:
indices (int/slice) : Integer index or slice object
Returns:
array (ndarray) : The indexed self.data[indices] array
"""
return self.data.__getitem__(indices)
def __str__(self):
raise NotImplementedError
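
if __name__ == "__main__":
    # Minimal illustration with made-up metadata; in normal use these attributes
    # are filled in by NetcdfOut.get_var().
    var = OutVar()
    var.name = "temp"
    var.dim_names = ["ocean_time", "s_rho"]
    var.lims = [(0, 10), (5, 5)]
    print(var.get_lim("ocean_time"))  # (0, 10)
    print(var.lims_to_str())          # (ocean_time: (0, 10), s_rho: 5)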
``` |
{
"source": "JostCrow/django-fontawesome-5",
"score": 2
} |
#### File: django-fontawesome-5/fontawesome_5/widgets.py
```python
from __future__ import absolute_import
from django import forms
from .app_settings import get_css_admin
class IconWidget(forms.Select):
template_name = 'fontawesome_5/select.html'
def __init__(self, attrs=None):
super(IconWidget, self).__init__(attrs)
class Media:
js = (
'django-fontawesome.js',
)
css = {
'all': get_css_admin()
}
``` |
{
"source": "JostCrow/django-font-icons",
"score": 2
} |
#### File: management/commands/loadfontawesome5_free.py
```python
from django.core.management.base import BaseCommand
from ...models import FontIconModel
class Command(BaseCommand):
help = "Load all free fontawesome icons"
def handle(self, *args, **options):
FontIconModel.objects.bulk_create([
FontIconModel(style="fas", icon_name="fa-address-book"),
FontIconModel(style="fas", icon_name="fa-address-card"),
FontIconModel(style="fas", icon_name="fa-adjust"),
FontIconModel(style="fas", icon_name="fa-air-freshener"),
FontIconModel(style="fas", icon_name="fa-align-center"),
FontIconModel(style="fas", icon_name="fa-align-justify"),
FontIconModel(style="fas", icon_name="fa-align-left"),
FontIconModel(style="fas", icon_name="fa-align-right"),
FontIconModel(style="fas", icon_name="fa-allergies"),
FontIconModel(style="fas", icon_name="fa-ambulance"),
FontIconModel(style="fas", icon_name="fa-american-sign-language-interpreting"),
FontIconModel(style="fas", icon_name="fa-anchor"),
FontIconModel(style="fas", icon_name="fa-angle-double-down"),
FontIconModel(style="fas", icon_name="fa-angle-double-left"),
FontIconModel(style="fas", icon_name="fa-angle-double-right"),
FontIconModel(style="fas", icon_name="fa-angle-double-up"),
FontIconModel(style="fas", icon_name="fa-angle-down"),
FontIconModel(style="fas", icon_name="fa-angle-left"),
FontIconModel(style="fas", icon_name="fa-angle-right"),
FontIconModel(style="fas", icon_name="fa-angle-up"),
FontIconModel(style="fas", icon_name="fa-angry"),
FontIconModel(style="fas", icon_name="fa-ankh"),
FontIconModel(style="fas", icon_name="fa-apple-alt"),
FontIconModel(style="fas", icon_name="fa-archive"),
FontIconModel(style="fas", icon_name="fa-archway"),
FontIconModel(style="fas", icon_name="fa-arrow-alt-circle-down"),
FontIconModel(style="fas", icon_name="fa-arrow-alt-circle-left"),
FontIconModel(style="fas", icon_name="fa-arrow-alt-circle-right"),
FontIconModel(style="fas", icon_name="fa-arrow-alt-circle-up"),
FontIconModel(style="fas", icon_name="fa-arrow-circle-down"),
FontIconModel(style="fas", icon_name="fa-arrow-circle-left"),
FontIconModel(style="fas", icon_name="fa-arrow-circle-right"),
FontIconModel(style="fas", icon_name="fa-arrow-circle-up"),
FontIconModel(style="fas", icon_name="fa-arrow-down"),
FontIconModel(style="fas", icon_name="fa-arrow-left"),
FontIconModel(style="fas", icon_name="fa-arrow-right"),
FontIconModel(style="fas", icon_name="fa-arrow-up"),
FontIconModel(style="fas", icon_name="fa-arrows-alt"),
FontIconModel(style="fas", icon_name="fa-arrows-alt-h"),
FontIconModel(style="fas", icon_name="fa-arrows-alt-v"),
FontIconModel(style="fas", icon_name="fa-assistive-listening-systems"),
FontIconModel(style="fas", icon_name="fa-asterisk"),
FontIconModel(style="fas", icon_name="fa-at"),
FontIconModel(style="fas", icon_name="fa-atlas"),
FontIconModel(style="fas", icon_name="fa-atom"),
FontIconModel(style="fas", icon_name="fa-audio-description"),
FontIconModel(style="fas", icon_name="fa-award"),
FontIconModel(style="fas", icon_name="fa-baby"),
FontIconModel(style="fas", icon_name="fa-baby-carriage"),
FontIconModel(style="fas", icon_name="fa-backspace"),
FontIconModel(style="fas", icon_name="fa-backward"),
FontIconModel(style="fas", icon_name="fa-bacon"),
FontIconModel(style="fas", icon_name="fa-balance-scale"),
FontIconModel(style="fas", icon_name="fa-ban"),
FontIconModel(style="fas", icon_name="fa-band-aid"),
FontIconModel(style="fas", icon_name="fa-barcode"),
FontIconModel(style="fas", icon_name="fa-bars"),
FontIconModel(style="fas", icon_name="fa-baseball-ball"),
FontIconModel(style="fas", icon_name="fa-basketball-ball"),
FontIconModel(style="fas", icon_name="fa-bath"),
FontIconModel(style="fas", icon_name="fa-battery-empty"),
FontIconModel(style="fas", icon_name="fa-battery-full"),
FontIconModel(style="fas", icon_name="fa-battery-half"),
FontIconModel(style="fas", icon_name="fa-battery-quarter"),
FontIconModel(style="fas", icon_name="fa-battery-three-quarters"),
FontIconModel(style="fas", icon_name="fa-bed"),
FontIconModel(style="fas", icon_name="fa-beer"),
FontIconModel(style="fas", icon_name="fa-bell"),
FontIconModel(style="fas", icon_name="fa-bell-slash"),
FontIconModel(style="fas", icon_name="fa-bezier-curve"),
FontIconModel(style="fas", icon_name="fa-bible"),
FontIconModel(style="fas", icon_name="fa-bicycle"),
FontIconModel(style="fas", icon_name="fa-binoculars"),
FontIconModel(style="fas", icon_name="fa-biohazard"),
FontIconModel(style="fas", icon_name="fa-birthday-cake"),
FontIconModel(style="fas", icon_name="fa-blender"),
FontIconModel(style="fas", icon_name="fa-blender-phone"),
FontIconModel(style="fas", icon_name="fa-blind"),
FontIconModel(style="fas", icon_name="fa-blog"),
FontIconModel(style="fas", icon_name="fa-bold"),
FontIconModel(style="fas", icon_name="fa-bolt"),
FontIconModel(style="fas", icon_name="fa-bomb"),
FontIconModel(style="fas", icon_name="fa-bone"),
FontIconModel(style="fas", icon_name="fa-bong"),
FontIconModel(style="fas", icon_name="fa-book"),
FontIconModel(style="fas", icon_name="fa-book-dead"),
FontIconModel(style="fas", icon_name="fa-book-medical"),
FontIconModel(style="fas", icon_name="fa-book-open"),
FontIconModel(style="fas", icon_name="fa-book-reader"),
FontIconModel(style="fas", icon_name="fa-bookmark"),
FontIconModel(style="fas", icon_name="fa-bowling-ball"),
FontIconModel(style="fas", icon_name="fa-box"),
FontIconModel(style="fas", icon_name="fa-box-open"),
FontIconModel(style="fas", icon_name="fa-boxes"),
FontIconModel(style="fas", icon_name="fa-braille"),
FontIconModel(style="fas", icon_name="fa-brain"),
FontIconModel(style="fas", icon_name="fa-bread-slice"),
FontIconModel(style="fas", icon_name="fa-briefcase"),
FontIconModel(style="fas", icon_name="fa-briefcase-medical"),
FontIconModel(style="fas", icon_name="fa-broadcast-tower"),
FontIconModel(style="fas", icon_name="fa-broom"),
FontIconModel(style="fas", icon_name="fa-brush"),
FontIconModel(style="fas", icon_name="fa-bug"),
FontIconModel(style="fas", icon_name="fa-building"),
FontIconModel(style="fas", icon_name="fa-bullhorn"),
FontIconModel(style="fas", icon_name="fa-bullseye"),
FontIconModel(style="fas", icon_name="fa-burn"),
FontIconModel(style="fas", icon_name="fa-bus"),
FontIconModel(style="fas", icon_name="fa-bus-alt"),
FontIconModel(style="fas", icon_name="fa-business-time"),
FontIconModel(style="fas", icon_name="fa-calculator"),
FontIconModel(style="fas", icon_name="fa-calendar"),
FontIconModel(style="fas", icon_name="fa-calendar-alt"),
FontIconModel(style="fas", icon_name="fa-calendar-check"),
FontIconModel(style="fas", icon_name="fa-calendar-day"),
FontIconModel(style="fas", icon_name="fa-calendar-minus"),
FontIconModel(style="fas", icon_name="fa-calendar-plus"),
FontIconModel(style="fas", icon_name="fa-calendar-times"),
FontIconModel(style="fas", icon_name="fa-calendar-week"),
FontIconModel(style="fas", icon_name="fa-camera"),
FontIconModel(style="fas", icon_name="fa-camera-retro"),
FontIconModel(style="fas", icon_name="fa-campground"),
FontIconModel(style="fas", icon_name="fa-candy-cane"),
FontIconModel(style="fas", icon_name="fa-cannabis"),
FontIconModel(style="fas", icon_name="fa-capsules"),
FontIconModel(style="fas", icon_name="fa-car"),
FontIconModel(style="fas", icon_name="fa-car-alt"),
FontIconModel(style="fas", icon_name="fa-car-battery"),
FontIconModel(style="fas", icon_name="fa-car-crash"),
FontIconModel(style="fas", icon_name="fa-car-side"),
FontIconModel(style="fas", icon_name="fa-caret-down"),
FontIconModel(style="fas", icon_name="fa-caret-left"),
FontIconModel(style="fas", icon_name="fa-caret-right"),
FontIconModel(style="fas", icon_name="fa-caret-square-down"),
FontIconModel(style="fas", icon_name="fa-caret-square-left"),
FontIconModel(style="fas", icon_name="fa-caret-square-right"),
FontIconModel(style="fas", icon_name="fa-caret-square-up"),
FontIconModel(style="fas", icon_name="fa-caret-up"),
FontIconModel(style="fas", icon_name="fa-carrot"),
FontIconModel(style="fas", icon_name="fa-cart-arrow-down"),
FontIconModel(style="fas", icon_name="fa-cart-plus"),
FontIconModel(style="fas", icon_name="fa-cash-register"),
FontIconModel(style="fas", icon_name="fa-cat"),
FontIconModel(style="fas", icon_name="fa-certificate"),
FontIconModel(style="fas", icon_name="fa-chair"),
FontIconModel(style="fas", icon_name="fa-chalkboard"),
FontIconModel(style="fas", icon_name="fa-chalkboard-teacher"),
FontIconModel(style="fas", icon_name="fa-charging-station"),
FontIconModel(style="fas", icon_name="fa-chart-area"),
FontIconModel(style="fas", icon_name="fa-chart-bar"),
FontIconModel(style="fas", icon_name="fa-chart-line"),
FontIconModel(style="fas", icon_name="fa-chart-pie"),
FontIconModel(style="fas", icon_name="fa-check"),
FontIconModel(style="fas", icon_name="fa-check-circle"),
FontIconModel(style="fas", icon_name="fa-check-double"),
FontIconModel(style="fas", icon_name="fa-check-square"),
FontIconModel(style="fas", icon_name="fa-cheese"),
FontIconModel(style="fas", icon_name="fa-chess"),
FontIconModel(style="fas", icon_name="fa-chess-bishop"),
FontIconModel(style="fas", icon_name="fa-chess-board"),
FontIconModel(style="fas", icon_name="fa-chess-king"),
FontIconModel(style="fas", icon_name="fa-chess-knight"),
FontIconModel(style="fas", icon_name="fa-chess-pawn"),
FontIconModel(style="fas", icon_name="fa-chess-queen"),
FontIconModel(style="fas", icon_name="fa-chess-rook"),
FontIconModel(style="fas", icon_name="fa-chevron-circle-down"),
FontIconModel(style="fas", icon_name="fa-chevron-circle-left"),
FontIconModel(style="fas", icon_name="fa-chevron-circle-right"),
FontIconModel(style="fas", icon_name="fa-chevron-circle-up"),
FontIconModel(style="fas", icon_name="fa-chevron-down"),
FontIconModel(style="fas", icon_name="fa-chevron-left"),
FontIconModel(style="fas", icon_name="fa-chevron-right"),
FontIconModel(style="fas", icon_name="fa-chevron-up"),
FontIconModel(style="fas", icon_name="fa-child"),
FontIconModel(style="fas", icon_name="fa-church"),
FontIconModel(style="fas", icon_name="fa-circle"),
FontIconModel(style="fas", icon_name="fa-circle-notch"),
FontIconModel(style="fas", icon_name="fa-city"),
FontIconModel(style="fas", icon_name="fa-clinic-medical"),
FontIconModel(style="fas", icon_name="fa-clipboard"),
FontIconModel(style="fas", icon_name="fa-clipboard-check"),
FontIconModel(style="fas", icon_name="fa-clipboard-list"),
FontIconModel(style="fas", icon_name="fa-clock"),
FontIconModel(style="fas", icon_name="fa-clone"),
FontIconModel(style="fas", icon_name="fa-closed-captioning"),
FontIconModel(style="fas", icon_name="fa-cloud"),
FontIconModel(style="fas", icon_name="fa-cloud-download-alt"),
FontIconModel(style="fas", icon_name="fa-cloud-meatball"),
FontIconModel(style="fas", icon_name="fa-cloud-moon"),
FontIconModel(style="fas", icon_name="fa-cloud-moon-rain"),
FontIconModel(style="fas", icon_name="fa-cloud-rain"),
FontIconModel(style="fas", icon_name="fa-cloud-showers-heavy"),
FontIconModel(style="fas", icon_name="fa-cloud-sun"),
FontIconModel(style="fas", icon_name="fa-cloud-sun-rain"),
FontIconModel(style="fas", icon_name="fa-cloud-upload-alt"),
FontIconModel(style="fas", icon_name="fa-cocktail"),
FontIconModel(style="fas", icon_name="fa-code"),
FontIconModel(style="fas", icon_name="fa-code-branch"),
FontIconModel(style="fas", icon_name="fa-coffee"),
FontIconModel(style="fas", icon_name="fa-cog"),
FontIconModel(style="fas", icon_name="fa-cogs"),
FontIconModel(style="fas", icon_name="fa-coins"),
FontIconModel(style="fas", icon_name="fa-columns"),
FontIconModel(style="fas", icon_name="fa-comment"),
FontIconModel(style="fas", icon_name="fa-comment-alt"),
FontIconModel(style="fas", icon_name="fa-comment-dollar"),
FontIconModel(style="fas", icon_name="fa-comment-dots"),
FontIconModel(style="fas", icon_name="fa-comment-medical"),
FontIconModel(style="fas", icon_name="fa-comment-slash"),
FontIconModel(style="fas", icon_name="fa-comments"),
FontIconModel(style="fas", icon_name="fa-comments-dollar"),
FontIconModel(style="fas", icon_name="fa-compact-disc"),
FontIconModel(style="fas", icon_name="fa-compass"),
FontIconModel(style="fas", icon_name="fa-compress"),
FontIconModel(style="fas", icon_name="fa-compress-arrows-alt"),
FontIconModel(style="fas", icon_name="fa-concierge-bell"),
FontIconModel(style="fas", icon_name="fa-cookie"),
FontIconModel(style="fas", icon_name="fa-cookie-bite"),
FontIconModel(style="fas", icon_name="fa-copy"),
FontIconModel(style="fas", icon_name="fa-copyright"),
FontIconModel(style="fas", icon_name="fa-couch"),
FontIconModel(style="fas", icon_name="fa-credit-card"),
FontIconModel(style="fas", icon_name="fa-crop"),
FontIconModel(style="fas", icon_name="fa-crop-alt"),
FontIconModel(style="fas", icon_name="fa-cross"),
FontIconModel(style="fas", icon_name="fa-crosshairs"),
FontIconModel(style="fas", icon_name="fa-crow"),
FontIconModel(style="fas", icon_name="fa-crown"),
FontIconModel(style="fas", icon_name="fa-crutch"),
FontIconModel(style="fas", icon_name="fa-cube"),
FontIconModel(style="fas", icon_name="fa-cubes"),
FontIconModel(style="fas", icon_name="fa-cut"),
FontIconModel(style="fas", icon_name="fa-database"),
FontIconModel(style="fas", icon_name="fa-deaf"),
FontIconModel(style="fas", icon_name="fa-democrat"),
FontIconModel(style="fas", icon_name="fa-desktop"),
FontIconModel(style="fas", icon_name="fa-dharmachakra"),
FontIconModel(style="fas", icon_name="fa-diagnoses"),
FontIconModel(style="fas", icon_name="fa-dice"),
FontIconModel(style="fas", icon_name="fa-dice-d20"),
FontIconModel(style="fas", icon_name="fa-dice-d6"),
FontIconModel(style="fas", icon_name="fa-dice-five"),
FontIconModel(style="fas", icon_name="fa-dice-four"),
FontIconModel(style="fas", icon_name="fa-dice-one"),
FontIconModel(style="fas", icon_name="fa-dice-six"),
FontIconModel(style="fas", icon_name="fa-dice-three"),
FontIconModel(style="fas", icon_name="fa-dice-two"),
FontIconModel(style="fas", icon_name="fa-digital-tachograph"),
FontIconModel(style="fas", icon_name="fa-directions"),
FontIconModel(style="fas", icon_name="fa-divide"),
FontIconModel(style="fas", icon_name="fa-dizzy"),
FontIconModel(style="fas", icon_name="fa-dna"),
FontIconModel(style="fas", icon_name="fa-dog"),
FontIconModel(style="fas", icon_name="fa-dollar-sign"),
FontIconModel(style="fas", icon_name="fa-dolly"),
FontIconModel(style="fas", icon_name="fa-dolly-flatbed"),
FontIconModel(style="fas", icon_name="fa-donate"),
FontIconModel(style="fas", icon_name="fa-door-closed"),
FontIconModel(style="fas", icon_name="fa-door-open"),
FontIconModel(style="fas", icon_name="fa-dot-circle"),
FontIconModel(style="fas", icon_name="fa-dove"),
FontIconModel(style="fas", icon_name="fa-download"),
FontIconModel(style="fas", icon_name="fa-drafting-compass"),
FontIconModel(style="fas", icon_name="fa-dragon"),
FontIconModel(style="fas", icon_name="fa-draw-polygon"),
FontIconModel(style="fas", icon_name="fa-drum"),
FontIconModel(style="fas", icon_name="fa-drum-steelpan"),
FontIconModel(style="fas", icon_name="fa-drumstick-bite"),
FontIconModel(style="fas", icon_name="fa-dumbbell"),
FontIconModel(style="fas", icon_name="fa-dumpster"),
FontIconModel(style="fas", icon_name="fa-dumpster-fire"),
FontIconModel(style="fas", icon_name="fa-dungeon"),
FontIconModel(style="fas", icon_name="fa-edit"),
FontIconModel(style="fas", icon_name="fa-egg"),
FontIconModel(style="fas", icon_name="fa-eject"),
FontIconModel(style="fas", icon_name="fa-ellipsis-h"),
FontIconModel(style="fas", icon_name="fa-ellipsis-v"),
FontIconModel(style="fas", icon_name="fa-envelope"),
FontIconModel(style="fas", icon_name="fa-envelope-open"),
FontIconModel(style="fas", icon_name="fa-envelope-open-text"),
FontIconModel(style="fas", icon_name="fa-envelope-square"),
FontIconModel(style="fas", icon_name="fa-equals"),
FontIconModel(style="fas", icon_name="fa-eraser"),
FontIconModel(style="fas", icon_name="fa-ethernet"),
FontIconModel(style="fas", icon_name="fa-euro-sign"),
FontIconModel(style="fas", icon_name="fa-exchange-alt"),
FontIconModel(style="fas", icon_name="fa-exclamation"),
FontIconModel(style="fas", icon_name="fa-exclamation-circle"),
FontIconModel(style="fas", icon_name="fa-exclamation-triangle"),
FontIconModel(style="fas", icon_name="fa-expand"),
FontIconModel(style="fas", icon_name="fa-expand-arrows-alt"),
FontIconModel(style="fas", icon_name="fa-external-link-alt"),
FontIconModel(style="fas", icon_name="fa-external-link-square-alt"),
FontIconModel(style="fas", icon_name="fa-eye"),
FontIconModel(style="fas", icon_name="fa-eye-dropper"),
FontIconModel(style="fas", icon_name="fa-eye-slash"),
FontIconModel(style="fas", icon_name="fa-fast-backward"),
FontIconModel(style="fas", icon_name="fa-fast-forward"),
FontIconModel(style="fas", icon_name="fa-fax"),
FontIconModel(style="fas", icon_name="fa-feather"),
FontIconModel(style="fas", icon_name="fa-feather-alt"),
FontIconModel(style="fas", icon_name="fa-female"),
FontIconModel(style="fas", icon_name="fa-fighter-jet"),
FontIconModel(style="fas", icon_name="fa-file"),
FontIconModel(style="fas", icon_name="fa-file-alt"),
FontIconModel(style="fas", icon_name="fa-file-archive"),
FontIconModel(style="fas", icon_name="fa-file-audio"),
FontIconModel(style="fas", icon_name="fa-file-code"),
FontIconModel(style="fas", icon_name="fa-file-contract"),
FontIconModel(style="fas", icon_name="fa-file-csv"),
FontIconModel(style="fas", icon_name="fa-file-download"),
FontIconModel(style="fas", icon_name="fa-file-excel"),
FontIconModel(style="fas", icon_name="fa-file-export"),
FontIconModel(style="fas", icon_name="fa-file-image"),
FontIconModel(style="fas", icon_name="fa-file-import"),
FontIconModel(style="fas", icon_name="fa-file-invoice"),
FontIconModel(style="fas", icon_name="fa-file-invoice-dollar"),
FontIconModel(style="fas", icon_name="fa-file-medical"),
FontIconModel(style="fas", icon_name="fa-file-medical-alt"),
FontIconModel(style="fas", icon_name="fa-file-pdf"),
FontIconModel(style="fas", icon_name="fa-file-powerpoint"),
FontIconModel(style="fas", icon_name="fa-file-prescription"),
FontIconModel(style="fas", icon_name="fa-file-signature"),
FontIconModel(style="fas", icon_name="fa-file-upload"),
FontIconModel(style="fas", icon_name="fa-file-video"),
FontIconModel(style="fas", icon_name="fa-file-word"),
FontIconModel(style="fas", icon_name="fa-fill"),
FontIconModel(style="fas", icon_name="fa-fill-drip"),
FontIconModel(style="fas", icon_name="fa-film"),
FontIconModel(style="fas", icon_name="fa-filter"),
FontIconModel(style="fas", icon_name="fa-fingerprint"),
FontIconModel(style="fas", icon_name="fa-fire"),
FontIconModel(style="fas", icon_name="fa-fire-alt"),
FontIconModel(style="fas", icon_name="fa-fire-extinguisher"),
FontIconModel(style="fas", icon_name="fa-first-aid"),
FontIconModel(style="fas", icon_name="fa-fish"),
FontIconModel(style="fas", icon_name="fa-fist-raised"),
FontIconModel(style="fas", icon_name="fa-flag"),
FontIconModel(style="fas", icon_name="fa-flag-checkered"),
FontIconModel(style="fas", icon_name="fa-flag-usa"),
FontIconModel(style="fas", icon_name="fa-flask"),
FontIconModel(style="fas", icon_name="fa-flushed"),
FontIconModel(style="fas", icon_name="fa-folder"),
FontIconModel(style="fas", icon_name="fa-folder-minus"),
FontIconModel(style="fas", icon_name="fa-folder-open"),
FontIconModel(style="fas", icon_name="fa-folder-plus"),
FontIconModel(style="fas", icon_name="fa-font"),
FontIconModel(style="fas", icon_name="fa-football-ball"),
FontIconModel(style="fas", icon_name="fa-forward"),
FontIconModel(style="fas", icon_name="fa-frog"),
FontIconModel(style="fas", icon_name="fa-frown"),
FontIconModel(style="fas", icon_name="fa-frown-open"),
FontIconModel(style="fas", icon_name="fa-funnel-dollar"),
FontIconModel(style="fas", icon_name="fa-futbol"),
FontIconModel(style="fas", icon_name="fa-gamepad"),
FontIconModel(style="fas", icon_name="fa-gas-pump"),
FontIconModel(style="fas", icon_name="fa-gavel"),
FontIconModel(style="fas", icon_name="fa-gem"),
FontIconModel(style="fas", icon_name="fa-genderless"),
FontIconModel(style="fas", icon_name="fa-ghost"),
FontIconModel(style="fas", icon_name="fa-gift"),
FontIconModel(style="fas", icon_name="fa-gifts"),
FontIconModel(style="fas", icon_name="fa-glass-cheers"),
FontIconModel(style="fas", icon_name="fa-glass-martini"),
FontIconModel(style="fas", icon_name="fa-glass-martini-alt"),
FontIconModel(style="fas", icon_name="fa-glass-whiskey"),
FontIconModel(style="fas", icon_name="fa-glasses"),
FontIconModel(style="fas", icon_name="fa-globe"),
FontIconModel(style="fas", icon_name="fa-globe-africa"),
FontIconModel(style="fas", icon_name="fa-globe-americas"),
FontIconModel(style="fas", icon_name="fa-globe-asia"),
FontIconModel(style="fas", icon_name="fa-globe-europe"),
FontIconModel(style="fas", icon_name="fa-golf-ball"),
FontIconModel(style="fas", icon_name="fa-gopuram"),
FontIconModel(style="fas", icon_name="fa-graduation-cap"),
FontIconModel(style="fas", icon_name="fa-greater-than"),
FontIconModel(style="fas", icon_name="fa-greater-than-equal"),
FontIconModel(style="fas", icon_name="fa-grimace"),
FontIconModel(style="fas", icon_name="fa-grin"),
FontIconModel(style="fas", icon_name="fa-grin-alt"),
FontIconModel(style="fas", icon_name="fa-grin-beam"),
FontIconModel(style="fas", icon_name="fa-grin-beam-sweat"),
FontIconModel(style="fas", icon_name="fa-grin-hearts"),
FontIconModel(style="fas", icon_name="fa-grin-squint"),
FontIconModel(style="fas", icon_name="fa-grin-squint-tears"),
FontIconModel(style="fas", icon_name="fa-grin-stars"),
FontIconModel(style="fas", icon_name="fa-grin-tears"),
FontIconModel(style="fas", icon_name="fa-grin-tongue"),
FontIconModel(style="fas", icon_name="fa-grin-tongue-squint"),
FontIconModel(style="fas", icon_name="fa-grin-tongue-wink"),
FontIconModel(style="fas", icon_name="fa-grin-wink"),
FontIconModel(style="fas", icon_name="fa-grip-horizontal"),
FontIconModel(style="fas", icon_name="fa-grip-lines"),
FontIconModel(style="fas", icon_name="fa-grip-lines-vertical"),
FontIconModel(style="fas", icon_name="fa-grip-vertical"),
FontIconModel(style="fas", icon_name="fa-guitar"),
FontIconModel(style="fas", icon_name="fa-h-square"),
FontIconModel(style="fas", icon_name="fa-hamburger"),
FontIconModel(style="fas", icon_name="fa-hammer"),
FontIconModel(style="fas", icon_name="fa-hamsa"),
FontIconModel(style="fas", icon_name="fa-hand-holding"),
FontIconModel(style="fas", icon_name="fa-hand-holding-heart"),
FontIconModel(style="fas", icon_name="fa-hand-holding-usd"),
FontIconModel(style="fas", icon_name="fa-hand-lizard"),
FontIconModel(style="fas", icon_name="fa-hand-middle-finger"),
FontIconModel(style="fas", icon_name="fa-hand-paper"),
FontIconModel(style="fas", icon_name="fa-hand-peace"),
FontIconModel(style="fas", icon_name="fa-hand-point-down"),
FontIconModel(style="fas", icon_name="fa-hand-point-left"),
FontIconModel(style="fas", icon_name="fa-hand-point-right"),
FontIconModel(style="fas", icon_name="fa-hand-point-up"),
FontIconModel(style="fas", icon_name="fa-hand-pointer"),
FontIconModel(style="fas", icon_name="fa-hand-rock"),
FontIconModel(style="fas", icon_name="fa-hand-scissors"),
FontIconModel(style="fas", icon_name="fa-hand-spock"),
FontIconModel(style="fas", icon_name="fa-hands"),
FontIconModel(style="fas", icon_name="fa-hands-helping"),
FontIconModel(style="fas", icon_name="fa-handshake"),
FontIconModel(style="fas", icon_name="fa-hanukiah"),
FontIconModel(style="fas", icon_name="fa-hard-hat"),
FontIconModel(style="fas", icon_name="fa-hashtag"),
FontIconModel(style="fas", icon_name="fa-hat-wizard"),
FontIconModel(style="fas", icon_name="fa-haykal"),
FontIconModel(style="fas", icon_name="fa-hdd"),
FontIconModel(style="fas", icon_name="fa-heading"),
FontIconModel(style="fas", icon_name="fa-headphones"),
FontIconModel(style="fas", icon_name="fa-headphones-alt"),
FontIconModel(style="fas", icon_name="fa-headset"),
FontIconModel(style="fas", icon_name="fa-heart"),
FontIconModel(style="fas", icon_name="fa-heart-broken"),
FontIconModel(style="fas", icon_name="fa-heartbeat"),
FontIconModel(style="fas", icon_name="fa-helicopter"),
FontIconModel(style="fas", icon_name="fa-highlighter"),
FontIconModel(style="fas", icon_name="fa-hiking"),
FontIconModel(style="fas", icon_name="fa-hippo"),
FontIconModel(style="fas", icon_name="fa-history"),
FontIconModel(style="fas", icon_name="fa-hockey-puck"),
FontIconModel(style="fas", icon_name="fa-holly-berry"),
FontIconModel(style="fas", icon_name="fa-home"),
FontIconModel(style="fas", icon_name="fa-horse"),
FontIconModel(style="fas", icon_name="fa-horse-head"),
FontIconModel(style="fas", icon_name="fa-hospital"),
FontIconModel(style="fas", icon_name="fa-hospital-alt"),
FontIconModel(style="fas", icon_name="fa-hospital-symbol"),
FontIconModel(style="fas", icon_name="fa-hot-tub"),
FontIconModel(style="fas", icon_name="fa-hotdog"),
FontIconModel(style="fas", icon_name="fa-hotel"),
FontIconModel(style="fas", icon_name="fa-hourglass"),
FontIconModel(style="fas", icon_name="fa-hourglass-end"),
FontIconModel(style="fas", icon_name="fa-hourglass-half"),
FontIconModel(style="fas", icon_name="fa-hourglass-start"),
FontIconModel(style="fas", icon_name="fa-house-damage"),
FontIconModel(style="fas", icon_name="fa-hryvnia"),
FontIconModel(style="fas", icon_name="fa-i-cursor"),
FontIconModel(style="fas", icon_name="fa-ice-cream"),
FontIconModel(style="fas", icon_name="fa-icicles"),
FontIconModel(style="fas", icon_name="fa-id-badge"),
FontIconModel(style="fas", icon_name="fa-id-card"),
FontIconModel(style="fas", icon_name="fa-id-card-alt"),
FontIconModel(style="fas", icon_name="fa-igloo"),
FontIconModel(style="fas", icon_name="fa-image"),
FontIconModel(style="fas", icon_name="fa-images"),
FontIconModel(style="fas", icon_name="fa-inbox"),
FontIconModel(style="fas", icon_name="fa-indent"),
FontIconModel(style="fas", icon_name="fa-industry"),
FontIconModel(style="fas", icon_name="fa-infinity"),
FontIconModel(style="fas", icon_name="fa-info"),
FontIconModel(style="fas", icon_name="fa-info-circle"),
FontIconModel(style="fas", icon_name="fa-italic"),
FontIconModel(style="fas", icon_name="fa-jedi"),
FontIconModel(style="fas", icon_name="fa-joint"),
FontIconModel(style="fas", icon_name="fa-journal-whills"),
FontIconModel(style="fas", icon_name="fa-kaaba"),
FontIconModel(style="fas", icon_name="fa-key"),
FontIconModel(style="fas", icon_name="fa-keyboard"),
FontIconModel(style="fas", icon_name="fa-khanda"),
FontIconModel(style="fas", icon_name="fa-kiss"),
FontIconModel(style="fas", icon_name="fa-kiss-beam"),
FontIconModel(style="fas", icon_name="fa-kiss-wink-heart"),
FontIconModel(style="fas", icon_name="fa-kiwi-bird"),
FontIconModel(style="fas", icon_name="fa-landmark"),
FontIconModel(style="fas", icon_name="fa-language"),
FontIconModel(style="fas", icon_name="fa-laptop"),
FontIconModel(style="fas", icon_name="fa-laptop-code"),
FontIconModel(style="fas", icon_name="fa-laptop-medical"),
FontIconModel(style="fas", icon_name="fa-laugh"),
FontIconModel(style="fas", icon_name="fa-laugh-beam"),
FontIconModel(style="fas", icon_name="fa-laugh-squint"),
FontIconModel(style="fas", icon_name="fa-laugh-wink"),
FontIconModel(style="fas", icon_name="fa-layer-group"),
FontIconModel(style="fas", icon_name="fa-leaf"),
FontIconModel(style="fas", icon_name="fa-lemon"),
FontIconModel(style="fas", icon_name="fa-less-than"),
FontIconModel(style="fas", icon_name="fa-less-than-equal"),
FontIconModel(style="fas", icon_name="fa-level-down-alt"),
FontIconModel(style="fas", icon_name="fa-level-up-alt"),
FontIconModel(style="fas", icon_name="fa-life-ring"),
FontIconModel(style="fas", icon_name="fa-lightbulb"),
FontIconModel(style="fas", icon_name="fa-link"),
FontIconModel(style="fas", icon_name="fa-lira-sign"),
FontIconModel(style="fas", icon_name="fa-list"),
FontIconModel(style="fas", icon_name="fa-list-alt"),
FontIconModel(style="fas", icon_name="fa-list-ol"),
FontIconModel(style="fas", icon_name="fa-list-ul"),
FontIconModel(style="fas", icon_name="fa-location-arrow"),
FontIconModel(style="fas", icon_name="fa-lock"),
FontIconModel(style="fas", icon_name="fa-lock-open"),
FontIconModel(style="fas", icon_name="fa-long-arrow-alt-down"),
FontIconModel(style="fas", icon_name="fa-long-arrow-alt-left"),
FontIconModel(style="fas", icon_name="fa-long-arrow-alt-right"),
FontIconModel(style="fas", icon_name="fa-long-arrow-alt-up"),
FontIconModel(style="fas", icon_name="fa-low-vision"),
FontIconModel(style="fas", icon_name="fa-luggage-cart"),
FontIconModel(style="fas", icon_name="fa-magic"),
FontIconModel(style="fas", icon_name="fa-magnet"),
FontIconModel(style="fas", icon_name="fa-mail-bulk"),
FontIconModel(style="fas", icon_name="fa-male"),
FontIconModel(style="fas", icon_name="fa-map"),
FontIconModel(style="fas", icon_name="fa-map-marked"),
FontIconModel(style="fas", icon_name="fa-map-marked-alt"),
FontIconModel(style="fas", icon_name="fa-map-marker"),
FontIconModel(style="fas", icon_name="fa-map-marker-alt"),
FontIconModel(style="fas", icon_name="fa-map-pin"),
FontIconModel(style="fas", icon_name="fa-map-signs"),
FontIconModel(style="fas", icon_name="fa-marker"),
FontIconModel(style="fas", icon_name="fa-mars"),
FontIconModel(style="fas", icon_name="fa-mars-double"),
FontIconModel(style="fas", icon_name="fa-mars-stroke"),
FontIconModel(style="fas", icon_name="fa-mars-stroke-h"),
FontIconModel(style="fas", icon_name="fa-mars-stroke-v"),
FontIconModel(style="fas", icon_name="fa-mask"),
FontIconModel(style="fas", icon_name="fa-medal"),
FontIconModel(style="fas", icon_name="fa-medkit"),
FontIconModel(style="fas", icon_name="fa-meh"),
FontIconModel(style="fas", icon_name="fa-meh-blank"),
FontIconModel(style="fas", icon_name="fa-meh-rolling-eyes"),
FontIconModel(style="fas", icon_name="fa-memory"),
FontIconModel(style="fas", icon_name="fa-menorah"),
FontIconModel(style="fas", icon_name="fa-mercury"),
FontIconModel(style="fas", icon_name="fa-meteor"),
FontIconModel(style="fas", icon_name="fa-microchip"),
FontIconModel(style="fas", icon_name="fa-microphone"),
FontIconModel(style="fas", icon_name="fa-microphone-alt"),
FontIconModel(style="fas", icon_name="fa-microphone-alt-slash"),
FontIconModel(style="fas", icon_name="fa-microphone-slash"),
FontIconModel(style="fas", icon_name="fa-microscope"),
FontIconModel(style="fas", icon_name="fa-minus"),
FontIconModel(style="fas", icon_name="fa-minus-circle"),
FontIconModel(style="fas", icon_name="fa-minus-square"),
FontIconModel(style="fas", icon_name="fa-mitten"),
FontIconModel(style="fas", icon_name="fa-mobile"),
FontIconModel(style="fas", icon_name="fa-mobile-alt"),
FontIconModel(style="fas", icon_name="fa-money-bill"),
FontIconModel(style="fas", icon_name="fa-money-bill-alt"),
FontIconModel(style="fas", icon_name="fa-money-bill-wave"),
FontIconModel(style="fas", icon_name="fa-money-bill-wave-alt"),
FontIconModel(style="fas", icon_name="fa-money-check"),
FontIconModel(style="fas", icon_name="fa-money-check-alt"),
FontIconModel(style="fas", icon_name="fa-monument"),
FontIconModel(style="fas", icon_name="fa-moon"),
FontIconModel(style="fas", icon_name="fa-mortar-pestle"),
FontIconModel(style="fas", icon_name="fa-mosque"),
FontIconModel(style="fas", icon_name="fa-motorcycle"),
FontIconModel(style="fas", icon_name="fa-mountain"),
FontIconModel(style="fas", icon_name="fa-mouse-pointer"),
FontIconModel(style="fas", icon_name="fa-mug-hot"),
FontIconModel(style="fas", icon_name="fa-music"),
FontIconModel(style="fas", icon_name="fa-network-wired"),
FontIconModel(style="fas", icon_name="fa-neuter"),
FontIconModel(style="fas", icon_name="fa-newspaper"),
FontIconModel(style="fas", icon_name="fa-not-equal"),
FontIconModel(style="fas", icon_name="fa-notes-medical"),
FontIconModel(style="fas", icon_name="fa-object-group"),
FontIconModel(style="fas", icon_name="fa-object-ungroup"),
FontIconModel(style="fas", icon_name="fa-oil-can"),
FontIconModel(style="fas", icon_name="fa-om"),
FontIconModel(style="fas", icon_name="fa-otter"),
FontIconModel(style="fas", icon_name="fa-outdent"),
FontIconModel(style="fas", icon_name="fa-pager"),
FontIconModel(style="fas", icon_name="fa-paint-brush"),
FontIconModel(style="fas", icon_name="fa-paint-roller"),
FontIconModel(style="fas", icon_name="fa-palette"),
FontIconModel(style="fas", icon_name="fa-pallet"),
FontIconModel(style="fas", icon_name="fa-paper-plane"),
FontIconModel(style="fas", icon_name="fa-paperclip"),
FontIconModel(style="fas", icon_name="fa-parachute-box"),
FontIconModel(style="fas", icon_name="fa-paragraph"),
FontIconModel(style="fas", icon_name="fa-parking"),
FontIconModel(style="fas", icon_name="fa-passport"),
FontIconModel(style="fas", icon_name="fa-pastafarianism"),
FontIconModel(style="fas", icon_name="fa-paste"),
FontIconModel(style="fas", icon_name="fa-pause"),
FontIconModel(style="fas", icon_name="fa-pause-circle"),
FontIconModel(style="fas", icon_name="fa-paw"),
FontIconModel(style="fas", icon_name="fa-peace"),
FontIconModel(style="fas", icon_name="fa-pen"),
FontIconModel(style="fas", icon_name="fa-pen-alt"),
FontIconModel(style="fas", icon_name="fa-pen-fancy"),
FontIconModel(style="fas", icon_name="fa-pen-nib"),
FontIconModel(style="fas", icon_name="fa-pen-square"),
FontIconModel(style="fas", icon_name="fa-pencil-alt"),
FontIconModel(style="fas", icon_name="fa-pencil-ruler"),
FontIconModel(style="fas", icon_name="fa-people-carry"),
FontIconModel(style="fas", icon_name="fa-pepper-hot"),
FontIconModel(style="fas", icon_name="fa-percent"),
FontIconModel(style="fas", icon_name="fa-percentage"),
FontIconModel(style="fas", icon_name="fa-person-booth"),
FontIconModel(style="fas", icon_name="fa-phone"),
FontIconModel(style="fas", icon_name="fa-phone-slash"),
FontIconModel(style="fas", icon_name="fa-phone-square"),
FontIconModel(style="fas", icon_name="fa-phone-volume"),
FontIconModel(style="fas", icon_name="fa-piggy-bank"),
FontIconModel(style="fas", icon_name="fa-pills"),
FontIconModel(style="fas", icon_name="fa-pizza-slice"),
FontIconModel(style="fas", icon_name="fa-place-of-worship"),
FontIconModel(style="fas", icon_name="fa-plane"),
FontIconModel(style="fas", icon_name="fa-plane-arrival"),
FontIconModel(style="fas", icon_name="fa-plane-departure"),
FontIconModel(style="fas", icon_name="fa-play"),
FontIconModel(style="fas", icon_name="fa-play-circle"),
FontIconModel(style="fas", icon_name="fa-plug"),
FontIconModel(style="fas", icon_name="fa-plus"),
FontIconModel(style="fas", icon_name="fa-plus-circle"),
FontIconModel(style="fas", icon_name="fa-plus-square"),
FontIconModel(style="fas", icon_name="fa-podcast"),
FontIconModel(style="fas", icon_name="fa-poll"),
FontIconModel(style="fas", icon_name="fa-poll-h"),
FontIconModel(style="fas", icon_name="fa-poo"),
FontIconModel(style="fas", icon_name="fa-poo-storm"),
FontIconModel(style="fas", icon_name="fa-poop"),
FontIconModel(style="fas", icon_name="fa-portrait"),
FontIconModel(style="fas", icon_name="fa-pound-sign"),
FontIconModel(style="fas", icon_name="fa-power-off"),
FontIconModel(style="fas", icon_name="fa-pray"),
FontIconModel(style="fas", icon_name="fa-praying-hands"),
FontIconModel(style="fas", icon_name="fa-prescription"),
FontIconModel(style="fas", icon_name="fa-prescription-bottle"),
FontIconModel(style="fas", icon_name="fa-prescription-bottle-alt"),
FontIconModel(style="fas", icon_name="fa-print"),
FontIconModel(style="fas", icon_name="fa-procedures"),
FontIconModel(style="fas", icon_name="fa-project-diagram"),
FontIconModel(style="fas", icon_name="fa-puzzle-piece"),
FontIconModel(style="fas", icon_name="fa-qrcode"),
FontIconModel(style="fas", icon_name="fa-question"),
FontIconModel(style="fas", icon_name="fa-question-circle"),
FontIconModel(style="fas", icon_name="fa-quidditch"),
FontIconModel(style="fas", icon_name="fa-quote-left"),
FontIconModel(style="fas", icon_name="fa-quote-right"),
FontIconModel(style="fas", icon_name="fa-quran"),
FontIconModel(style="fas", icon_name="fa-radiation"),
FontIconModel(style="fas", icon_name="fa-radiation-alt"),
FontIconModel(style="fas", icon_name="fa-rainbow"),
FontIconModel(style="fas", icon_name="fa-random"),
FontIconModel(style="fas", icon_name="fa-receipt"),
FontIconModel(style="fas", icon_name="fa-recycle"),
FontIconModel(style="fas", icon_name="fa-redo"),
FontIconModel(style="fas", icon_name="fa-redo-alt"),
FontIconModel(style="fas", icon_name="fa-registered"),
FontIconModel(style="fas", icon_name="fa-reply"),
FontIconModel(style="fas", icon_name="fa-reply-all"),
FontIconModel(style="fas", icon_name="fa-republican"),
FontIconModel(style="fas", icon_name="fa-restroom"),
FontIconModel(style="fas", icon_name="fa-retweet"),
FontIconModel(style="fas", icon_name="fa-ribbon"),
FontIconModel(style="fas", icon_name="fa-ring"),
FontIconModel(style="fas", icon_name="fa-road"),
FontIconModel(style="fas", icon_name="fa-robot"),
FontIconModel(style="fas", icon_name="fa-rocket"),
FontIconModel(style="fas", icon_name="fa-route"),
FontIconModel(style="fas", icon_name="fa-rss"),
FontIconModel(style="fas", icon_name="fa-rss-square"),
FontIconModel(style="fas", icon_name="fa-ruble-sign"),
FontIconModel(style="fas", icon_name="fa-ruler"),
FontIconModel(style="fas", icon_name="fa-ruler-combined"),
FontIconModel(style="fas", icon_name="fa-ruler-horizontal"),
FontIconModel(style="fas", icon_name="fa-ruler-vertical"),
FontIconModel(style="fas", icon_name="fa-running"),
FontIconModel(style="fas", icon_name="fa-rupee-sign"),
FontIconModel(style="fas", icon_name="fa-sad-cry"),
FontIconModel(style="fas", icon_name="fa-sad-tear"),
FontIconModel(style="fas", icon_name="fa-satellite"),
FontIconModel(style="fas", icon_name="fa-satellite-dish"),
FontIconModel(style="fas", icon_name="fa-save"),
FontIconModel(style="fas", icon_name="fa-school"),
FontIconModel(style="fas", icon_name="fa-screwdriver"),
FontIconModel(style="fas", icon_name="fa-scroll"),
FontIconModel(style="fas", icon_name="fa-sd-card"),
FontIconModel(style="fas", icon_name="fa-search"),
FontIconModel(style="fas", icon_name="fa-search-dollar"),
FontIconModel(style="fas", icon_name="fa-search-location"),
FontIconModel(style="fas", icon_name="fa-search-minus"),
FontIconModel(style="fas", icon_name="fa-search-plus"),
FontIconModel(style="fas", icon_name="fa-seedling"),
FontIconModel(style="fas", icon_name="fa-server"),
FontIconModel(style="fas", icon_name="fa-shapes"),
FontIconModel(style="fas", icon_name="fa-share"),
FontIconModel(style="fas", icon_name="fa-share-alt"),
FontIconModel(style="fas", icon_name="fa-share-alt-square"),
FontIconModel(style="fas", icon_name="fa-share-square"),
FontIconModel(style="fas", icon_name="fa-shekel-sign"),
FontIconModel(style="fas", icon_name="fa-shield-alt"),
FontIconModel(style="fas", icon_name="fa-ship"),
FontIconModel(style="fas", icon_name="fa-shipping-fast"),
FontIconModel(style="fas", icon_name="fa-shoe-prints"),
FontIconModel(style="fas", icon_name="fa-shopping-bag"),
FontIconModel(style="fas", icon_name="fa-shopping-basket"),
FontIconModel(style="fas", icon_name="fa-shopping-cart"),
FontIconModel(style="fas", icon_name="fa-shower"),
FontIconModel(style="fas", icon_name="fa-shuttle-van"),
FontIconModel(style="fas", icon_name="fa-sign"),
FontIconModel(style="fas", icon_name="fa-sign-in-alt"),
FontIconModel(style="fas", icon_name="fa-sign-language"),
FontIconModel(style="fas", icon_name="fa-sign-out-alt"),
FontIconModel(style="fas", icon_name="fa-signal"),
FontIconModel(style="fas", icon_name="fa-signature"),
FontIconModel(style="fas", icon_name="fa-sim-card"),
FontIconModel(style="fas", icon_name="fa-sitemap"),
FontIconModel(style="fas", icon_name="fa-skating"),
FontIconModel(style="fas", icon_name="fa-skiing"),
FontIconModel(style="fas", icon_name="fa-skiing-nordic"),
FontIconModel(style="fas", icon_name="fa-skull"),
FontIconModel(style="fas", icon_name="fa-skull-crossbones"),
FontIconModel(style="fas", icon_name="fa-slash"),
FontIconModel(style="fas", icon_name="fa-sleigh"),
FontIconModel(style="fas", icon_name="fa-sliders-h"),
FontIconModel(style="fas", icon_name="fa-smile"),
FontIconModel(style="fas", icon_name="fa-smile-beam"),
FontIconModel(style="fas", icon_name="fa-smile-wink"),
FontIconModel(style="fas", icon_name="fa-smog"),
FontIconModel(style="fas", icon_name="fa-smoking"),
FontIconModel(style="fas", icon_name="fa-smoking-ban"),
FontIconModel(style="fas", icon_name="fa-sms"),
FontIconModel(style="fas", icon_name="fa-snowboarding"),
FontIconModel(style="fas", icon_name="fa-snowflake"),
FontIconModel(style="fas", icon_name="fa-snowman"),
FontIconModel(style="fas", icon_name="fa-snowplow"),
FontIconModel(style="fas", icon_name="fa-socks"),
FontIconModel(style="fas", icon_name="fa-solar-panel"),
FontIconModel(style="fas", icon_name="fa-sort"),
FontIconModel(style="fas", icon_name="fa-sort-alpha-down"),
FontIconModel(style="fas", icon_name="fa-sort-alpha-up"),
FontIconModel(style="fas", icon_name="fa-sort-amount-down"),
FontIconModel(style="fas", icon_name="fa-sort-amount-up"),
FontIconModel(style="fas", icon_name="fa-sort-down"),
FontIconModel(style="fas", icon_name="fa-sort-numeric-down"),
FontIconModel(style="fas", icon_name="fa-sort-numeric-up"),
FontIconModel(style="fas", icon_name="fa-sort-up"),
FontIconModel(style="fas", icon_name="fa-spa"),
FontIconModel(style="fas", icon_name="fa-space-shuttle"),
FontIconModel(style="fas", icon_name="fa-spider"),
FontIconModel(style="fas", icon_name="fa-spinner"),
FontIconModel(style="fas", icon_name="fa-splotch"),
FontIconModel(style="fas", icon_name="fa-spray-can"),
FontIconModel(style="fas", icon_name="fa-square"),
FontIconModel(style="fas", icon_name="fa-square-full"),
FontIconModel(style="fas", icon_name="fa-square-root-alt"),
FontIconModel(style="fas", icon_name="fa-stamp"),
FontIconModel(style="fas", icon_name="fa-star"),
FontIconModel(style="fas", icon_name="fa-star-and-crescent"),
FontIconModel(style="fas", icon_name="fa-star-half"),
FontIconModel(style="fas", icon_name="fa-star-half-alt"),
FontIconModel(style="fas", icon_name="fa-star-of-david"),
FontIconModel(style="fas", icon_name="fa-star-of-life"),
FontIconModel(style="fas", icon_name="fa-step-backward"),
FontIconModel(style="fas", icon_name="fa-step-forward"),
FontIconModel(style="fas", icon_name="fa-stethoscope"),
FontIconModel(style="fas", icon_name="fa-sticky-note"),
FontIconModel(style="fas", icon_name="fa-stop"),
FontIconModel(style="fas", icon_name="fa-stop-circle"),
FontIconModel(style="fas", icon_name="fa-stopwatch"),
FontIconModel(style="fas", icon_name="fa-store"),
FontIconModel(style="fas", icon_name="fa-store-alt"),
FontIconModel(style="fas", icon_name="fa-stream"),
FontIconModel(style="fas", icon_name="fa-street-view"),
FontIconModel(style="fas", icon_name="fa-strikethrough"),
FontIconModel(style="fas", icon_name="fa-stroopwafel"),
FontIconModel(style="fas", icon_name="fa-subscript"),
FontIconModel(style="fas", icon_name="fa-subway"),
FontIconModel(style="fas", icon_name="fa-suitcase"),
FontIconModel(style="fas", icon_name="fa-suitcase-rolling"),
FontIconModel(style="fas", icon_name="fa-sun"),
FontIconModel(style="fas", icon_name="fa-superscript"),
FontIconModel(style="fas", icon_name="fa-surprise"),
FontIconModel(style="fas", icon_name="fa-swatchbook"),
FontIconModel(style="fas", icon_name="fa-swimmer"),
FontIconModel(style="fas", icon_name="fa-swimming-pool"),
FontIconModel(style="fas", icon_name="fa-synagogue"),
FontIconModel(style="fas", icon_name="fa-sync"),
FontIconModel(style="fas", icon_name="fa-sync-alt"),
FontIconModel(style="fas", icon_name="fa-syringe"),
FontIconModel(style="fas", icon_name="fa-table"),
FontIconModel(style="fas", icon_name="fa-table-tennis"),
FontIconModel(style="fas", icon_name="fa-tablet"),
FontIconModel(style="fas", icon_name="fa-tablet-alt"),
FontIconModel(style="fas", icon_name="fa-tablets"),
FontIconModel(style="fas", icon_name="fa-tachometer-alt"),
FontIconModel(style="fas", icon_name="fa-tag"),
FontIconModel(style="fas", icon_name="fa-tags"),
FontIconModel(style="fas", icon_name="fa-tape"),
FontIconModel(style="fas", icon_name="fa-tasks"),
FontIconModel(style="fas", icon_name="fa-taxi"),
FontIconModel(style="fas", icon_name="fa-teeth"),
FontIconModel(style="fas", icon_name="fa-teeth-open"),
FontIconModel(style="fas", icon_name="fa-temperature-high"),
FontIconModel(style="fas", icon_name="fa-temperature-low"),
FontIconModel(style="fas", icon_name="fa-tenge"),
FontIconModel(style="fas", icon_name="fa-terminal"),
FontIconModel(style="fas", icon_name="fa-text-height"),
FontIconModel(style="fas", icon_name="fa-text-width"),
FontIconModel(style="fas", icon_name="fa-th"),
FontIconModel(style="fas", icon_name="fa-th-large"),
FontIconModel(style="fas", icon_name="fa-th-list"),
FontIconModel(style="fas", icon_name="fa-theater-masks"),
FontIconModel(style="fas", icon_name="fa-thermometer"),
FontIconModel(style="fas", icon_name="fa-thermometer-empty"),
FontIconModel(style="fas", icon_name="fa-thermometer-full"),
FontIconModel(style="fas", icon_name="fa-thermometer-half"),
FontIconModel(style="fas", icon_name="fa-thermometer-quarter"),
FontIconModel(style="fas", icon_name="fa-thermometer-three-quarters"),
FontIconModel(style="fas", icon_name="fa-thumbs-down"),
FontIconModel(style="fas", icon_name="fa-thumbs-up"),
FontIconModel(style="fas", icon_name="fa-thumbtack"),
FontIconModel(style="fas", icon_name="fa-ticket-alt"),
FontIconModel(style="fas", icon_name="fa-times"),
FontIconModel(style="fas", icon_name="fa-times-circle"),
FontIconModel(style="fas", icon_name="fa-tint"),
FontIconModel(style="fas", icon_name="fa-tint-slash"),
FontIconModel(style="fas", icon_name="fa-tired"),
FontIconModel(style="fas", icon_name="fa-toggle-off"),
FontIconModel(style="fas", icon_name="fa-toggle-on"),
FontIconModel(style="fas", icon_name="fa-toilet"),
FontIconModel(style="fas", icon_name="fa-toilet-paper"),
FontIconModel(style="fas", icon_name="fa-toolbox"),
FontIconModel(style="fas", icon_name="fa-tools"),
FontIconModel(style="fas", icon_name="fa-tooth"),
FontIconModel(style="fas", icon_name="fa-torah"),
FontIconModel(style="fas", icon_name="fa-torii-gate"),
FontIconModel(style="fas", icon_name="fa-tractor"),
FontIconModel(style="fas", icon_name="fa-trademark"),
FontIconModel(style="fas", icon_name="fa-traffic-light"),
FontIconModel(style="fas", icon_name="fa-train"),
FontIconModel(style="fas", icon_name="fa-tram"),
FontIconModel(style="fas", icon_name="fa-transgender"),
FontIconModel(style="fas", icon_name="fa-transgender-alt"),
FontIconModel(style="fas", icon_name="fa-trash"),
FontIconModel(style="fas", icon_name="fa-trash-alt"),
FontIconModel(style="fas", icon_name="fa-trash-restore"),
FontIconModel(style="fas", icon_name="fa-trash-restore-alt"),
FontIconModel(style="fas", icon_name="fa-tree"),
FontIconModel(style="fas", icon_name="fa-trophy"),
FontIconModel(style="fas", icon_name="fa-truck"),
FontIconModel(style="fas", icon_name="fa-truck-loading"),
FontIconModel(style="fas", icon_name="fa-truck-monster"),
FontIconModel(style="fas", icon_name="fa-truck-moving"),
FontIconModel(style="fas", icon_name="fa-truck-pickup"),
FontIconModel(style="fas", icon_name="fa-tshirt"),
FontIconModel(style="fas", icon_name="fa-tty"),
FontIconModel(style="fas", icon_name="fa-tv"),
FontIconModel(style="fas", icon_name="fa-umbrella"),
FontIconModel(style="fas", icon_name="fa-umbrella-beach"),
FontIconModel(style="fas", icon_name="fa-underline"),
FontIconModel(style="fas", icon_name="fa-undo"),
FontIconModel(style="fas", icon_name="fa-undo-alt"),
FontIconModel(style="fas", icon_name="fa-universal-access"),
FontIconModel(style="fas", icon_name="fa-university"),
FontIconModel(style="fas", icon_name="fa-unlink"),
FontIconModel(style="fas", icon_name="fa-unlock"),
FontIconModel(style="fas", icon_name="fa-unlock-alt"),
FontIconModel(style="fas", icon_name="fa-upload"),
FontIconModel(style="fas", icon_name="fa-user"),
FontIconModel(style="fas", icon_name="fa-user-alt"),
FontIconModel(style="fas", icon_name="fa-user-alt-slash"),
FontIconModel(style="fas", icon_name="fa-user-astronaut"),
FontIconModel(style="fas", icon_name="fa-user-check"),
FontIconModel(style="fas", icon_name="fa-user-circle"),
FontIconModel(style="fas", icon_name="fa-user-clock"),
FontIconModel(style="fas", icon_name="fa-user-cog"),
FontIconModel(style="fas", icon_name="fa-user-edit"),
FontIconModel(style="fas", icon_name="fa-user-friends"),
FontIconModel(style="fas", icon_name="fa-user-graduate"),
FontIconModel(style="fas", icon_name="fa-user-injured"),
FontIconModel(style="fas", icon_name="fa-user-lock"),
FontIconModel(style="fas", icon_name="fa-user-md"),
FontIconModel(style="fas", icon_name="fa-user-minus"),
FontIconModel(style="fas", icon_name="fa-user-ninja"),
FontIconModel(style="fas", icon_name="fa-user-nurse"),
FontIconModel(style="fas", icon_name="fa-user-plus"),
FontIconModel(style="fas", icon_name="fa-user-secret"),
FontIconModel(style="fas", icon_name="fa-user-shield"),
FontIconModel(style="fas", icon_name="fa-user-slash"),
FontIconModel(style="fas", icon_name="fa-user-tag"),
FontIconModel(style="fas", icon_name="fa-user-tie"),
FontIconModel(style="fas", icon_name="fa-user-times"),
FontIconModel(style="fas", icon_name="fa-users"),
FontIconModel(style="fas", icon_name="fa-users-cog"),
FontIconModel(style="fas", icon_name="fa-utensil-spoon"),
FontIconModel(style="fas", icon_name="fa-utensils"),
FontIconModel(style="fas", icon_name="fa-vector-square"),
FontIconModel(style="fas", icon_name="fa-venus"),
FontIconModel(style="fas", icon_name="fa-venus-double"),
FontIconModel(style="fas", icon_name="fa-venus-mars"),
FontIconModel(style="fas", icon_name="fa-vial"),
FontIconModel(style="fas", icon_name="fa-vials"),
FontIconModel(style="fas", icon_name="fa-video"),
FontIconModel(style="fas", icon_name="fa-video-slash"),
FontIconModel(style="fas", icon_name="fa-vihara"),
FontIconModel(style="fas", icon_name="fa-volleyball-ball"),
FontIconModel(style="fas", icon_name="fa-volume-down"),
FontIconModel(style="fas", icon_name="fa-volume-mute"),
FontIconModel(style="fas", icon_name="fa-volume-off"),
FontIconModel(style="fas", icon_name="fa-volume-up"),
FontIconModel(style="fas", icon_name="fa-vote-yea"),
FontIconModel(style="fas", icon_name="fa-vr-cardboard"),
FontIconModel(style="fas", icon_name="fa-walking"),
FontIconModel(style="fas", icon_name="fa-wallet"),
FontIconModel(style="fas", icon_name="fa-warehouse"),
FontIconModel(style="fas", icon_name="fa-water"),
FontIconModel(style="fas", icon_name="fa-weight"),
FontIconModel(style="fas", icon_name="fa-weight-hanging"),
FontIconModel(style="fas", icon_name="fa-wheelchair"),
FontIconModel(style="fas", icon_name="fa-wifi"),
FontIconModel(style="fas", icon_name="fa-wind"),
FontIconModel(style="fas", icon_name="fa-window-close"),
FontIconModel(style="fas", icon_name="fa-window-maximize"),
FontIconModel(style="fas", icon_name="fa-window-minimize"),
FontIconModel(style="fas", icon_name="fa-window-restore"),
FontIconModel(style="fas", icon_name="fa-wine-bottle"),
FontIconModel(style="fas", icon_name="fa-wine-glass"),
FontIconModel(style="fas", icon_name="fa-wine-glass-alt"),
FontIconModel(style="fas", icon_name="fa-won-sign"),
FontIconModel(style="fas", icon_name="fa-wrench"),
FontIconModel(style="fas", icon_name="fa-x-ray"),
FontIconModel(style="fas", icon_name="fa-yen-sign"),
FontIconModel(style="fas", icon_name="fa-yin-yang"),
FontIconModel(style="fab", icon_name="fa-500px"),
FontIconModel(style="fab", icon_name="fa-accessible-icon"),
FontIconModel(style="fab", icon_name="fa-accusoft"),
FontIconModel(style="fab", icon_name="fa-acquisitions-incorporated"),
FontIconModel(style="fab", icon_name="fa-adn"),
FontIconModel(style="fab", icon_name="fa-adobe"),
FontIconModel(style="fab", icon_name="fa-adversal"),
FontIconModel(style="fab", icon_name="fa-affiliatetheme"),
FontIconModel(style="fab", icon_name="fa-algolia"),
FontIconModel(style="fab", icon_name="fa-alipay"),
FontIconModel(style="fab", icon_name="fa-amazon"),
FontIconModel(style="fab", icon_name="fa-amazon-pay"),
FontIconModel(style="fab", icon_name="fa-amilia"),
FontIconModel(style="fab", icon_name="fa-android"),
FontIconModel(style="fab", icon_name="fa-angellist"),
FontIconModel(style="fab", icon_name="fa-angrycreative"),
FontIconModel(style="fab", icon_name="fa-angular"),
FontIconModel(style="fab", icon_name="fa-app-store"),
FontIconModel(style="fab", icon_name="fa-app-store-ios"),
FontIconModel(style="fab", icon_name="fa-apper"),
FontIconModel(style="fab", icon_name="fa-apple"),
FontIconModel(style="fab", icon_name="fa-apple-pay"),
FontIconModel(style="fab", icon_name="fa-artstation"),
FontIconModel(style="fab", icon_name="fa-asymmetrik"),
FontIconModel(style="fab", icon_name="fa-atlassian"),
FontIconModel(style="fab", icon_name="fa-audible"),
FontIconModel(style="fab", icon_name="fa-autoprefixer"),
FontIconModel(style="fab", icon_name="fa-avianex"),
FontIconModel(style="fab", icon_name="fa-aviato"),
FontIconModel(style="fab", icon_name="fa-aws"),
FontIconModel(style="fab", icon_name="fa-bandcamp"),
FontIconModel(style="fab", icon_name="fa-behance"),
FontIconModel(style="fab", icon_name="fa-behance-square"),
FontIconModel(style="fab", icon_name="fa-bimobject"),
FontIconModel(style="fab", icon_name="fa-bitbucket"),
FontIconModel(style="fab", icon_name="fa-bitcoin"),
FontIconModel(style="fab", icon_name="fa-bity"),
FontIconModel(style="fab", icon_name="fa-black-tie"),
FontIconModel(style="fab", icon_name="fa-blackberry"),
FontIconModel(style="fab", icon_name="fa-blogger"),
FontIconModel(style="fab", icon_name="fa-blogger-b"),
FontIconModel(style="fab", icon_name="fa-bluetooth"),
FontIconModel(style="fab", icon_name="fa-bluetooth-b"),
FontIconModel(style="fab", icon_name="fa-btc"),
FontIconModel(style="fab", icon_name="fa-buromobelexperte"),
FontIconModel(style="fab", icon_name="fa-canadian-maple-leaf"),
FontIconModel(style="fab", icon_name="fa-cc-amazon-pay"),
FontIconModel(style="fab", icon_name="fa-cc-amex"),
FontIconModel(style="fab", icon_name="fa-cc-apple-pay"),
FontIconModel(style="fab", icon_name="fa-cc-diners-club"),
FontIconModel(style="fab", icon_name="fa-cc-discover"),
FontIconModel(style="fab", icon_name="fa-cc-jcb"),
FontIconModel(style="fab", icon_name="fa-cc-mastercard"),
FontIconModel(style="fab", icon_name="fa-cc-paypal"),
FontIconModel(style="fab", icon_name="fa-cc-stripe"),
FontIconModel(style="fab", icon_name="fa-cc-visa"),
FontIconModel(style="fab", icon_name="fa-centercode"),
FontIconModel(style="fab", icon_name="fa-centos"),
FontIconModel(style="fab", icon_name="fa-chrome"),
FontIconModel(style="fab", icon_name="fa-cloudscale"),
FontIconModel(style="fab", icon_name="fa-cloudsmith"),
FontIconModel(style="fab", icon_name="fa-cloudversify"),
FontIconModel(style="fab", icon_name="fa-codepen"),
FontIconModel(style="fab", icon_name="fa-codiepie"),
FontIconModel(style="fab", icon_name="fa-confluence"),
FontIconModel(style="fab", icon_name="fa-connectdevelop"),
FontIconModel(style="fab", icon_name="fa-contao"),
FontIconModel(style="fab", icon_name="fa-cpanel"),
FontIconModel(style="fab", icon_name="fa-creative-commons"),
FontIconModel(style="fab", icon_name="fa-creative-commons-by"),
FontIconModel(style="fab", icon_name="fa-creative-commons-nc"),
FontIconModel(style="fab", icon_name="fa-creative-commons-nc-eu"),
FontIconModel(style="fab", icon_name="fa-creative-commons-nc-jp"),
FontIconModel(style="fab", icon_name="fa-creative-commons-nd"),
FontIconModel(style="fab", icon_name="fa-creative-commons-pd"),
FontIconModel(style="fab", icon_name="fa-creative-commons-pd-alt"),
FontIconModel(style="fab", icon_name="fa-creative-commons-remix"),
FontIconModel(style="fab", icon_name="fa-creative-commons-sa"),
FontIconModel(style="fab", icon_name="fa-creative-commons-sampling"),
FontIconModel(style="fab", icon_name="fa-creative-commons-sampling-plus"),
FontIconModel(style="fab", icon_name="fa-creative-commons-share"),
FontIconModel(style="fab", icon_name="fa-creative-commons-zero"),
FontIconModel(style="fab", icon_name="fa-critical-role"),
FontIconModel(style="fab", icon_name="fa-css3"),
FontIconModel(style="fab", icon_name="fa-css3-alt"),
FontIconModel(style="fab", icon_name="fa-cuttlefish"),
FontIconModel(style="fab", icon_name="fa-d-and-d"),
FontIconModel(style="fab", icon_name="fa-d-and-d-beyond"),
FontIconModel(style="fab", icon_name="fa-dashcube"),
FontIconModel(style="fab", icon_name="fa-delicious"),
FontIconModel(style="fab", icon_name="fa-deploydog"),
FontIconModel(style="fab", icon_name="fa-deskpro"),
FontIconModel(style="fab", icon_name="fa-dev"),
FontIconModel(style="fab", icon_name="fa-deviantart"),
FontIconModel(style="fab", icon_name="fa-dhl"),
FontIconModel(style="fab", icon_name="fa-diaspora"),
FontIconModel(style="fab", icon_name="fa-digg"),
FontIconModel(style="fab", icon_name="fa-digital-ocean"),
FontIconModel(style="fab", icon_name="fa-discord"),
FontIconModel(style="fab", icon_name="fa-discourse"),
FontIconModel(style="fab", icon_name="fa-dochub"),
FontIconModel(style="fab", icon_name="fa-docker"),
FontIconModel(style="fab", icon_name="fa-draft2digital"),
FontIconModel(style="fab", icon_name="fa-dribbble"),
FontIconModel(style="fab", icon_name="fa-dribbble-square"),
FontIconModel(style="fab", icon_name="fa-dropbox"),
FontIconModel(style="fab", icon_name="fa-drupal"),
FontIconModel(style="fab", icon_name="fa-dyalog"),
FontIconModel(style="fab", icon_name="fa-earlybirds"),
FontIconModel(style="fab", icon_name="fa-ebay"),
FontIconModel(style="fab", icon_name="fa-edge"),
FontIconModel(style="fab", icon_name="fa-elementor"),
FontIconModel(style="fab", icon_name="fa-ello"),
FontIconModel(style="fab", icon_name="fa-ember"),
FontIconModel(style="fab", icon_name="fa-empire"),
FontIconModel(style="fab", icon_name="fa-envira"),
FontIconModel(style="fab", icon_name="fa-erlang"),
FontIconModel(style="fab", icon_name="fa-ethereum"),
FontIconModel(style="fab", icon_name="fa-etsy"),
FontIconModel(style="fab", icon_name="fa-expeditedssl"),
FontIconModel(style="fab", icon_name="fa-facebook"),
FontIconModel(style="fab", icon_name="fa-facebook-f"),
FontIconModel(style="fab", icon_name="fa-facebook-messenger"),
FontIconModel(style="fab", icon_name="fa-facebook-square"),
FontIconModel(style="fab", icon_name="fa-fantasy-flight-games"),
FontIconModel(style="fab", icon_name="fa-fedex"),
FontIconModel(style="fab", icon_name="fa-fedora"),
FontIconModel(style="fab", icon_name="fa-figma"),
FontIconModel(style="fab", icon_name="fa-firefox"),
FontIconModel(style="fab", icon_name="fa-first-order"),
FontIconModel(style="fab", icon_name="fa-first-order-alt"),
FontIconModel(style="fab", icon_name="fa-firstdraft"),
FontIconModel(style="fab", icon_name="fa-flickr"),
FontIconModel(style="fab", icon_name="fa-flipboard"),
FontIconModel(style="fab", icon_name="fa-fly"),
FontIconModel(style="fab", icon_name="fa-font-awesome"),
FontIconModel(style="fab", icon_name="fa-font-awesome-alt"),
FontIconModel(style="fab", icon_name="fa-font-awesome-flag"),
FontIconModel(style="fab", icon_name="fa-fonticons"),
FontIconModel(style="fab", icon_name="fa-fonticons-fi"),
FontIconModel(style="fab", icon_name="fa-fort-awesome"),
FontIconModel(style="fab", icon_name="fa-fort-awesome-alt"),
FontIconModel(style="fab", icon_name="fa-forumbee"),
FontIconModel(style="fab", icon_name="fa-foursquare"),
FontIconModel(style="fab", icon_name="fa-free-code-camp"),
FontIconModel(style="fab", icon_name="fa-freebsd"),
FontIconModel(style="fab", icon_name="fa-fulcrum"),
FontIconModel(style="fab", icon_name="fa-galactic-republic"),
FontIconModel(style="fab", icon_name="fa-galactic-senate"),
FontIconModel(style="fab", icon_name="fa-get-pocket"),
FontIconModel(style="fab", icon_name="fa-gg"),
FontIconModel(style="fab", icon_name="fa-gg-circle"),
FontIconModel(style="fab", icon_name="fa-git"),
FontIconModel(style="fab", icon_name="fa-git-square"),
FontIconModel(style="fab", icon_name="fa-github"),
FontIconModel(style="fab", icon_name="fa-github-alt"),
FontIconModel(style="fab", icon_name="fa-github-square"),
FontIconModel(style="fab", icon_name="fa-gitkraken"),
FontIconModel(style="fab", icon_name="fa-gitlab"),
FontIconModel(style="fab", icon_name="fa-gitter"),
FontIconModel(style="fab", icon_name="fa-glide"),
FontIconModel(style="fab", icon_name="fa-glide-g"),
FontIconModel(style="fab", icon_name="fa-gofore"),
FontIconModel(style="fab", icon_name="fa-goodreads"),
FontIconModel(style="fab", icon_name="fa-goodreads-g"),
FontIconModel(style="fab", icon_name="fa-google"),
FontIconModel(style="fab", icon_name="fa-google-drive"),
FontIconModel(style="fab", icon_name="fa-google-play"),
FontIconModel(style="fab", icon_name="fa-google-plus"),
FontIconModel(style="fab", icon_name="fa-google-plus-g"),
FontIconModel(style="fab", icon_name="fa-google-plus-square"),
FontIconModel(style="fab", icon_name="fa-google-wallet"),
FontIconModel(style="fab", icon_name="fa-gratipay"),
FontIconModel(style="fab", icon_name="fa-grav"),
FontIconModel(style="fab", icon_name="fa-gripfire"),
FontIconModel(style="fab", icon_name="fa-grunt"),
FontIconModel(style="fab", icon_name="fa-gulp"),
FontIconModel(style="fab", icon_name="fa-hacker-news"),
FontIconModel(style="fab", icon_name="fa-hacker-news-square"),
FontIconModel(style="fab", icon_name="fa-hackerrank"),
FontIconModel(style="fab", icon_name="fa-hips"),
FontIconModel(style="fab", icon_name="fa-hire-a-helper"),
FontIconModel(style="fab", icon_name="fa-hooli"),
FontIconModel(style="fab", icon_name="fa-hornbill"),
FontIconModel(style="fab", icon_name="fa-hotjar"),
FontIconModel(style="fab", icon_name="fa-houzz"),
FontIconModel(style="fab", icon_name="fa-html5"),
FontIconModel(style="fab", icon_name="fa-hubspot"),
FontIconModel(style="fab", icon_name="fa-imdb"),
FontIconModel(style="fab", icon_name="fa-instagram"),
FontIconModel(style="fab", icon_name="fa-intercom"),
FontIconModel(style="fab", icon_name="fa-internet-explorer"),
FontIconModel(style="fab", icon_name="fa-invision"),
FontIconModel(style="fab", icon_name="fa-ioxhost"),
FontIconModel(style="fab", icon_name="fa-itunes"),
FontIconModel(style="fab", icon_name="fa-itunes-note"),
FontIconModel(style="fab", icon_name="fa-java"),
FontIconModel(style="fab", icon_name="fa-jedi-order"),
FontIconModel(style="fab", icon_name="fa-jenkins"),
FontIconModel(style="fab", icon_name="fa-jira"),
FontIconModel(style="fab", icon_name="fa-joget"),
FontIconModel(style="fab", icon_name="fa-joomla"),
FontIconModel(style="fab", icon_name="fa-js"),
FontIconModel(style="fab", icon_name="fa-js-square"),
FontIconModel(style="fab", icon_name="fa-jsfiddle"),
FontIconModel(style="fab", icon_name="fa-kaggle"),
FontIconModel(style="fab", icon_name="fa-keybase"),
FontIconModel(style="fab", icon_name="fa-keycdn"),
FontIconModel(style="fab", icon_name="fa-kickstarter"),
FontIconModel(style="fab", icon_name="fa-kickstarter-k"),
FontIconModel(style="fab", icon_name="fa-korvue"),
FontIconModel(style="fab", icon_name="fa-laravel"),
FontIconModel(style="fab", icon_name="fa-lastfm"),
FontIconModel(style="fab", icon_name="fa-lastfm-square"),
FontIconModel(style="fab", icon_name="fa-leanpub"),
FontIconModel(style="fab", icon_name="fa-less"),
FontIconModel(style="fab", icon_name="fa-line"),
FontIconModel(style="fab", icon_name="fa-linkedin"),
FontIconModel(style="fab", icon_name="fa-linkedin-in"),
FontIconModel(style="fab", icon_name="fa-linode"),
FontIconModel(style="fab", icon_name="fa-linux"),
FontIconModel(style="fab", icon_name="fa-lyft"),
FontIconModel(style="fab", icon_name="fa-magento"),
FontIconModel(style="fab", icon_name="fa-mailchimp"),
FontIconModel(style="fab", icon_name="fa-mandalorian"),
FontIconModel(style="fab", icon_name="fa-markdown"),
FontIconModel(style="fab", icon_name="fa-mastodon"),
FontIconModel(style="fab", icon_name="fa-maxcdn"),
FontIconModel(style="fab", icon_name="fa-medapps"),
FontIconModel(style="fab", icon_name="fa-medium"),
FontIconModel(style="fab", icon_name="fa-medium-m"),
FontIconModel(style="fab", icon_name="fa-medrt"),
FontIconModel(style="fab", icon_name="fa-meetup"),
FontIconModel(style="fab", icon_name="fa-megaport"),
FontIconModel(style="fab", icon_name="fa-mendeley"),
FontIconModel(style="fab", icon_name="fa-microsoft"),
FontIconModel(style="fab", icon_name="fa-mix"),
FontIconModel(style="fab", icon_name="fa-mixcloud"),
FontIconModel(style="fab", icon_name="fa-mizuni"),
FontIconModel(style="fab", icon_name="fa-modx"),
FontIconModel(style="fab", icon_name="fa-monero"),
FontIconModel(style="fab", icon_name="fa-napster"),
FontIconModel(style="fab", icon_name="fa-neos"),
FontIconModel(style="fab", icon_name="fa-nimblr"),
FontIconModel(style="fab", icon_name="fa-nintendo-switch"),
FontIconModel(style="fab", icon_name="fa-node"),
FontIconModel(style="fab", icon_name="fa-node-js"),
FontIconModel(style="fab", icon_name="fa-npm"),
FontIconModel(style="fab", icon_name="fa-ns8"),
FontIconModel(style="fab", icon_name="fa-nutritionix"),
FontIconModel(style="fab", icon_name="fa-odnoklassniki"),
FontIconModel(style="fab", icon_name="fa-odnoklassniki-square"),
FontIconModel(style="fab", icon_name="fa-old-republic"),
FontIconModel(style="fab", icon_name="fa-opencart"),
FontIconModel(style="fab", icon_name="fa-openid"),
FontIconModel(style="fab", icon_name="fa-opera"),
FontIconModel(style="fab", icon_name="fa-optin-monster"),
FontIconModel(style="fab", icon_name="fa-osi"),
FontIconModel(style="fab", icon_name="fa-page4"),
FontIconModel(style="fab", icon_name="fa-pagelines"),
FontIconModel(style="fab", icon_name="fa-palfed"),
FontIconModel(style="fab", icon_name="fa-patreon"),
FontIconModel(style="fab", icon_name="fa-paypal"),
FontIconModel(style="fab", icon_name="fa-penny-arcade"),
FontIconModel(style="fab", icon_name="fa-periscope"),
FontIconModel(style="fab", icon_name="fa-phabricator"),
FontIconModel(style="fab", icon_name="fa-phoenix-framework"),
FontIconModel(style="fab", icon_name="fa-phoenix-squadron"),
FontIconModel(style="fab", icon_name="fa-php"),
FontIconModel(style="fab", icon_name="fa-pied-piper"),
FontIconModel(style="fab", icon_name="fa-pied-piper-alt"),
FontIconModel(style="fab", icon_name="fa-pied-piper-hat"),
FontIconModel(style="fab", icon_name="fa-pied-piper-pp"),
FontIconModel(style="fab", icon_name="fa-pinterest"),
FontIconModel(style="fab", icon_name="fa-pinterest-p"),
FontIconModel(style="fab", icon_name="fa-pinterest-square"),
FontIconModel(style="fab", icon_name="fa-playstation"),
FontIconModel(style="fab", icon_name="fa-product-hunt"),
FontIconModel(style="fab", icon_name="fa-pushed"),
FontIconModel(style="fab", icon_name="fa-python"),
FontIconModel(style="fab", icon_name="fa-qq"),
FontIconModel(style="fab", icon_name="fa-quinscape"),
FontIconModel(style="fab", icon_name="fa-quora"),
FontIconModel(style="fab", icon_name="fa-r-project"),
FontIconModel(style="fab", icon_name="fa-raspberry-pi"),
FontIconModel(style="fab", icon_name="fa-ravelry"),
FontIconModel(style="fab", icon_name="fa-react"),
FontIconModel(style="fab", icon_name="fa-reacteurope"),
FontIconModel(style="fab", icon_name="fa-readme"),
FontIconModel(style="fab", icon_name="fa-rebel"),
FontIconModel(style="fab", icon_name="fa-red-river"),
FontIconModel(style="fab", icon_name="fa-reddit"),
FontIconModel(style="fab", icon_name="fa-reddit-alien"),
FontIconModel(style="fab", icon_name="fa-reddit-square"),
FontIconModel(style="fab", icon_name="fa-redhat"),
FontIconModel(style="fab", icon_name="fa-renren"),
FontIconModel(style="fab", icon_name="fa-replyd"),
FontIconModel(style="fab", icon_name="fa-researchgate"),
FontIconModel(style="fab", icon_name="fa-resolving"),
FontIconModel(style="fab", icon_name="fa-rev"),
FontIconModel(style="fab", icon_name="fa-rocketchat"),
FontIconModel(style="fab", icon_name="fa-rockrms"),
FontIconModel(style="fab", icon_name="fa-safari"),
FontIconModel(style="fab", icon_name="fa-sass"),
FontIconModel(style="fab", icon_name="fa-schlix"),
FontIconModel(style="fab", icon_name="fa-scribd"),
FontIconModel(style="fab", icon_name="fa-searchengin"),
FontIconModel(style="fab", icon_name="fa-sellcast"),
FontIconModel(style="fab", icon_name="fa-sellsy"),
FontIconModel(style="fab", icon_name="fa-servicestack"),
FontIconModel(style="fab", icon_name="fa-shirtsinbulk"),
FontIconModel(style="fab", icon_name="fa-shopware"),
FontIconModel(style="fab", icon_name="fa-simplybuilt"),
FontIconModel(style="fab", icon_name="fa-sistrix"),
FontIconModel(style="fab", icon_name="fa-sith"),
FontIconModel(style="fab", icon_name="fa-sketch"),
FontIconModel(style="fab", icon_name="fa-skyatlas"),
FontIconModel(style="fab", icon_name="fa-skype"),
FontIconModel(style="fab", icon_name="fa-slack"),
FontIconModel(style="fab", icon_name="fa-slack-hash"),
FontIconModel(style="fab", icon_name="fa-slideshare"),
FontIconModel(style="fab", icon_name="fa-snapchat"),
FontIconModel(style="fab", icon_name="fa-snapchat-ghost"),
FontIconModel(style="fab", icon_name="fa-snapchat-square"),
FontIconModel(style="fab", icon_name="fa-soundcloud"),
FontIconModel(style="fab", icon_name="fa-sourcetree"),
FontIconModel(style="fab", icon_name="fa-speakap"),
FontIconModel(style="fab", icon_name="fa-spotify"),
FontIconModel(style="fab", icon_name="fa-squarespace"),
FontIconModel(style="fab", icon_name="fa-stack-exchange"),
FontIconModel(style="fab", icon_name="fa-stack-overflow"),
FontIconModel(style="fab", icon_name="fa-staylinked"),
FontIconModel(style="fab", icon_name="fa-steam"),
FontIconModel(style="fab", icon_name="fa-steam-square"),
FontIconModel(style="fab", icon_name="fa-steam-symbol"),
FontIconModel(style="fab", icon_name="fa-sticker-mule"),
FontIconModel(style="fab", icon_name="fa-strava"),
FontIconModel(style="fab", icon_name="fa-stripe"),
FontIconModel(style="fab", icon_name="fa-stripe-s"),
FontIconModel(style="fab", icon_name="fa-studiovinari"),
FontIconModel(style="fab", icon_name="fa-stumbleupon"),
FontIconModel(style="fab", icon_name="fa-stumbleupon-circle"),
FontIconModel(style="fab", icon_name="fa-superpowers"),
FontIconModel(style="fab", icon_name="fa-supple"),
FontIconModel(style="fab", icon_name="fa-suse"),
FontIconModel(style="fab", icon_name="fa-teamspeak"),
FontIconModel(style="fab", icon_name="fa-telegram"),
FontIconModel(style="fab", icon_name="fa-telegram-plane"),
FontIconModel(style="fab", icon_name="fa-tencent-weibo"),
FontIconModel(style="fab", icon_name="fa-the-red-yeti"),
FontIconModel(style="fab", icon_name="fa-themeco"),
FontIconModel(style="fab", icon_name="fa-themeisle"),
FontIconModel(style="fab", icon_name="fa-think-peaks"),
FontIconModel(style="fab", icon_name="fa-trade-federation"),
FontIconModel(style="fab", icon_name="fa-trello"),
FontIconModel(style="fab", icon_name="fa-tripadvisor"),
FontIconModel(style="fab", icon_name="fa-tumblr"),
FontIconModel(style="fab", icon_name="fa-tumblr-square"),
FontIconModel(style="fab", icon_name="fa-twitch"),
FontIconModel(style="fab", icon_name="fa-twitter"),
FontIconModel(style="fab", icon_name="fa-twitter-square"),
FontIconModel(style="fab", icon_name="fa-typo3"),
FontIconModel(style="fab", icon_name="fa-uber"),
FontIconModel(style="fab", icon_name="fa-ubuntu"),
FontIconModel(style="fab", icon_name="fa-uikit"),
FontIconModel(style="fab", icon_name="fa-uniregistry"),
FontIconModel(style="fab", icon_name="fa-untappd"),
FontIconModel(style="fab", icon_name="fa-ups"),
FontIconModel(style="fab", icon_name="fa-usb"),
FontIconModel(style="fab", icon_name="fa-usps"),
FontIconModel(style="fab", icon_name="fa-ussunnah"),
FontIconModel(style="fab", icon_name="fa-vaadin"),
FontIconModel(style="fab", icon_name="fa-viacoin"),
FontIconModel(style="fab", icon_name="fa-viadeo"),
FontIconModel(style="fab", icon_name="fa-viadeo-square"),
FontIconModel(style="fab", icon_name="fa-viber"),
FontIconModel(style="fab", icon_name="fa-vimeo"),
FontIconModel(style="fab", icon_name="fa-vimeo-square"),
FontIconModel(style="fab", icon_name="fa-vimeo-v"),
FontIconModel(style="fab", icon_name="fa-vine"),
FontIconModel(style="fab", icon_name="fa-vk"),
FontIconModel(style="fab", icon_name="fa-vnv"),
FontIconModel(style="fab", icon_name="fa-vuejs"),
FontIconModel(style="fab", icon_name="fa-weebly"),
FontIconModel(style="fab", icon_name="fa-weibo"),
FontIconModel(style="fab", icon_name="fa-weixin"),
FontIconModel(style="fab", icon_name="fa-whatsapp"),
FontIconModel(style="fab", icon_name="fa-whatsapp-square"),
FontIconModel(style="fab", icon_name="fa-whmcs"),
FontIconModel(style="fab", icon_name="fa-wikipedia-w"),
FontIconModel(style="fab", icon_name="fa-windows"),
FontIconModel(style="fab", icon_name="fa-wix"),
FontIconModel(style="fab", icon_name="fa-wizards-of-the-coast"),
FontIconModel(style="fab", icon_name="fa-wolf-pack-battalion"),
FontIconModel(style="fab", icon_name="fa-wordpress"),
FontIconModel(style="fab", icon_name="fa-wordpress-simple"),
FontIconModel(style="fab", icon_name="fa-wpbeginner"),
FontIconModel(style="fab", icon_name="fa-wpexplorer"),
FontIconModel(style="fab", icon_name="fa-wpforms"),
FontIconModel(style="fab", icon_name="fa-wpressr"),
FontIconModel(style="fab", icon_name="fa-xbox"),
FontIconModel(style="fab", icon_name="fa-xing"),
FontIconModel(style="fab", icon_name="fa-xing-square"),
FontIconModel(style="fab", icon_name="fa-y-combinator"),
FontIconModel(style="fab", icon_name="fa-yahoo"),
FontIconModel(style="fab", icon_name="fa-yandex"),
FontIconModel(style="fab", icon_name="fa-yandex-international"),
FontIconModel(style="fab", icon_name="fa-yarn"),
FontIconModel(style="fab", icon_name="fa-yelp"),
FontIconModel(style="fab", icon_name="fa-yoast"),
FontIconModel(style="fab", icon_name="fa-youtube"),
FontIconModel(style="fab", icon_name="fa-youtube-square"),
FontIconModel(style="fab", icon_name="fa-zhihu"),
FontIconModel(style="far", icon_name="fa-address-book"),
FontIconModel(style="far", icon_name="fa-address-card"),
FontIconModel(style="far", icon_name="fa-angry"),
FontIconModel(style="far", icon_name="fa-arrow-alt-circle-down"),
FontIconModel(style="far", icon_name="fa-arrow-alt-circle-left"),
FontIconModel(style="far", icon_name="fa-arrow-alt-circle-right"),
FontIconModel(style="far", icon_name="fa-arrow-alt-circle-up"),
FontIconModel(style="far", icon_name="fa-bell"),
FontIconModel(style="far", icon_name="fa-bell-slash"),
FontIconModel(style="far", icon_name="fa-bookmark"),
FontIconModel(style="far", icon_name="fa-building"),
FontIconModel(style="far", icon_name="fa-calendar"),
FontIconModel(style="far", icon_name="fa-calendar-alt"),
FontIconModel(style="far", icon_name="fa-calendar-check"),
FontIconModel(style="far", icon_name="fa-calendar-minus"),
FontIconModel(style="far", icon_name="fa-calendar-plus"),
FontIconModel(style="far", icon_name="fa-calendar-times"),
FontIconModel(style="far", icon_name="fa-caret-square-down"),
FontIconModel(style="far", icon_name="fa-caret-square-left"),
FontIconModel(style="far", icon_name="fa-caret-square-right"),
FontIconModel(style="far", icon_name="fa-caret-square-up"),
FontIconModel(style="far", icon_name="fa-chart-bar"),
FontIconModel(style="far", icon_name="fa-check-circle"),
FontIconModel(style="far", icon_name="fa-check-square"),
FontIconModel(style="far", icon_name="fa-circle"),
FontIconModel(style="far", icon_name="fa-clipboard"),
FontIconModel(style="far", icon_name="fa-clock"),
FontIconModel(style="far", icon_name="fa-clone"),
FontIconModel(style="far", icon_name="fa-closed-captioning"),
FontIconModel(style="far", icon_name="fa-comment"),
FontIconModel(style="far", icon_name="fa-comment-alt"),
FontIconModel(style="far", icon_name="fa-comment-dots"),
FontIconModel(style="far", icon_name="fa-comments"),
FontIconModel(style="far", icon_name="fa-compass"),
FontIconModel(style="far", icon_name="fa-copy"),
FontIconModel(style="far", icon_name="fa-copyright"),
FontIconModel(style="far", icon_name="fa-credit-card"),
FontIconModel(style="far", icon_name="fa-dizzy"),
FontIconModel(style="far", icon_name="fa-dot-circle"),
FontIconModel(style="far", icon_name="fa-edit"),
FontIconModel(style="far", icon_name="fa-envelope"),
FontIconModel(style="far", icon_name="fa-envelope-open"),
FontIconModel(style="far", icon_name="fa-eye"),
FontIconModel(style="far", icon_name="fa-eye-slash"),
FontIconModel(style="far", icon_name="fa-file"),
FontIconModel(style="far", icon_name="fa-file-alt"),
FontIconModel(style="far", icon_name="fa-file-archive"),
FontIconModel(style="far", icon_name="fa-file-audio"),
FontIconModel(style="far", icon_name="fa-file-code"),
FontIconModel(style="far", icon_name="fa-file-excel"),
FontIconModel(style="far", icon_name="fa-file-image"),
FontIconModel(style="far", icon_name="fa-file-pdf"),
FontIconModel(style="far", icon_name="fa-file-powerpoint"),
FontIconModel(style="far", icon_name="fa-file-video"),
FontIconModel(style="far", icon_name="fa-file-word"),
FontIconModel(style="far", icon_name="fa-flag"),
FontIconModel(style="far", icon_name="fa-flushed"),
FontIconModel(style="far", icon_name="fa-folder"),
FontIconModel(style="far", icon_name="fa-folder-open"),
FontIconModel(style="far", icon_name="fa-frown"),
FontIconModel(style="far", icon_name="fa-frown-open"),
FontIconModel(style="far", icon_name="fa-futbol"),
FontIconModel(style="far", icon_name="fa-gem"),
FontIconModel(style="far", icon_name="fa-grimace"),
FontIconModel(style="far", icon_name="fa-grin"),
FontIconModel(style="far", icon_name="fa-grin-alt"),
FontIconModel(style="far", icon_name="fa-grin-beam"),
FontIconModel(style="far", icon_name="fa-grin-beam-sweat"),
FontIconModel(style="far", icon_name="fa-grin-hearts"),
FontIconModel(style="far", icon_name="fa-grin-squint"),
FontIconModel(style="far", icon_name="fa-grin-squint-tears"),
FontIconModel(style="far", icon_name="fa-grin-stars"),
FontIconModel(style="far", icon_name="fa-grin-tears"),
FontIconModel(style="far", icon_name="fa-grin-tongue"),
FontIconModel(style="far", icon_name="fa-grin-tongue-squint"),
FontIconModel(style="far", icon_name="fa-grin-tongue-wink"),
FontIconModel(style="far", icon_name="fa-grin-wink"),
FontIconModel(style="far", icon_name="fa-hand-lizard"),
FontIconModel(style="far", icon_name="fa-hand-paper"),
FontIconModel(style="far", icon_name="fa-hand-peace"),
FontIconModel(style="far", icon_name="fa-hand-point-down"),
FontIconModel(style="far", icon_name="fa-hand-point-left"),
FontIconModel(style="far", icon_name="fa-hand-point-right"),
FontIconModel(style="far", icon_name="fa-hand-point-up"),
FontIconModel(style="far", icon_name="fa-hand-pointer"),
FontIconModel(style="far", icon_name="fa-hand-rock"),
FontIconModel(style="far", icon_name="fa-hand-scissors"),
FontIconModel(style="far", icon_name="fa-hand-spock"),
FontIconModel(style="far", icon_name="fa-handshake"),
FontIconModel(style="far", icon_name="fa-hdd"),
FontIconModel(style="far", icon_name="fa-heart"),
FontIconModel(style="far", icon_name="fa-hospital"),
FontIconModel(style="far", icon_name="fa-hourglass"),
FontIconModel(style="far", icon_name="fa-id-badge"),
FontIconModel(style="far", icon_name="fa-id-card"),
FontIconModel(style="far", icon_name="fa-image"),
FontIconModel(style="far", icon_name="fa-images"),
FontIconModel(style="far", icon_name="fa-keyboard"),
FontIconModel(style="far", icon_name="fa-kiss"),
FontIconModel(style="far", icon_name="fa-kiss-beam"),
FontIconModel(style="far", icon_name="fa-kiss-wink-heart"),
FontIconModel(style="far", icon_name="fa-laugh"),
FontIconModel(style="far", icon_name="fa-laugh-beam"),
FontIconModel(style="far", icon_name="fa-laugh-squint"),
FontIconModel(style="far", icon_name="fa-laugh-wink"),
FontIconModel(style="far", icon_name="fa-lemon"),
FontIconModel(style="far", icon_name="fa-life-ring"),
FontIconModel(style="far", icon_name="fa-lightbulb"),
FontIconModel(style="far", icon_name="fa-list-alt"),
FontIconModel(style="far", icon_name="fa-map"),
FontIconModel(style="far", icon_name="fa-meh"),
FontIconModel(style="far", icon_name="fa-meh-blank"),
FontIconModel(style="far", icon_name="fa-meh-rolling-eyes"),
FontIconModel(style="far", icon_name="fa-minus-square"),
FontIconModel(style="far", icon_name="fa-money-bill-alt"),
FontIconModel(style="far", icon_name="fa-moon"),
FontIconModel(style="far", icon_name="fa-newspaper"),
FontIconModel(style="far", icon_name="fa-object-group"),
FontIconModel(style="far", icon_name="fa-object-ungroup"),
FontIconModel(style="far", icon_name="fa-paper-plane"),
FontIconModel(style="far", icon_name="fa-pause-circle"),
FontIconModel(style="far", icon_name="fa-play-circle"),
FontIconModel(style="far", icon_name="fa-plus-square"),
FontIconModel(style="far", icon_name="fa-question-circle"),
FontIconModel(style="far", icon_name="fa-registered"),
FontIconModel(style="far", icon_name="fa-sad-cry"),
FontIconModel(style="far", icon_name="fa-sad-tear"),
FontIconModel(style="far", icon_name="fa-save"),
FontIconModel(style="far", icon_name="fa-share-square"),
FontIconModel(style="far", icon_name="fa-smile"),
FontIconModel(style="far", icon_name="fa-smile-beam"),
FontIconModel(style="far", icon_name="fa-smile-wink"),
FontIconModel(style="far", icon_name="fa-snowflake"),
FontIconModel(style="far", icon_name="fa-square"),
FontIconModel(style="far", icon_name="fa-star"),
FontIconModel(style="far", icon_name="fa-star-half"),
FontIconModel(style="far", icon_name="fa-sticky-note"),
FontIconModel(style="far", icon_name="fa-stop-circle"),
FontIconModel(style="far", icon_name="fa-sun"),
FontIconModel(style="far", icon_name="fa-surprise"),
FontIconModel(style="far", icon_name="fa-thumbs-down"),
FontIconModel(style="far", icon_name="fa-thumbs-up"),
FontIconModel(style="far", icon_name="fa-times-circle"),
FontIconModel(style="far", icon_name="fa-tired"),
FontIconModel(style="far", icon_name="fa-trash-alt"),
FontIconModel(style="far", icon_name="fa-user"),
FontIconModel(style="far", icon_name="fa-user-circle"),
FontIconModel(style="far", icon_name="fa-window-close"),
FontIconModel(style="far", icon_name="fa-window-maximize"),
FontIconModel(style="far", icon_name="fa-window-minimize"),
FontIconModel(style="far", icon_name="fa-window-restore"),
])
``` |
{
"source": "JostCrow/django-maintenance-window",
"score": 2
} |
#### File: django-maintenance-window/django_maintenance_window/middleware.py
```python
import django
from django.urls import reverse
from django.shortcuts import render
from django.utils.cache import add_never_cache_headers
from . import settings
from .models import MaintenanceMode
class MaintenanceModeMiddleware:
"""This middleware will check if the maintenance page should be shown."""
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
maintenance_response = self.check_maintenance(request)
if maintenance_response:
return maintenance_response
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
def check_maintenance(self, request):
"""
This function will check if maintenance is activated.
If so return the maintenance page.
Otherwise continue the request.
"""
config = MaintenanceMode.get_solo()
admin_root_url = reverse('admin:index')
skip_maintenance = False
if request.path.startswith(admin_root_url) and settings.MAINTENANCE_EXCLUDE_ADMIN_URLS:
skip_maintenance = True
if request.user and request.user.is_superuser and settings.MAINTENANCE_EXCLUDE_SUPER_USER:
skip_maintenance = True
if request.user and request.user.is_staff and settings.MAINTENANCE_EXCLUDE_STAFF_USER:
skip_maintenance = True
if config.maintenance and not skip_maintenance:
kwargs = {
'end_date': config.maintenance_until,
'display_end_date': settings.MAINTENANCE_DISPLAY_END_DATE
}
response = render(
request, settings.MAINTENANCE_TEMPLATE,
content_type='text/html', status=503, context=kwargs)
add_never_cache_headers(response)
return response
return None
```
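The middleware above only takes effect once it is added to the project's middleware stack; a minimal sketch of that registration follows (the settings module and the other entries are assumptions, not part of this repository):
```python
# settings.py (hypothetical project settings)
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    # Placed after AuthenticationMiddleware so request.user is populated
    # before check_maintenance() looks at is_superuser / is_staff.
    "django_maintenance_window.middleware.MaintenanceModeMiddleware",
]
```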
#### File: django-maintenance-window/tests/test_models.py
```python
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django_maintenance_window import models
class TestDjango_maintenance_window(TestCase):
def setUp(self):
pass
def test_str(self):
maintenance = models.MaintenanceMode()
self.assertEqual(str(maintenance), _("Maintenance Mode"))
def test_from_can_not_be_saved_without_until(self):
maintenance = models.MaintenanceMode(maintenance_from=timezone.now())
with self.assertRaises(ValidationError) as exc_context:
maintenance.save()
# assert False, exc_context.exception.messages
self.assertEqual(
exc_context.exception.messages,
[_('You can not set "maintenance_from" without setting "maintenance_until"')] # noqa
)
def test_can_be_saved_empty(self):
maintenance = models.MaintenanceMode()
maintenance.save()
def test_can_be_saved_filled(self):
maintenance = models.MaintenanceMode(
maintenance=True,
maintenance_from=timezone.now(),
maintenance_until=timezone.now()
)
maintenance.save()
def tearDown(self):
pass
``` |
{
"source": "JostCrow/django-reviews",
"score": 2
} |
#### File: django-reviews/reviews/utils.py
```python
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Count, Avg
# review imports
from reviews.models import Review
def get_best_rated():
"""Returns the best rated instance for all models.
"""
result = Review.objects \
.filter(active=True) \
.values('content_type_id', 'content_id') \
.annotate(Avg('score')) \
.order_by('-score__avg') \
.first()
try:
ctype = ContentType.objects.get_for_id(result['content_type_id'])
content = ctype.model_class().objects.get(pk=result['content_id'])
return content, result['score__avg']
except (TypeError, ObjectDoesNotExist):
return None
def get_best_rated_for_model(instance):
"""Returns the best rated instance for given model or instance of a model.
"""
ctype = ContentType.objects.get_for_model(instance)
result = Review.objects \
.filter(content_type=ctype.id, active=True) \
.values('content_id') \
.annotate(Avg('score')) \
.order_by('-score__avg') \
.first()
try:
content = ctype.model_class().objects.get(pk=result['content_id'])
return content, result['score__avg']
except (TypeError, ObjectDoesNotExist):
return None
def get_reviews_for_instance(instance):
"""Returns active reviews for given instance.
"""
ctype = ContentType.objects.get_for_model(instance)
return Review.objects.active().filter(content_type=ctype.id, content_id=instance.id)
def get_average_for_instance(instance):
"""Returns the average score and the amount of reviews for the given
instance. Takes only active reviews into account.
Returns (average, amount)
"""
content_type = ContentType.objects.get_for_model(instance)
query = Review.objects.filter(content_type=content_type.id, content_id=instance.id, active=True).aggregate(
Avg('score'), Count('id'))
return query.get('score__avg'), query.get('id__count')
def has_rated(request, instance):
"""Returns True if the current user has already rated for the given
instance.
"""
ctype = ContentType.objects.get_for_model(instance)
try:
if request.user.is_authenticated:
Review.objects.get(
content_type=ctype.id,
content_id=instance.id,
user=request.user
)
else:
Review.objects.get(
content_type=ctype.id,
content_id=instance.id,
session_id=request.session.session_key
)
except ObjectDoesNotExist:
return False
else:
return True
``` |
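A short usage sketch for the helpers above, assuming a reviewable model instance and an incoming request are available (`product`, `request`, and the view function are hypothetical names):
```python
# Hypothetical view-layer code exercising reviews/utils.py.
from reviews import utils

def product_rating_context(request, product):
    average, amount = utils.get_average_for_instance(product)
    return {
        "reviews": utils.get_reviews_for_instance(product),
        "average_score": average,
        "review_count": amount,
        "user_has_rated": utils.has_rated(request, product),
    }
```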
{
"source": "josteinbf/rrcforest",
"score": 3
} |
#### File: rrcforest/rrcforest/forest.py
```python
from typing import Sequence, Union, Iterable
from rrcf import rrcf
import numpy as np
class RCForest:
_forest: Sequence[rrcf.RCTree]
_tree_size: int
_point_index: int
def __init__(
self, n_trees: int = 100, tree_size: int = 256,
random_state: Union[None, int, np.random.RandomState] = None):
if random_state is None:
rng = np.random.RandomState()
elif isinstance(random_state, int):
rng = np.random.RandomState(random_state)
else:
rng = random_state
max_seed = np.iinfo(np.int32).max
self._forest = [
rrcf.RCTree(random_state=rng.randint(max_seed))
for _ in range(n_trees)
]
self._tree_size = tree_size
self._point_index = 0
@property
def tree_size(self) -> int:
return self._tree_size
@property
def n_trees(self) -> int:
return len(self._forest)
def insert_batch(self, points: Iterable[np.ndarray]):
return np.asarray(
[self.insert(point) for point in points])
def insert(self, point: np.ndarray):
'''Insert a point into the tree and report its anomaly score.'''
if point.ndim != 1:
raise ValueError('expected 1D array')
codisp_sum = 0
for tree in self._forest:
if len(tree.leaves) > self.tree_size:
tree.forget_point(self._point_index - self.tree_size)
tree.insert_point(point, index=self._point_index)
codisp_sum += tree.codisp(self._point_index)
self._point_index += 1
codisp = codisp_sum / self.n_trees
return codisp
```
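The tests below exercise `insert_batch`; for streaming data the class is meant to be fed one observation at a time. A minimal sketch, assuming a random stream and an arbitrary 95th-percentile threshold (both are illustrative choices, not part of the package):
```python
# Streaming usage sketch for RCForest: score points as they arrive.
import numpy as np
from rrcforest import RCForest

forest = RCForest(n_trees=50, tree_size=128, random_state=0)
stream = np.random.uniform(size=(1000, 3))        # stand-in for a real feed

scores = [forest.insert(point) for point in stream]
threshold = np.quantile(scores, 0.95)             # arbitrary cut-off
anomalies = [i for i, s in enumerate(scores) if s > threshold]
```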
#### File: rrcforest/tests/test_rcforest.py
```python
import numpy as np
from rrcforest import RCForest
def test_constants():
n_observations = 300
n_features = 4
data = np.ones(shape=(n_observations, n_features), dtype=float)
data *= 3.1415
forest = RCForest(random_state=42)
scores = forest.insert_batch(data)
assert scores.shape == (n_observations,)
assert np.all(scores == 0.)
def test_random():
n_observations = 60
n_features = 5
data = np.random.uniform(size=(n_observations, n_features))
print(data.shape)
forest = RCForest(random_state=42)
scores = forest.insert_batch(data)
assert scores.shape == (n_observations,)
assert scores[0] == 0.
assert np.all(scores[1:] != 0.)
``` |
{
"source": "JosteinBrevik/KerbalController",
"score": 3
} |
#### File: JosteinBrevik/KerbalController/serialReader.py
```python
import serial
import time
import win32com.client
#The following line is for serial over GPIO
port = 'COM7'
ard = serial.Serial(port,9600,timeout=5)
shell = win32com.client.Dispatch("WScript.Shell")
#shell.AppActivate("Notepad")
def findPreamble(msg):
if(len(msg) < 5):
return False
preamble = "FFFE"
for i in range(len(preamble)):
        if(msg[i] != preamble[i]):  # compare against the matching preamble character
return False
return True
#print("here")
while (True):
# Serial read section
msg = ard.readline()
msgStr = msg.decode("utf-8")
#print("Message from arduino: ")
#print(msg)
time.sleep(0.05)
if(findPreamble(msgStr)):
shell.SendKeys(str(msgStr[4]))
ard.reset_input_buffer()
#print(str(msg)) # CTRL+A may "select all" depending on which window's focused
#shell.SendKeys("{DELETE}") # Delete selected text? Depends on context. :P
#shell.SendKeys("{TAB}") #Press tab... to change focus or whatever
exit()
``` |
{
"source": "josteinl/advent2020",
"score": 4
} |
#### File: advent2020/day11/main.py
```python
from itertools import chain
import collections
def occupied_count(seating):
return ''.join(chain.from_iterable(seating)).count('#')
def adjacent_occupied(x, y, seating):
occupied = 0
# Above
for look_x in range(x - 1, x + 2):
for look_y in range(y - 1, y + 2):
if look_x == x and look_y == y:
continue
if seating[look_y][look_x] == '#':
occupied += 1
return occupied
def sit_down(seating, dim_x, dim_y):
return_seating = seating.copy()
for y in range(1, dim_y):
for x in range(1, dim_x):
occupied = adjacent_occupied(x, y, seating)
if seating[y][x] == 'L' and occupied == 0:
# Empty seat
# and no adjacent occupied
return_seating[y] = return_seating[y][:x] + '#' + return_seating[y][x + 1:]
elif seating[y][x] == '#' and occupied >= 4:
# Occupied and 4 or more adjecent seats, raise up
return_seating[y] = return_seating[y][:x] + 'L' + return_seating[y][x + 1:]
return return_seating
def see_occupied_in_direction(x, y, dir_x, dir_y, seating):
max_x = len(seating[0]) - 1
max_y = len(seating) - 1
cur_x = x
cur_y = y
cur_x += dir_x
cur_y += dir_y
while 0 <= cur_x <= max_x and 0 <= cur_y <= max_y:
if seating[cur_y][cur_x] == '#':
return 1
elif seating[cur_y][cur_x] == 'L':
return 0
cur_x += dir_x
cur_y += dir_y
return 0
def seen_occupied(x, y, seating):
occupied = 0
for look_x in range(- 1, + 2):
for look_y in range(- 1, + 2):
if look_x == 0 and look_y == 0:
continue
occupied += see_occupied_in_direction(x, y, look_x, look_y, seating)
return occupied
def sit_down_part_two(seating, dim_x, dim_y):
return_seating = seating.copy()
for y in range(0, dim_y):
for x in range(0, dim_x):
occupied = seen_occupied(x, y, seating)
if seating[y][x] == 'L' and occupied == 0:
# Empty seat
# and no adjacent occupied
return_seating[y] = return_seating[y][:x] + '#' + return_seating[y][x + 1:]
elif seating[y][x] == '#' and occupied >= 5:
# Occupied and 5 or more seen seats, raise up
return_seating[y] = return_seating[y][:x] + 'L' + return_seating[y][x + 1:]
return return_seating
def part_two():
with open('data.txt', 'r') as f:
seating = [data.strip() for data in f]
dimension_x = len(seating[0])
dimension_y = len(seating)
last_seating = None
while collections.Counter(last_seating) != collections.Counter(seating):
last_seating = seating.copy()
seating = sit_down_part_two(seating, dimension_x, dimension_y)
return occupied_count(last_seating)
def part_one():
with open('data.txt', 'r') as f:
seating = [data.strip() for data in f]
dimension_x = len(seating[0])
dimension_y = len(seating)
# Extend seating with empty space all around, makes it easier to count later
for row_number in range(dimension_y):
seating[row_number] = '.' + seating[row_number] + '.'
seating = ['.' * (dimension_x + 2)] + seating + ['.' * (dimension_x + 2)]
last_seating = None
while collections.Counter(last_seating) != collections.Counter(seating):
last_seating = seating.copy()
seating = sit_down(seating, dimension_x + 1, dimension_y + 1)
return occupied_count(last_seating)
if __name__ == '__main__':
# Part one:
# result = part_one()
# print(f'Result {result}')
result = part_two()
print(f'Result {result}')
```
#### File: advent2020/day16/main.py
```python
from pprint import pprint
from ClusterShell.RangeSet import RangeSet
fields_dict = {}
def part_one():
with open('data.txt', 'r') as f:
data = f.read()
fields, your_ticket, nearby_tickets = data.split('\n\n')
# Build dictionary of fields and valid ranges
all_ranges = RangeSet()
fields = fields.split('\n')
fields_dict = {}
for field_line in fields:
field_name, field_ranges = field_line.split(':')
field_ranges = field_ranges.split('or')
for field_range in field_ranges:
if field_name in fields_dict:
fields_dict[field_name] = fields_dict[field_name].union(RangeSet(field_range))
else:
fields_dict[field_name] = RangeSet(field_range)
all_ranges = all_ranges.union(RangeSet(field_range))
# Nearby tickets
part_one_answer = 0
nearby_tickets = nearby_tickets.split('\n')[1:]
for ticket in nearby_tickets:
for field_value in ticket.split(','):
if field_value not in all_ranges:
part_one_answer += int(field_value)
return part_one_answer
# found at https://cs.lmu.edu/~ray/notes/backtracking/
def solve(values, safe_up_to, size):
"""Finds a solution to a backtracking problem.
values -- a sequence of values to try, in order. For a map coloring
problem, this may be a list of colors, such as ['red',
'green', 'yellow', 'purple']
safe_up_to -- a function with two arguments, solution and position, that
returns whether the values assigned to slots 0..pos in
the solution list, satisfy the problem constraints.
size -- the total number of “slots” you are trying to fill
Return the solution as a list of values.
"""
solution = [None] * size
def extend_solution(position):
for value in values:
solution[position] = value
if safe_up_to(solution, position):
if position >= size - 1 or extend_solution(position + 1):
return solution
return None
return extend_solution(0)
def is_solution_valid(solution_list, top_position):
""" a function with two arguments, solution and position, that
returns whether the values assigned to slots 0..pos in
the solution list, satisfy the problem constraints.
"""
global fields_dict
# If list contains duplicates it is wrong:
if len(solution_list[:top_position + 1]) != len(set(solution_list[:top_position + 1])):
return False
for position in range(top_position + 1):
if position not in fields_dict[solution_list[position]]['possible_indexes']:
return False
return True
def part_two():
with open('data.txt', 'r') as f:
data = f.read()
fields, your_ticket, nearby_tickets = data.split('\n\n')
# Build dictionary of fields and valid ranges
all_ranges = RangeSet()
fields = fields.split('\n')
for field_line in fields:
field_name, field_ranges = field_line.split(':')
field_ranges = field_ranges.split('or')
for field_range in field_ranges:
if field_name in fields_dict:
fields_dict[field_name]['range'] = fields_dict[field_name]['range'].union(RangeSet(field_range))
else:
fields_dict[field_name] = {'range': RangeSet(field_range),
'possible_indexes': []}
all_ranges = all_ranges.union(RangeSet(field_range))
# Nearby tickets
valid_tickets = []
nearby_tickets = nearby_tickets.split('\n')[1:]
for ticket in nearby_tickets:
all_fields_ok = True
for field_value in ticket.split(','):
if field_value not in all_ranges:
all_fields_ok = False
if all_fields_ok:
valid_tickets.append([int(x) for x in ticket.split(',')])
# Include own ticket to valid_tickets
your_ticket = [int(x) for x in your_ticket.split('\n')[1].split(',')]
valid_tickets.append(your_ticket)
# pprint(valid_tickets, width=120)
for field in fields_dict:
for i in range(len(your_ticket)):
# is field valid for all tickets index i?
field_valid_for_all = True
for valid_ticket in valid_tickets:
if valid_ticket[i] not in fields_dict[field]['range']:
field_valid_for_all = False
break
if field_valid_for_all:
fields_dict[field]['possible_indexes'].append(i)
pprint(fields_dict, width=120)
solution = solve(fields_dict.keys(), is_solution_valid, len(fields_dict.keys()))
result = 1
for i in range(len(solution)):
if solution[i].startswith('departure'):
result *= your_ticket[i]
return result
if __name__ == '__main__':
# result = part_one()
# print('Result', result)
result = part_two()
print('Result', result)
```
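The generic `solve` helper above is easiest to see on the map-colouring problem its docstring mentions; a small sketch using an invented four-node graph (the adjacency map and colour list are made up for illustration):
```python
# Toy use of the backtracking solver defined above: colour a 4-node graph
# so that no two adjacent nodes share a colour. The graph is invented.
colors = ['red', 'green', 'yellow', 'purple']
neighbors = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}

def no_conflict_up_to(solution, position):
    # Only the newest slot needs checking; earlier slots were checked before.
    return all(solution[position] != solution[other]
               for other in neighbors[position] if other < position)

coloring = solve(colors, no_conflict_up_to, size=4)
# -> ['red', 'green', 'yellow', 'red']
```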
#### File: advent2020/day4/main.py
```python
import string
def valid_field(field):
field_name, data = field.split(':')
print(f'{field_name}: {data}')
if field_name == 'byr': # (Birth Year)
# four digits; at least 1920 and at most 2002.
return 1920 <= int(data) <= 2002
elif field_name == 'iyr': # (Issue Year)
# four digits; at least 2010 and at most 2020
return 2010 <= int(data) <= 2020
elif field_name == 'eyr': # (Expiration Year)
# four digits; at least 2020 and at most 2030.
return 2020 <= int(data) <= 2030
elif field_name == 'hgt': # (Height)
# hgt (Height) - a number followed by either cm or in:
# If cm, the number must be at least 150 and at most 193.
# If in, the number must be at least 59 and at most 76.
if 'cm' not in data and 'in' not in data:
return False
unit = data[-2:]
height = int(data[:-2])
if unit == 'cm':
return 150 <= int(height) <= 193
return 59 <= int(height) <= 76
elif field_name == 'hcl': # (Hair Color)
# a # followed by exactly six characters 0-9 or a-f.
if data[0] != '#':
return False
if len(data) != 7:
return False
for digit in data[1:]:
if digit not in string.hexdigits:
return False
return True
elif field_name == 'ecl': # (Eye Color)
# exactly one of: amb blu brn gry grn hzl oth.
return data in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
elif field_name == 'pid': # (Passport ID)
# a nine-digit number, including leading zeroes.
if len(data) != 9:
return False
for digit in data:
if digit not in string.digits:
return False
return True
def yr_consistency(record):
"""
Check internal year consistency.
byr <= iyr <= eyr
"""
byr = iyr = eyr = None
fields = record.split()
for field in fields:
field_name, data = field.split(':')
if field_name == 'byr':
byr = int(data)
elif field_name == 'iyr':
iyr = int(data)
elif field_name == 'eyr':
eyr = int(data)
return byr <= iyr <= eyr
def valid_passport(record):
"""
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
"""
required_fields = ['byr', # (Birth Year)
'iyr', # (Issue Year)
'eyr', # (Expiration Year)
'hgt', # (Height)
'hcl', # (Hair Color)
'ecl', # (Eye Color)
'pid', # (Passport ID)
# cid (Country ID)
]
for field in required_fields:
if f'{field}:' not in record:
print(f'---** missing field: "{field}" from:\n{record}\n-----')
return False
fields = record.split()
for field in fields:
if field.startswith('cid:'):
continue
if not valid_field(field):
print(f'---** invalid field: {field} ')
return False
if not yr_consistency(record):
return False
print('---** Valid passport')
return True
def main(testfile):
with open(testfile, 'r') as f:
data = f.read()
records = data.split('\n\n')
print(f'Number of records: {len(records)}')
valid_passports = 0
invalid_passports = 0
for record in records:
if valid_passport(record):
valid_passports += 1
else:
invalid_passports += 1
print(f'valid passports {valid_passports}')
print(f'invalid passports {invalid_passports}')
return valid_passports
if __name__ == '__main__':
# Part two:
valid_passports = main('data.txt')
print(f'valid passports {valid_passports}')
```
#### File: advent2020/day6/main.py
```python
import operator
from functools import reduce
def count_unique_answers(group):
group = group.strip().replace('\n', '')
return len(set(group))
def count_all_yes_answers(group):
# Make list of person, with a set for an individual persons answer
persons = [set(answers) for answers in group.split('\n')]
# Intersect groups answers
result = reduce(operator.and_, persons)
return len(result)
def main():
with open('data.txt', 'r') as f:
data = f.read()
groups = data.split('\n\n')
print(f'Number of groups: {len(groups)}')
total_yes_answers = 0
total_all_yes_answers = 0
for group in groups:
total_yes_answers += count_unique_answers(group)
total_all_yes_answers += count_all_yes_answers(group)
print(f'Total number of yes answers (part I): {total_yes_answers}')
print(f'Total number of group all yes answers (part II): {total_all_yes_answers}')
if __name__ == '__main__':
main()
``` |
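A tiny worked example of the set logic above, with an invented three-person group:
```python
# Invented group: only 'b' and 'c' were answered "yes" by everyone.
group = "abc\nbcd\nbce"
assert count_unique_answers(group) == 5   # union: a, b, c, d, e (part I)
assert count_all_yes_answers(group) == 2  # intersection: b, c   (part II)
```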
{
"source": "josteinl/advent2021",
"score": 3
} |
#### File: advent2021/day3/main.py
```python
from typing import List
def main_1():
gamma = ""
epsilon = ""
bit_count = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    line_number = 0  # number of lines read so far
with open("data.txt") as f:
for line in f.readlines():
line_number += 1
for i, bit in enumerate(line):
if bit == "1":
bit_count[i] += 1
for bit in bit_count:
if bit > line_number / 2:
gamma += "1"
epsilon += "0"
else:
gamma += "0"
epsilon += "1"
gamma = int(gamma, 2)
epsilon = int(epsilon, 2)
print(f"{gamma=} * {epsilon=} = {gamma*epsilon}")
def count(lines, bit_pos, keep_equal):
"""
if keep_equal = "1", then return the most number of bits
otherwise return the fewest number of bits
"""
count_bits = 0
for line in lines:
if line[bit_pos] == keep_equal:
count_bits += 1
if keep_equal == "0":
# Return fewest
if count_bits <= len(lines) / 2:
return "0"
else:
return "1"
else:
# Return most
if count_bits >= len(lines) / 2:
return "1"
else:
return "0"
def remove(lines, keep_equal: str) -> List[str]:
number_of_bits = len(lines[0].strip())
while len(lines) > 1:
for bit_pos in range(number_of_bits):
keep = count(lines, bit_pos, keep_equal)
for i in range(len(lines) - 1, -1, -1):
if len(lines) == 1:
return lines
if keep != lines[i][bit_pos]:
del lines[i]
return lines
def main_2():
with open("data.txt") as f:
oxygen_lines = f.readlines()
scrubber_lines = oxygen_lines.copy()
oxygen_lines = remove(oxygen_lines, keep_equal="1")
scrubber_lines = remove(scrubber_lines, keep_equal="0")
oxygen = int(oxygen_lines[0], 2)
scrubber = int(scrubber_lines[0], 2)
print(f"{oxygen=} * {scrubber=} = {oxygen*scrubber}")
if __name__ == "__main__":
# main_1()
main_2()
```
#### File: advent2021/day5/main.py
```python
from typing import List, Optional
class Diagram:
def __init__(self):
self._diagram = {}
def add_point(self, x, y):
if (x, y) in self._diagram:
self._diagram[(x, y)] += 1
else:
self._diagram[(x, y)] = 1
def delta(self, x1, x2):
if x1 == x2:
return 0
if x1 < x2:
return 1
return -1
def add(self, f_x, f_y, t_x, t_y):
delta_x = self.delta(f_x, t_x)
delta_y = self.delta(f_y, t_y)
x = f_x
y = f_y
while True:
self.add_point(x, y)
x += delta_x
y += delta_y
if x == t_x and y == t_y:
self.add_point(x, y)
break
def number_of_crossings(self):
count = 0
for key, value in self._diagram.items():
if value >= 2:
count += 1
return count
def main_1():
with open("data.txt") as f:
diagram = Diagram()
for line in f.readlines():
from_x_y, to_x_y = line.split(" -> ")
from_x_y = from_x_y.split(",")
to_x_y = to_x_y.split(",")
diagram.add(
int(from_x_y[0]), int(from_x_y[1]), int(to_x_y[0]), int(to_x_y[1])
)
count = diagram.number_of_crossings()
print(f"Number of crossings {count}!")
# 21059 wrong
def main_2():
with open("data.txt") as f:
print(f"No bingo found!")
if __name__ == "__main__":
main_1()
# main_2()
```
#### File: advent2021/day6/main.py
```python
from typing import List, Optional
class Fish:
new_timer = 8
reset_timer = 6
def __init__(self, start_number):
if start_number:
self.timer = start_number
else:
self.timer = self.new_timer
def tick(self):
"Return Ture if spawning is needed"
self.timer -= 1
if self.timer < 0:
self.timer = self.reset_timer
return True
else:
return False
def __repr__(self):
return f"{self.timer}"
def main_1():
with open("data.txt") as f:
start_numbers = [int(number) for number in f.readline().split(",")]
fish = []
for start_number in start_numbers:
fish.append(Fish(start_number))
print(f"init {fish}")
day = 0
while day < 256:
new_fish = []
for f in fish:
if f.tick():
new_fish.append(Fish(None))
day += 1
fish += new_fish
# print(f"{day=} {fish}")
result = len(fish)
print(f"{result=}")
def main_2():
"""Try to optimize by grouping all fish that has the same timer"""
with open("data.txt") as f:
start_numbers = [int(number) for number in f.readline().split(",")]
buckets = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for start_number in start_numbers:
buckets[start_number] += 1
print(f"init {buckets}")
day = 0
while day < 256:
new_buckets = buckets[1:] + [0]
new_buckets[6] += buckets[0]
new_buckets[8] += buckets[0]
buckets = new_buckets
day += 1
print(f"{day=} {buckets} {sum(buckets)=}")
result = sum(buckets)
print(f"{result=}")
if __name__ == "__main__":
# main_1()
main_2()
```
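The bucket optimisation in `main_2` above replaces one object per fish with nine counters; a single day's rotation on invented counts looks like this:
```python
# One tick of the bucket scheme from main_2, with made-up starting counts.
# buckets[i] = number of fish whose internal timer equals i.
buckets = [2, 1, 0, 0, 0, 0, 3, 0, 0]   # 2 fish at timer 0, 1 at 1, 3 at 6
new_buckets = buckets[1:] + [0]          # every timer drops by one day
new_buckets[6] += buckets[0]             # spawning fish reset to timer 6
new_buckets[8] += buckets[0]             # ...and each adds a newborn at 8
assert new_buckets == [1, 0, 0, 0, 0, 3, 2, 0, 2]
assert sum(new_buckets) == sum(buckets) + buckets[0]
```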
#### File: advent2021/kode24/day6.py
```python
sangen = [
"Spredt og i klynger der elven seg slynger",
"ligger du Porsblomstens by.",
"Dampflø<NAME> og sagblader synger",
"muntert ved kveld og ved gry.",
"",
"ref.:",
"",
"For ditt vell vårt hjerte banker,",
"og fra fremmed havn",
"hjem til deg går våre tanker,",
"kjært er Porsgrunns navn.",
"",
"Klang ifra ambolt og svingende hammer,",
"kullrøyk fra piper mot sky,",
"elven med tauing av flåter og prammer, -",
"- baugen er vendt mot det ny.",
"",
"ref.",
"",
"Vendte vi hjemad der ute fra verden,",
"Telemarks fjelde de blå",
"vinket imot oss: velkommen fra ferden,",
"- byen ved elven vi så.",
"",
"ref.",
]
arrangement = """Fra linje nummer 7: Tegn nummer så lang som linje nummer 8 er lang.
Fra linje nummer 15: Tegn nummer så lang som linje nummer 20 er lang.
Fra linje nummer 5: Tegn nummer så lang som linje nummer 4 er lang.
Fra linje nummer 13: Tegn nummer så lang som linje nummer 5 er lang.
Fra linje nummer 1: Tegn nummer så lang som linje nummer 10 er lang.
Fra linje nummer 2: Tegn nummer så lang som linje nummer 1 er lang."""
def main():
for line in arrangement.split("\n"):
line_words = line.split()
source_line_number = int(line_words[3][:-1])
source_line = sangen[source_line_number]
inspect_line_number = int(line_words[11])
character_number = len(sangen[inspect_line_number])
print(f"{source_line=} {character_number=} = {source_line[character_number]=}")
# print(f"{source_line=}") # [source_line_number-1]}")
if __name__ == "__main__":
main()
```
#### File: advent2021/kode24/day7.py
```python
import re
sang = """På låven sitter nissen med sin julegrøt,
Så god og søt, så god og søt.
Han nikker, og han smiler, og han er så glad,
For julegrøten vil han gjerne ha.
Men rundt omkring står alle de små rotter,
Og de skotter, og de skotter.
De vil så gjerne ha litt julegodter,
Og de danser, danser rundt i ring.
Men nissefar han truer med sin store skje,
Nei, bare se, og kom avsted.
For julegrøten min den vil jeg ha i fred,
Og ingen, ingen vil jeg dele med.
Men rottene de hopper, og de danser,
Og de svinser, og de svanser.
De klorer etter grøten og de stanser,
Og de står om nissen tett i ring.
Men nissefar han er en liten hissigpropp,
Og med sin kropp, han gjør et hopp.
Jeg henter katten hvis de ikke holder opp.
Når katten kommer, skal det nok bli stopp.
Da løper alle rottene så bange,
Ja, så bange, Ja, så bange,
De svinser og de svanser noen gange,
Og på en-to-tre så er de vekk, vekk vekk
"""
pattern = re.compile(r"([A-b])([v])\w+")
def main():
ord = re.findall(r"([A-b])([v])\w+", sang)
print(f"{ord=}")
if __name__ == "__main__":
main()
``` |
{
"source": "josteinl/fastapi-demo",
"score": 3
} |
#### File: josteinl/fastapi-demo/database.py
```python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session
from fastapi.requests import Request
engine = create_engine("sqlite:///./sql_app.db")
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db(request: Request) -> Session:
db = SessionLocal()
try:
yield db
finally:
db.close()
``` |
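The `get_db` generator above is intended to be injected as a FastAPI dependency; a minimal sketch of a route using it (the app, path, and query are hypothetical, not part of this repository):
```python
# Hypothetical route showing how get_db is typically wired in with Depends.
from fastapi import Depends, FastAPI
from sqlalchemy import text
from sqlalchemy.orm import Session

from database import get_db

app = FastAPI()

@app.get("/health")
def health(db: Session = Depends(get_db)):
    # One session per request; get_db's finally block closes it afterwards.
    db.execute(text("SELECT 1"))
    return {"status": "ok"}
```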
{
"source": "josteinl/libsgfdata",
"score": 3
} |
#### File: libsgfdata/libsgfdata/cmd_dtm.py
```python
import click
from . import dtm
import rasterio
class sample_dtm(object):
@classmethod
def decorate(cls, cmd):
cmd = click.option('--crs', type=int, help="CRS of boreholes")(cmd)
cmd = click.option('--sample-dtm', type=click.File('rb'), help="Sample a DTM raster. Note: requires crs.")(cmd)
return cmd
@classmethod
def transform(cls, data, crs, sample_dtm, **kw):
assert crs is not None, "--crs is required for sampling a dtm"
with rasterio.open(sample_dtm) as f:
dtm.sample_z_coordinate_from_dtm(data, crs, f, overwrite=True)
return data
```
#### File: libsgfdata/libsgfdata/dtm.py
```python
import pyproj
import rasterio
def project(innproj, utproj, xinn, yinn):
innproj = int(innproj)
utproj = int(utproj)
#UTM convention is coordinate order Northing-Easting. CCh, 2020-06-18
return pyproj.Transformer.from_crs(innproj, utproj, always_xy=True).transform(xinn, yinn)
def sample_z_coordinate_from_dtm(sections, crs, raster, overwrite=True):
"""Sample z coordinate for boreholes from a DTM raster. If overwrite
is false, only add z-coordinate if it is missing."""
if isinstance(raster, str):
        with rasterio.open(raster) as f:
_sample_z_coordinate_from_dtm(sections, crs, f, overwrite)
else:
_sample_z_coordinate_from_dtm(sections, crs, raster, overwrite)
def _sample_z_coordinate_from_dtm(sections, crs, raster, overwrite):
raster_crs = raster.crs.to_epsg()
for section in sections:
if "main" not in section or not len(section["main"]):
continue
if not overwrite and "z_coordinate" in section["main"][0]:
continue
if "x_coordinate" not in section["main"][0] or "y_coordinate" not in section["main"][0]:
continue
x, y = project(crs, raster_crs,
section["main"][0]["x_coordinate"],
section["main"][0]["y_coordinate"])
section["main"][0]["z_coordinate"] = next(iter(raster.sample([(x, y)])))[0]
```
#### File: libsgfdata/libsgfdata/dumper.py
```python
import re
import pkg_resources
import pandas as pd
import numpy as np
import slugify
import codecs
import copy
import dateutil.parser
import datetime
import logging
from pathlib import Path
import sys
import cchardet as chardet
from . import metadata
logger = logging.getLogger(__name__)
def _unconv(b, k, v):
if k == "DatumTid":
return v.strftime("%Y%m%d%H%M%S%f")[:-3] # Milliseconds are not supported by strftime, so use %f and remove three decimals
elif isinstance(v, datetime.date):
return v.strftime("%Y%m%d")
elif isinstance(v, datetime.datetime):
return v.strftime("%Y%m%d%H%M")
else:
return str(v)
def _dump_line(block, line):
return ",".join("%s=%s" % (k,_unconv(block, k, v))
for k,v in line.items()
if str(v) and (not isinstance(v, float) or not np.isnan(v)))
def _dump_raw(sections, output_filename=None, *arg, **kw):
if isinstance(output_filename, str):
with open(output_filename, "wb") as f:
_dump_raw_to_file(sections, f, *arg, **kw)
elif output_filename is not None:
_dump_raw_to_file(sections, output_filename, *arg, **kw)
else:
raise NotImplementedError
def _dump_raw_to_file(sections, f, encoding="latin-1"):
f = codecs.getwriter(encoding)(f, errors='ignore')
for section in sections:
for blockname in ("$", "£", "#", "€", "#$"):
if blockname == "$" or (blockname in section and section[blockname]):
f.write(blockname + "\n")
for row in section.get(blockname, []):
f.write(_dump_line(metadata.blocknames[blockname], row) + "\n")
def _unrename_blocks(sections):
for idx in range(len(sections)):
sections[idx] = {metadata.unblocknames.get(name, name): block
for name, block in sections[idx].items()}
def _unmake_dfs(sections):
for idx in range(len(sections)):
if "data" in sections[idx]:
sections[idx]["data"] = sections[idx]["data"].to_dict('records')
def _unrename_data_columns(sections):
for idx in range(len(sections)):
if "data" in sections[idx]:
sections[idx]["data"] = sections[idx]["data"].rename(columns = metadata.undata.code.to_dict())
def _unrename_main(sections):
for idx in range(len(sections)):
sections[idx]["main"] = [
{metadata.unmain.loc[key, "code"] if key in metadata.unmain.index else key: value
for key, value in row.items()}
for row in sections[idx]["main"]]
def _unrename_method(sections):
for idx in range(len(sections)):
if "method" not in sections[idx]:
continue
sections[idx]["method"] = [
{metadata.unmethod.loc[key, "code"] if key in metadata.unmethod.index else key: value
for key, value in row.items()}
for row in sections[idx]["method"]]
def _unrename_values_method_code(sections):
for section in sections:
for row in section["main"]:
if 'method_code' in row:
code = str(row['method_code'])
if code in metadata.unmethods.index:
row['method_code'] = metadata.unmethods.loc[code, "code"]
def _unrename_values_comments(sections):
key = "comments"
for section in sections:
if key in section["data"].columns:
codes = section["data"][key]
missing = list(set(codes.unique()) - set(metadata.uncomments.index))
labels = pd.concat((metadata.uncomments,
pd.DataFrame([{"code": code} for code in missing], index=missing)))
section["data"][key] = labels.loc[codes, "code"].values
def _unrename_values_data_flags(sections):
key = "allocated_value_during_performance_of_sounding"
for section in sections:
if key in section["data"].columns:
codes = section["data"][key]
missing = list(set(codes.unique()) - set(metadata.undata_flags.index))
labels = pd.concat((metadata.undata_flags,
pd.DataFrame([{"code": code} for code in missing], index=missing)))
section["data"][key] = labels.loc[codes, "code"].values
def dump(sections, *arg, **kw):
sections = copy.deepcopy(sections)
_unrename_values_data_flags(sections)
_unrename_values_comments(sections)
_unrename_data_columns(sections)
_unmake_dfs(sections)
_unrename_method(sections)
_unrename_values_method_code(sections)
_unrename_main(sections)
_unrename_blocks(sections)
sections = _dump_raw(sections, *arg, **kw)
return sections
```
#### File: tests/unit/test_parser.py
```python
from datetime import time
import pytest
from libsgfdata.parser import _parse_line
class TestParseLine:
@pytest.mark.parametrize('block, line, test_case', [
('data', 'D=10.500,J=12.601,', 'WRONG_TYPE'),
('main', 'HD=20210324,HI=004940,HM=24', 'DATE_FIELD'),
('main', 'HD=20210324,HI=2340,HM=24', 'DATE_FIELD'),
('main', 'HD=20210324,HI=,HM=24', 'DATE_FIELD'),
])
def test_parse_line(self, block: str, line: str, test_case: str):
"""
Test special real world data rows encountered in the wild
"""
parsed_line = _parse_line(block, line)
if test_case == 'WRONG_TYPE':
assert type(parsed_line['J']) == float
elif test_case == 'DATE_FIELD':
assert type(parsed_line['HI']) == time
``` |
{
"source": "josteinstraume/cracking-coding-interview",
"score": 4
} |
#### File: interview-questions/test-in-python/1-2-reverse-null-terminated-str.py
```python
import unittest
def reverse(s):
return s[::-1]
class TestReverse(unittest.TestCase):
def test_reverse(self):
self.assertEqual(reverse(''), '')
self.assertEqual(reverse('abc'), 'cba')
self.assertEqual(reverse('zz-1'), '1-zz')
if __name__ == '__main__':
unittest.main()
```
#### File: interview-questions/test-in-python/1-3-two-string-permutation.py
```python
import unittest
def isPerm(s1, s2):
# If the two strings are of unequal length, then we automatically know
# that one cannot be the anagram of the other.
if len(s1) != len(s2):
return False
return sorted(s1) == sorted(s2)
class Test(unittest.TestCase):
def test_isPerm(self):
self.assertEqual(isPerm('abc', 'abcd'), False)
self.assertEqual(isPerm('abc', 'cab'), True)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "josteinstraume/leetcode",
"score": 4
} |
#### File: leetcode/top_interview_questions/1_two_sum.py
```python
def twoSum(nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
n = len(nums)
    for i in range(n):
        for j in range(i + 1, n):
if nums[i] + nums[j] == target:
result = '[%d,%d]'%(i,j)
print(result)
return result
nums = [2, 7, 11, 15]
target = 9
# O(n) run time
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
map = {}
n = len(nums)
for i in range(n):
# target is 9, first num in nums is 2, so we're looking for 9-2=7 in nums
if nums[i] not in map:
map[target - nums[i]] = i
else:
return map[nums[i]], i
# does not add up to the given target
return -1, -1
``` |
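A quick trace of the hash-map version above on the sample input:
```python
# nums = [2, 7, 11, 15], target = 9
#   i=0: 2 not in map -> store its complement: map = {7: 0}
#   i=1: 7 is in map  -> return (map[7], 1) == (0, 1)
print(Solution().twoSum([2, 7, 11, 15], 9))   # (0, 1)
```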
{
"source": "jostimian/POS-Inventory-",
"score": 3
} |
#### File: jostimian/POS-Inventory-/pos.py
```python
from tkinter import *
from tkinter import scrolledtext
import json
jsonfile = "stock.json"
class main:
def __init__(self,master,stockFile):
frame = Frame(master)
frame.grid()
self.stockFile = stockFile
self.orderview = Text(frame, font=("Monaco",11))
self.orderview.grid(row = 0,column = 1)
self.editstock = Button(frame,text = "Edit Stock",font = ("Calibri",8))
self.editstock.grid(row = 0, column = 0)
root = Tk()
root.geometry("1920x969")
root.title("POS")
win = main(root,jsonfile)
root.mainloop()
``` |
{
"source": "jostimian/Python-To-Facebook",
"score": 3
} |
#### File: jostimian/Python-To-Facebook/main.py
```python
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
email = input("Enter Your Email: ")
password = input("Enter Your Password: ")
def login():
browser = webdriver.Chrome()
browser.get("https://www.facebook.com")
emailid = browser.find_element_by_id("email")
passid = browser.find_element_by_id("pass")
loginid = browser.find_element_by_id("loginbutton")
emailid.send_keys(email)
passid.send_keys(password)
loginid.click()
while email == "" or password == "":
    print("\n")
    email = input("Enter Your Email: ")
    password = input("Enter Your Password: ")
if email != "" and password != "":
    login()
``` |
{
"source": "josting/CS538_Project",
"score": 2
} |
#### File: josting/CS538_Project/mobi_parse_data.py
```python
import os
import datetime as dt
import random
import networkx
# import matplotlib as mpl
import matplotlib.pyplot as plt
from const import *
activity = {}
with open(os.path.join(DATA_DIR, "mobiclique", "activity.csv")) as activity_fd:
for line in activity_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
user_id, start_ts, end_ts = line.split(';')
if user_id not in activity:
activity[user_id] = []
activity[user_id].append( (int(start_ts), int(end_ts)) )
def is_awake(user_id, ts, activity):
for start_ts, end_ts in activity.get(user_id, []):
if ts >= start_ts and ts <= end_ts:
return True
return False
transmission = {}
with open(os.path.join(DATA_DIR, "mobiclique", "transmission.csv")) as transmission_fd:
for line in transmission_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
msg_type, msg_id, bytes, src_user_id, dst_user_id, ts, status = line.split(';')
#if status != '0':
# continue
if src_user_id not in transmission:
transmission[src_user_id] = {}
if dst_user_id not in transmission[src_user_id]:
transmission[src_user_id][dst_user_id] = []
ts = int(ts)
transmission[src_user_id][dst_user_id].append(ts)
reception = {}
with open(os.path.join(DATA_DIR, "mobiclique", "reception.csv")) as reception_fd:
for line in reception_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
msg_type, msg_id, src_user_id, dst_user_id, ts = line.split(';')
if src_user_id not in reception:
reception[src_user_id] = {}
if dst_user_id not in reception[src_user_id]:
reception[src_user_id][dst_user_id] = []
ts = int(ts)
reception[src_user_id][dst_user_id].append(ts)
drift_dict = {}
for src_user_id in sorted(reception):
for dst_user_id in sorted(reception[src_user_id]):
for rcp_ts in reception[src_user_id][dst_user_id]:
if src_user_id not in transmission:
continue
transmissions = transmission[src_user_id].get(dst_user_id, None)
if transmissions is None:
continue
if (src_user_id, dst_user_id) not in drift_dict:
drift_dict[(src_user_id, dst_user_id)] = []
diff = [abs(rcp_ts - trn_ts) for trn_ts in transmissions]
idx = diff.index(min(diff))
trn_ts = transmission[src_user_id][dst_user_id][idx]
drift = trn_ts - rcp_ts
drift_dict[(src_user_id, dst_user_id)].append((trn_ts, drift))
for (src_user_id, dst_user_id) in sorted(drift_dict):
print src_user_id, dst_user_id, drift_dict[(src_user_id, dst_user_id)]
break
proximity = {}
with open(os.path.join(DATA_DIR, "mobiclique", "proximity.csv")) as proximity_fd:
for line in proximity_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
ts, user_id, seen_user_id, major_code, minor_code = line.split(';')
ts = int(ts)
if ts not in proximity:
proximity[ts] = []
proximity[ts].append((user_id, seen_user_id))
def visit(node, edges, unvisited):
if node not in unvisited:
return []
unvisited.remove(node)
my_network = [node]
for (node1, node2) in edges:
if node == node1 and node2 in unvisited:
my_network.extend(visit(node2, edges, unvisited))
elif node == node2 and node1 in unvisited:
my_network.extend(visit(node1, edges, unvisited))
return my_network
def get_networks(nodes, edges):
networks = []
unvisited = list(nodes)
while unvisited:
node = unvisited[0]
my_network = []
networks.append(visit(node, edges, unvisited))
return map(sorted,(map(set,networks)))
MAX_RNG = 75
timestamps = sorted(proximity)
#write traces to user.dat files
if 0:
user_fds = {}
for ts in timestamps:
for (user_id, seen_id) in proximity[ts]:
if user_id not in user_fds:
fd = open(r"mobiclique\%s.dat" % user_id, 'w')
last_ts = -1
user_fds[user_id] = [fd, last_ts]
else:
[fd, last_ts] = user_fds[user_id]
if last_ts != ts:
if last_ts > 0:
fd.write('\n')
fd.write("{} {} {}".format(ts, user_id, seen_id))
else:
fd.write(",{}".format(seen_id))
user_fds[user_id][1] = ts
for (fd, last_ts) in user_fds.values():
fd.close()
# Graph using networkx
if 1:
idx = random.sample(xrange(len(timestamps)), 25)
idx.sort()
sample_timestamps = map(timestamps.__getitem__, idx)
sample_dts = map(lambda ts: START_DT + dt.timedelta(seconds=ts),sample_timestamps)
for ts in sample_timestamps:
other_timestamps = filter(lambda x: abs(x-ts) < MAX_RNG, timestamps)
edges = sorted(set(reduce(list.__add__, [proximity[x] for x in other_timestamps])))
G = networkx.Graph(edges)
networkx.draw(G)
fig_fname = os.path.join(r"C:\Users\Jon\Google Drive\Grad_School\CS 538\project\scripts\figures", "%s.png" % ts)
plt.savefig(fig_fname)
plt.close()
networks = []
n_networks = []
max_size = []
idx = random.sample(xrange(len(timestamps)), 1500)
idx.sort()
sample_timestamps = map(timestamps.__getitem__, idx)
sample_dts = map(lambda ts: START_DT + dt.timedelta(seconds=ts),sample_timestamps)
for ts in sample_timestamps:
other_timestamps = filter(lambda x: abs(x-ts) < MAX_RNG, timestamps)
edges = sorted(set(reduce(list.__add__, [proximity[x] for x in other_timestamps])))
nodes = sorted(set(reduce(list.__add__, map(list, edges))))
new_networks = get_networks(nodes, edges)
networks.append(new_networks)
n_networks.append(len(new_networks))
max_size.append(max(map(len,new_networks)))
fd = open("output2.csv", 'w')
for vals in zip(sample_dts, n_networks, max_size):
fd.write(','.join(map(str,(vals))))
fd.write('\n')
fd.close()
# Get networks
if 0:
networks = []
n_networks = []
max_size = []
idx = random.sample(xrange(len(timestamps)), 1500)
idx.sort()
sample_timestamps = map(timestamps.__getitem__, idx)
sample_dts = map(lambda ts: START_DT + dt.timedelta(seconds=ts),sample_timestamps)
for ts in sample_timestamps:
other_timestamps = filter(lambda x: abs(x-ts) < MAX_RNG, timestamps)
edges = sorted(set(reduce(list.__add__, [proximity[x] for x in other_timestamps])))
nodes = sorted(set(reduce(list.__add__, map(list, edges))))
new_networks = get_networks(nodes, edges)
networks.append(new_networks)
n_networks.append(len(new_networks))
max_size.append(max(map(len,new_networks)))
fd = open("output2.csv", 'w')
for vals in zip(sample_dts, n_networks, max_size):
fd.write(','.join(map(str,(vals))))
fd.write('\n')
fd.close()
``` |
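As a small sketch of what `get_networks` above computes (a plain connected-components grouping), consider an invented five-node graph:
```python
# Invented example for get_networks: two disjoint clusters.
nodes = ['a', 'b', 'c', 'd', 'e']
edges = [('a', 'b'), ('b', 'c'), ('d', 'e')]
# visit() walks each component depth-first from an unvisited node, so
# get_networks(nodes, edges) groups the nodes as:
#   [['a', 'b', 'c'], ['d', 'e']]
```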
{
"source": "jostl/masters-thesis",
"score": 2
} |
#### File: masters-thesis/benchmark/__init__.py
```python
from .goal_suite import PointGoalSuite
VERSION = '0911'
WEATHER_1 = [1, 3, 6, 8]
WEATHER_2 = [4, 14]
WEATHER_3 = [10, 14]
WEATHER_4 = [1, 8, 14]
_suites = dict()
def _add(suite_name, *args, **kwargs):
assert suite_name not in _suites, '%s is already registered!' % suite_name
town = None
if 'Town01' in suite_name:
town = 'Town01'
elif 'Town02' in suite_name:
town = 'Town02'
else:
raise Exception('No town specified: %s.' % suite_name)
if 'NoCrash' in suite_name:
benchmark = 'carla100'
elif 'DataCollection' in suite_name:
benchmark = 'data_collection'
elif 'Debug' in suite_name:
benchmark = 'debug'
else:
benchmark = 'corl2017'
suite = None
if 'Turn' in suite_name:
suite = 'turn'
elif 'Straight' in suite_name:
suite = 'straight'
elif 'Full' in suite_name or "DataCollection" in suite_name:
suite = 'full'
elif 'NoCrash' in suite_name:
suite = 'nocrash'
elif "Debug" in suite_name:
suite = 'debug'
else:
raise Exception('No suite specified: %s.' % suite_name)
kwargs['town'] = town
kwargs['poses_txt'] = '%s/%s/%s_%s.txt' % (benchmark, VERSION, suite, town)
kwargs['col_is_failure'] = 'NoCrash' in suite_name
_suites[suite_name] = (args, kwargs)
## ============= Register Suites ============ ##
# _add('DebugTown01-v0', DebugSuite, n_vehicles=10, viz_camera=True)
# _add('FullTown01-v0', n_vehicles=0, viz_camera=True)
# _add('FullTown02-v0', n_vehicles=0, viz_camera=True)
# data collection town; no respawn to prevent missing frames
_add('FullTown01-v0', n_vehicles=0, weathers=WEATHER_1, respawn_peds=False)
# Train town, train weathers.
_add('FullTown01-v1', n_vehicles=0, weathers=WEATHER_1)
_add('StraightTown01-v1', n_vehicles=0, weathers=WEATHER_1)
_add('TurnTown01-v1', n_vehicles=0, weathers=WEATHER_1)
# Train town, test weathers.
_add('FullTown01-v2', n_vehicles=0, weathers=WEATHER_2)
_add('StraightTown01-v2', n_vehicles=0, weathers=WEATHER_2)
_add('TurnTown01-v2', n_vehicles=0, weathers=WEATHER_2)
# Train town, more vehicles
_add('FullTown01-v3', n_vehicles=20, n_pedestrians=50, weathers=WEATHER_1)
_add('FullTown01-v4', n_vehicles=20, n_pedestrians=50, weathers=WEATHER_2)
# No ped versions
_add('FullTown01-v3-np', n_vehicles=20, n_pedestrians=0, weathers=WEATHER_1)
_add('FullTown01-v4-np', n_vehicles=20, n_pedestrians=0, weathers=WEATHER_2)
# Test town, train weathers.
_add('FullTown02-v1', n_vehicles=0, weathers=WEATHER_1)
_add('StraightTown02-v1', n_vehicles=0, weathers=WEATHER_1)
_add('TurnTown02-v1', n_vehicles=0, weathers=WEATHER_1)
# Test town, test weathers.
_add('FullTown02-v2', n_vehicles=0, weathers=WEATHER_2)
_add('StraightTown02-v2', n_vehicles=0, weathers=WEATHER_2)
_add('TurnTown02-v2', n_vehicles=0, weathers=WEATHER_2)
# Test town, more vehicles.
_add('FullTown02-v3', n_vehicles=15, n_pedestrians=50, weathers=WEATHER_1)
_add('FullTown02-v4', n_vehicles=15, n_pedestrians=50, weathers=WEATHER_2)
# No ped versions
_add('FullTown02-v3-np', n_vehicles=15, n_pedestrians=0, weathers=WEATHER_1)
_add('FullTown02-v4-np', n_vehicles=15, n_pedestrians=0, weathers=WEATHER_2)
_add('NoCrashTown01-v1', n_vehicles=0, disable_two_wheels=True, weathers=WEATHER_1)
_add('NoCrashTown01-v2', n_vehicles=0, disable_two_wheels=True, weathers=WEATHER_3)
_add('NoCrashTown01-v3', n_vehicles=20, disable_two_wheels=True, n_pedestrians=50, weathers=WEATHER_1)
_add('NoCrashTown01-v4', n_vehicles=20, disable_two_wheels=True, n_pedestrians=50, weathers=WEATHER_3)
_add('NoCrashTown01-v5', n_vehicles=100, disable_two_wheels=True, n_pedestrians=250, weathers=WEATHER_1)
_add('NoCrashTown01-v6', n_vehicles=100, disable_two_wheels=True, n_pedestrians=250, weathers=WEATHER_3)
# No ped versions
_add('NoCrashTown01-v3-np', n_vehicles=20, disable_two_wheels=True, n_pedestrians=0, weathers=WEATHER_1)
_add('NoCrashTown01-v4-np', n_vehicles=20, disable_two_wheels=True, n_pedestrians=0, weathers=WEATHER_3)
_add('NoCrashTown01-v5-np', n_vehicles=100, disable_two_wheels=True, n_pedestrians=0, weathers=WEATHER_1)
_add('NoCrashTown01-v6-np', n_vehicles=100, disable_two_wheels=True, n_pedestrians=0, weathers=WEATHER_3)
_add('NoCrashTown02-v1', n_vehicles=0, disable_two_wheels=True, weathers=WEATHER_1)
_add('NoCrashTown02-v2', n_vehicles=0, disable_two_wheels=True, weathers=WEATHER_3)
_add('NoCrashTown02-v3', n_vehicles=15, disable_two_wheels=True, n_pedestrians=50, weathers=WEATHER_1)
_add('NoCrashTown02-v4', n_vehicles=15, disable_two_wheels=True, n_pedestrians=50, weathers=WEATHER_3)
_add('NoCrashTown02-v5', n_vehicles=70, disable_two_wheels=True, n_pedestrians=150, weathers=WEATHER_1)
_add('NoCrashTown02-v6', n_vehicles=70, disable_two_wheels=True, n_pedestrians=150, weathers=WEATHER_3)
# No ped versions
_add('NoCrashTown02-v3-np', n_vehicles=15, disable_two_wheels=True, n_pedestrians=0, weathers=WEATHER_1)
_add('NoCrashTown02-v4-np', n_vehicles=15, disable_two_wheels=True, n_pedestrians=0, weathers=WEATHER_3)
_add('NoCrashTown02-v5-np', n_vehicles=70, disable_two_wheels=True, n_pedestrians=0, weathers=WEATHER_1)
_add('NoCrashTown02-v6-np', n_vehicles=70, disable_two_wheels=True, n_pedestrians=0, weathers=WEATHER_3)
# Demo
_add('NoCrashTown01-v7', n_vehicles=100, n_pedestrians=250, weathers=WEATHER_1)
_add('NoCrashTown01-v8', n_vehicles=100, n_pedestrians=250, weathers=WEATHER_2)
_add('NoCrashTown02-v7', n_vehicles=70, n_pedestrians=150, weathers=WEATHER_1)
_add('NoCrashTown02-v8', n_vehicles=70, n_pedestrians=150, weathers=WEATHER_2)
# Weather primes.
_add('FullTown01-v5', n_vehicles=0, weathers=WEATHER_4)
_add('FullTown01-v6', n_vehicles=20, weathers=WEATHER_4)
_add('StraightTown01-v3', n_vehicles=0, weathers=WEATHER_4)
_add('TurnTown01-v3', n_vehicles=0, weathers=WEATHER_4)
_add('FullTown02-v5', n_vehicles=0, weathers=WEATHER_4)
_add('FullTown02-v6', n_vehicles=15, weathers=WEATHER_4)
_add('StraightTown02-v3', n_vehicles=0, weathers=WEATHER_4)
_add('TurnTown02-v3', n_vehicles=0, weathers=WEATHER_4)
# Random
_add('NoCrashTown01_noweather_empty', weathers=[1], n_vehicles=0)
_add('NoCrashTown01_noweather_regular', weathers=[1], n_vehicles=20, n_pedestrians=50)
_add('NoCrashTown01_noweather_dense', weathers=[1], n_vehicles=100, n_pedestrians=250)
_add('NoCrashTown02_noweather_empty', weathers=[1], n_vehicles=0)
_add('NoCrashTown02_noweather_regular', weathers=[1], n_vehicles=15, n_pedestrians=50)
_add('NoCrashTown02_noweather_dense', weathers=[1], n_vehicles=70, n_pedestrians=200)
_add('StraightTown01-noweather', n_vehicles=0, weathers=[1])
_add('TurnTown01-noweather', n_vehicles=0, weathers=[1])
_add('FullTown01-noweather-nav', n_vehicles=0, weathers=[1])
_add('FullTown01-noweather', n_vehicles=20, weathers=[1])
_add('StraightTown02-noweather', n_vehicles=0, weathers=[1])
_add('TurnTown02-noweather', n_vehicles=0, weathers=[1])
_add('FullTown02-noweather-nav', n_vehicles=0, weathers=[1])
_add('FullTown02-noweather', n_vehicles=15, weathers=[1])
# Data collection
_add("DataCollectionTown01")
# Debug suites, used for validating routes in the simulator
_add("DebugTown01-v1", weathers=WEATHER_1, n_vehicles=0, n_pedestrians=0)
_add("DebugTown02-v1", weathers=WEATHER_1, n_vehicles=0, n_pedestrians=0)
_aliases = {
'town1': [
'FullTown01-v1', 'FullTown01-v2', 'FullTown01-v3', 'FullTown01-v4',
'StraightTown01-v1', 'StraightTown01-v2',
'TurnTown01-v1', 'TurnTown01-v2'],
'town2': [
'FullTown02-v1', 'FullTown02-v2', 'FullTown02-v3', 'FullTown02-v4',
'StraightTown02-v1', 'StraightTown02-v2',
'TurnTown02-v1', 'TurnTown02-v2'],
    'town1p': [
        'FullTown01-v5', 'FullTown01-v6',
        'StraightTown01-v3', 'TurnTown01-v3',
    ],
    'town2p': [
        'FullTown02-v5', 'FullTown02-v6',
        'StraightTown02-v3', 'TurnTown02-v3',
    ],
'ntown1p': [
'NoCrashTown01-v7', 'NoCrashTown01-v8', 'NoCrashTown01-v9',
],
'ntown2p': [
'NoCrashTown02-v7', 'NoCrashTown02-v8', 'NoCrashTown02-v9',
],
'empty': [
'NoCrashTown01-v1', 'NoCrashTown01-v2',
'NoCrashTown02-v1', 'NoCrashTown02-v2',
],
'regular': [
'NoCrashTown01-v3', 'NoCrashTown01-v4',
'NoCrashTown02-v3', 'NoCrashTown02-v4',
],
'regular-np': [
'NoCrashTown01-v3-np', 'NoCrashTown01-v4-np',
'NoCrashTown02-v3-np', 'NoCrashTown02-v4-np',
],
'dense': [
'NoCrashTown01-v5', 'NoCrashTown01-v6',
'NoCrashTown02-v5', 'NoCrashTown02-v6',
],
'dense-np': [
'NoCrashTown01-v5-np', 'NoCrashTown01-v6-np',
'NoCrashTown02-v5-np', 'NoCrashTown02-v6-np',
],
'custom': [
'NoCrashTown02-v1', 'NoCrashTown02-v2',
'NoCrashTown02-v3', 'NoCrashTown02-v4',
'NoCrashTown02-v5', 'NoCrashTown02-v6',
'NoCrashTown01-v1', 'NoCrashTown01-v2',
'NoCrashTown01-v3', 'NoCrashTown01-v4',
'NoCrashTown01-v5', 'NoCrashTown01-v6',
],
}
_aliases['all'] = _aliases['town1'] + _aliases['town2']
ALL_SUITES = list(_suites.keys()) + list(_aliases.keys())
def make_suite(suite_name, port=2000, big_cam=False, planner='new', client=None, use_cv=False):
assert suite_name in _suites, '%s is not registered!'%suite_name
args, kwargs = _suites[suite_name]
kwargs['port'] = port
kwargs['big_cam'] = big_cam
kwargs['planner'] = planner
kwargs['client'] = client
kwargs['use_cv'] = use_cv
return PointGoalSuite(*args, **kwargs)
def get_suites(suite_name):
if suite_name.lower() in _aliases:
        return _aliases[suite_name.lower()]
return [suite_name]
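
# Hedged usage sketch (not part of the original file): an alias such as 'town1'
# expands to the concrete suite names registered above, and every resolved name is
# a key of `_suites`, so it can be handed to `make_suite` once a CARLA server is
# listening on the given port. Port 2000 below is illustrative only.
#
#     for name in get_suites('town1'):
#         assert name in _suites
#         # suite = make_suite(name, port=2000)  # requires a running CARLA server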
```
#### File: masters-thesis/perception/custom_datasets.py
```python
from pathlib import Path
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import augmenter
from perception.utils.helpers import get_segmentation_tensor
from perception.utils.segmentation_labels import DEFAULT_CLASSES
class MultiTaskDataset(Dataset):
"""Dataset of folder with rgb, segmentation and depth subfolders"""
def __init__(self, root_folder: str, transform=None, semantic_classes=DEFAULT_CLASSES, max_n_instances=None,
augment_strategy=None):
self.root_folder = Path(root_folder)
self.transform = transform
self.semantic_classes = semantic_classes
self.rgb_folder = self.root_folder / "rgb"
self.semantic_folder = self.root_folder / "segmentation"
self.depth_folder = self.root_folder / "depth"
self.rgb_imgs = [x for x in self.rgb_folder.iterdir()]
self.semantic_imgs = [x for x in self.semantic_folder.iterdir()]
self.depth_imgs = [x for x in self.depth_folder.iterdir()]
self.rgb_imgs.sort()
self.semantic_imgs.sort()
self.depth_imgs.sort()
self.rgb_imgs = self.rgb_imgs[:max_n_instances]
self.semantic_imgs = self.semantic_imgs[:max_n_instances]
self.depth_imgs = self.depth_imgs[:max_n_instances]
assert len(self.rgb_imgs) == len(self.depth_imgs)
assert len(self.rgb_imgs) == len(self.semantic_imgs)
self.num_imgs = len(self.rgb_imgs)
print("Len of dataset is:", self.num_imgs)
print("augment with", augment_strategy)
if augment_strategy is not None and augment_strategy != "None":
self.augmenter = getattr(augmenter, augment_strategy)
else:
self.augmenter = None
self.batch_read_number = 819200
def __len__(self):
return self.num_imgs
def __getitem__(self, idx):
def transpose(img, normalize: bool):
img = img.transpose(2, 0, 1)
return img / 255 if normalize else img
def read_rgb(img_path):
return cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
rgb_target = read_rgb(str(self.rgb_imgs[idx]))
if self.augmenter:
rgb_input = self.augmenter(self.batch_read_number).augment_image(rgb_target)
else:
rgb_input = rgb_target
rgb_input = transpose(rgb_input, normalize=True)
rgb_target = transpose(rgb_target, normalize=True)
semantic_img = transpose(
get_segmentation_tensor(read_rgb(str(self.semantic_imgs[idx])), classes=self.semantic_classes),
normalize=False)
depth_img = np.array([cv2.imread(str(self.depth_imgs[idx]), cv2.IMREAD_GRAYSCALE)]) / 255
self.batch_read_number += 1
return rgb_input, rgb_target, semantic_img, depth_img
class SegmentationDataset(Dataset):
"""Dataset of folder with rgb, segmentation subfolders"""
def __init__(self, root_folder: str, transform=None, semantic_classes=DEFAULT_CLASSES, max_n_instances=None,
augment_strategy=None):
self.root_folder = Path(root_folder)
self.transform = transform
self.semantic_classes = semantic_classes
self.rgb_folder = self.root_folder / "rgb"
self.semantic_folder = self.root_folder / "segmentation"
self.rgb_imgs = [x for x in self.rgb_folder.iterdir()]
self.semantic_imgs = [x for x in self.semantic_folder.iterdir()]
self.rgb_imgs.sort()
self.semantic_imgs.sort()
self.rgb_imgs = self.rgb_imgs[:max_n_instances]
self.semantic_imgs = self.semantic_imgs[:max_n_instances]
assert len(self.rgb_imgs) == len(self.semantic_imgs)
self.num_imgs = len(self.rgb_imgs)
print("Len of dataset is:", self.num_imgs)
print("augment with", augment_strategy)
if augment_strategy is not None and augment_strategy != "None":
self.augmenter = getattr(augmenter, augment_strategy)
else:
self.augmenter = None
self.batch_read_number = 819200
self.to_tensor = transforms.Compose([
transforms.ToTensor()
])
self.to_tensor_and_normalize = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
def __len__(self):
return self.num_imgs
def __getitem__(self, idx):
def read_rgb(img_path):
return cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
rgb_target = read_rgb(str(self.rgb_imgs[idx]))
if self.augmenter:
rgb_input = self.augmenter(self.batch_read_number).augment_image(rgb_target)
else:
rgb_input = rgb_target
rgb_raw = rgb_input.transpose(2, 0, 1)
rgb_input = self.to_tensor_and_normalize(rgb_input)
rgb_target = self.to_tensor_and_normalize(rgb_target)
semantic_img = self.to_tensor(get_segmentation_tensor(read_rgb(str(self.semantic_imgs[idx])),
classes=self.semantic_classes))
self.batch_read_number += 1
return rgb_input, rgb_target, semantic_img, rgb_raw, str(self.semantic_imgs[idx])
class DepthDataset(Dataset):
"""Dataset of folder with rgb and depth subfolders"""
def __init__(self, root_folder: str, transform=None, max_n_instances=None, augment_strategy=None,
use_transform=None):
self.root_folder = Path(root_folder)
self.transform = transform
self.rgb_folder = self.root_folder / "rgb"
self.depth_folder = self.root_folder / "depth"
self.rgb_imgs = [x for x in self.rgb_folder.iterdir()]
self.depth_imgs = [x for x in self.depth_folder.iterdir()]
self.rgb_imgs.sort()
self.depth_imgs.sort()
self.rgb_imgs = self.rgb_imgs[:max_n_instances]
self.depth_imgs = self.depth_imgs[:max_n_instances]
assert len(self.rgb_imgs) == len(self.depth_imgs)
self.num_imgs = len(self.rgb_imgs)
print("Len of dataset is:", self.num_imgs)
print("augment with", augment_strategy)
if augment_strategy is not None and augment_strategy != "None":
self.augmenter = getattr(augmenter, augment_strategy)
else:
self.augmenter = None
self.batch_read_number = 819200
if use_transform is None:
print("DepthDataset: Using normal transform")
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif use_transform == "midas_small":
print("DepthDataset: Using small midas transform")
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
self.transform = midas_transforms.small_transform
else:
print("DepthDataset: Using big midas transform")
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
self.transform = midas_transforms.default_transform
self.to_tensor = transforms.Compose([
transforms.ToTensor()
])
        # TODO: use MiDaS's own transforms - they reshape to 384 x 384
def __len__(self):
return self.num_imgs
def __getitem__(self, idx):
def read_rgb(img_path):
return cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
rgb_target = read_rgb(str(self.rgb_imgs[idx]))
if self.augmenter:
rgb_input = self.augmenter(self.batch_read_number).augment_image(rgb_target)
else:
rgb_input = rgb_target
rgb_raw = rgb_input.transpose(2, 0, 1)
rgb_input = self.transform(rgb_input)
rgb_target = self.transform(rgb_target)
depth_img = (np.array([cv2.imread(str(self.depth_imgs[idx]), cv2.IMREAD_GRAYSCALE)]) / 255)
self.batch_read_number += 1
return rgb_input, rgb_target, depth_img, rgb_raw, str(self.depth_imgs[idx])
class ComparisonDataset(Dataset):
"""Dataset of folder with rgb, segmentation and depth subfolders"""
def __init__(self, root_folder: str, segmentation_models, depth_models,
semantic_classes=DEFAULT_CLASSES, transform=None, max_n_instances=None):
self.root_folder = Path(root_folder)
self.transform = transform
self.semantic_classes = semantic_classes
self.rgb_folder = self.root_folder / "rgb"
self.semantic_folder = self.root_folder / "segmentation"
self.depth_folder = self.root_folder / "depth"
self.rgb_imgs = [x for x in self.rgb_folder.iterdir()]
self.semantic_imgs = [x for x in self.semantic_folder.iterdir()]
self.depth_imgs = [x for x in self.depth_folder.iterdir()]
self.rgb_imgs.sort()
self.semantic_imgs.sort()
self.depth_imgs.sort()
self.rgb_imgs = self.rgb_imgs[:max_n_instances]
self.semantic_imgs = self.semantic_imgs[:max_n_instances]
self.depth_imgs = self.depth_imgs[:max_n_instances]
assert len(self.rgb_imgs) == len(self.depth_imgs)
assert len(self.rgb_imgs) == len(self.semantic_imgs)
self.num_imgs = len(self.rgb_imgs)
print("Len of dataset is:", self.num_imgs)
# same setup but for variable number of prediction models
self.segmentation_model_imgs = {}
for model in segmentation_models:
self.segmentation_model_imgs[model[0]] = [x for x in model[1].iterdir()]
self.segmentation_model_imgs[model[0]].sort()
self.segmentation_model_imgs[model[0]] = self.segmentation_model_imgs[model[0]][:max_n_instances]
assert len(self.segmentation_model_imgs[model[0]]) == self.num_imgs
self.depth_model_imgs = {}
for model in depth_models:
self.depth_model_imgs[model[0]] = [x for x in model[1].iterdir()]
self.depth_model_imgs[model[0]].sort()
self.depth_model_imgs[model[0]] = self.depth_model_imgs[model[0]][:max_n_instances]
assert len(self.depth_model_imgs[model[0]]) == self.num_imgs
self.depth_model_invert = {}
for model in depth_models:
self.depth_model_invert[model[0]] = model[2]
def __len__(self):
return self.num_imgs
def __getitem__(self, idx):
def transpose(img, normalize: bool):
img = img.transpose(2, 0, 1)
return img / 255 if normalize else img
def read_rgb(img_path):
return cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
rgb_img = read_rgb(str(self.rgb_imgs[idx]))
semantic_img = transpose(
get_segmentation_tensor(read_rgb(str(self.semantic_imgs[idx])), classes=self.semantic_classes),
normalize=False)
depth_img = np.array([cv2.imread(str(self.depth_imgs[idx]), cv2.IMREAD_GRAYSCALE)]) / 255
semantic_model_preds = {}
for model_name in self.segmentation_model_imgs:
semantic_model_preds[model_name] = transpose(
get_segmentation_tensor(read_rgb(str(self.segmentation_model_imgs[model_name][idx])),
classes=self.semantic_classes), normalize=False)
depth_model_preds = {}
for model_name in self.depth_model_imgs:
            # some models treat white as close and black as far away; invert those predictions so all depth maps are aligned
if self.depth_model_invert[model_name]:
depth_model_preds[model_name] = (255 - np.array([cv2.imread(str(self.depth_model_imgs[model_name][idx])
, cv2.IMREAD_GRAYSCALE)])) / 255
else:
depth_model_preds[model_name] = np.array([cv2.imread(str(self.depth_model_imgs[model_name][idx])
, cv2.IMREAD_GRAYSCALE)]) / 255
return rgb_img, semantic_img, depth_img, semantic_model_preds, depth_model_preds
if __name__ == '__main__':
dataset = MultiTaskDataset("data/perception/prepped_256x288_mtl", semantic_classes=DEFAULT_CLASSES)
dataloader = DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0,
pin_memory=True)
for i, data in enumerate(dataloader):
        rgb_input, rgb_target, semantic, depth = data
print(semantic.shape)
```
#### File: perception/evaluation/model_evaluation.py
```python
import functools
from collections import defaultdict
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from perception.custom_datasets import ComparisonDataset
from perception.utils.visualization import plot_segmentation, plot_image, display_images_horizontally
from perception.utils.segmentation_labels import DEFAULT_CLASSES
def mean_jaccard_index(target, predictions):
"""
Semantic segmentation metric. Calculates mean intersection over union over all classes.
Works for batches of data.
"""
intersection = torch.logical_and(target, predictions)
union = torch.logical_or(target, predictions)
intersection_sums = torch.sum(intersection, dim=(-2, -1))
union_sums = torch.sum(union, dim=(-2,-1))
class_exists_mask = union_sums != 0
# union_sums will contain 0's if a class is not present in an image, which will give division by zero
iou_scores_classwise = intersection_sums / (union_sums + 0.00000000001)
iou_scores_imagewise_sum = iou_scores_classwise.sum(dim=1)
class_exists_mask_sum = class_exists_mask.sum(dim=1)
iou_scores_imagewise_mean = iou_scores_imagewise_sum / class_exists_mask_sum
iou_score_batch_mean = torch.mean(iou_scores_imagewise_mean)
return iou_score_batch_mean.numpy(), iou_scores_classwise.numpy()
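
# Hedged usage sketch (not part of the original file): both jaccard metrics expect
# one-hot tensors of shape (batch, n_classes, H, W), e.g. the one-hot arrays built in
# perception.custom_datasets. The class count and spatial size below are illustrative.
#
#     target = torch.zeros(2, 9, 64, 64); target[:, 0] = 1
#     prediction = target.clone()
#     mean_iou, classwise_iou = mean_jaccard_index(target, prediction)  # mean_iou == 1.0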
def weighted_jaccard_index(target, predictions):
"""
    Semantic segmentation metric. Calculates intersection over union per class and averages it, weighted by each class's pixel frequency.
Works for batches of data.
"""
class_counts = torch.sum(target, dim=(-2, -1), dtype=torch.int32)
n_pixels = torch.sum(class_counts, dim=(-1))[0]
class_frequencies = class_counts / n_pixels
intersection = torch.logical_and(target, predictions)
union = torch.logical_or(target, predictions)
intersection_sums = torch.sum(intersection, dim=(-2, -1))
union_sums = torch.sum(union, dim=(-2,-1))
# union_sums will contain 0's if a class is not present in an image, which will give division by zero
iou_scores_classwise = intersection_sums / (union_sums + 0.00000000001)
iou_scores_weighted = torch.sum(iou_scores_classwise * class_frequencies, dim=(-1))
iou_score_batch_weighted_mean = torch.mean(iou_scores_weighted)
return iou_score_batch_weighted_mean.numpy()
def rmse(targets, predictions):
"""
    Depth estimation evaluation method. Root Mean Squared Error computed over all pixels (of an image or a batch).
Should work for batches and single images.
"""
return np.sqrt(np.average((targets-predictions)**2))
def accuracy_within_threshold(targets, predictions, threshold=1.25):
"""
Depth estimation evaluation method. Calculates a delta value for each pixel in an image, and
then checks which percentage of pixels are within a certain threshold.
Should work for batches and single images.
"""
targets_over_preds = targets / predictions
preds_over_targets = predictions / targets
deltas = np.maximum(targets_over_preds, preds_over_targets)
within_threshold_matrix = deltas < threshold
uniques, counts = np.unique(within_threshold_matrix, return_counts=True)
if len(counts) > 1:
accuracy = counts[1] / (counts[0] + counts[1]) # this will work as long as there are both True and False values
else:
if True in uniques:
accuracy = 1.
# print("Accuracy within threshold warning: Accuracy is 1. uniques:", uniques)# TODO uncomment for real eval
else:
accuracy = 0.
print("Accuracy within threshold warning: Accuracy is 0. uniques:", uniques)
return accuracy
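
# Hedged worked example (not part of the original file): delta = max(target/pred, pred/target)
# is computed per pixel, and the accuracy is the fraction of pixels whose delta is below the
# threshold. The 2x2 arrays below are illustrative only.
#
#     t = np.array([[1.0, 1.0], [1.0, 1.0]])
#     p = np.array([[1.0, 1.2], [2.0, 1.0]])
#     accuracy_within_threshold(t, p, threshold=1.25)  # 3 of 4 pixels pass -> 0.75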
def compare_models(data_folder, segmentation_models, depth_models, batch_size=1, max_n_instances=None,
n_classes=len(DEFAULT_CLASSES)+1):
targets = ComparisonDataset(data_folder, segmentation_models, depth_models, max_n_instances=max_n_instances)
dataloader = DataLoader(targets, batch_size=batch_size, shuffle=False, num_workers=0,
pin_memory=True)
# semantic segmentation metrics
mean_intersection_over_union_accumulated = defaultdict(int)
weighted_mean_intersection_over_union_accumulated = defaultdict(int)
# depth estimation metrics
accuracy_with_threshold_accumulated = defaultdict(int)
accuracy_with_threshold2_accumulated = defaultdict(int)
accuracy_with_threshold3_accumulated = defaultdict(int)
rmse_accumulated = defaultdict(int)
classwise_iou_accumulated = defaultdict(functools.partial(np.zeros, n_classes))
classwise_iou_class_counts = defaultdict(functools.partial(np.zeros, n_classes))
for rgb_targets, segmentation_targets, depth_targets, segmentation_preds, depth_preds in tqdm(dataloader):
#print("SEMANTIC SEGMENTATION:")
#pepe = depth_targets[0].numpy().transpose(1, 2, 0)
#plot_image(depth_targets[0].numpy().transpose(1, 2, 0), title="ground truth")
#plot_image(depth_targets[0].numpy().transpose(1, 2, 0), title="ground truth gray", cmap="gray")
for model in segmentation_preds:
mean_iou, batch_classwise_iou = mean_jaccard_index(segmentation_targets, segmentation_preds[model])
mean_intersection_over_union_accumulated[model] += mean_iou
for img_classwise_iou in batch_classwise_iou:
classwise_iou_accumulated[model] += img_classwise_iou
# count if class actually is in img, to get correct averages
for i_class in range(len(img_classwise_iou)):
if img_classwise_iou[i_class] > 0:
classwise_iou_class_counts[model][i_class] += 1
weighted_mean_intersection_over_union_accumulated[model] \
+= weighted_jaccard_index(segmentation_targets, segmentation_preds[model])
#img = segmentation_preds[model].numpy()[0].transpose(1, 2, 0)
#plot_segmentation(img, title=model)
#print("\nDEPTH ESTIMATION")
for model in depth_preds:
accuracy_with_threshold_accumulated[model] += accuracy_within_threshold(depth_targets, depth_preds[model],
threshold=1.25)
accuracy_with_threshold2_accumulated[model] += accuracy_within_threshold(depth_targets, depth_preds[model],
threshold=1.25**2)
accuracy_with_threshold3_accumulated[model] += accuracy_within_threshold(depth_targets, depth_preds[model],
threshold=1.25**3)
rmse_accumulated[model] += rmse(depth_targets, depth_preds[model])
#img = depth_preds[model].numpy()[0].transpose(1, 2, 0)
#plot_image(img, title=model, cmap="gray")
n_batches = np.ceil(len(targets) / batch_size)
# calculate average over batches, semantic segmentation
mean_intersection_over_union_avg = {}
weighted_mean_intersection_over_union_avg = {}
class_intersection_over_union_avg = defaultdict(functools.partial(np.zeros, n_classes))
for model in segmentation_models:
model_name = model[0]
mean_intersection_over_union_avg[model_name] = mean_intersection_over_union_accumulated[model_name] / n_batches
weighted_mean_intersection_over_union_avg[model_name] = weighted_mean_intersection_over_union_accumulated[model_name] / n_batches
for i_class in range(len(classwise_iou_accumulated[model_name])):
class_intersection_over_union_avg[model_name][i_class] = classwise_iou_accumulated[model_name][i_class] / (classwise_iou_class_counts[model_name][i_class]+0.0000000001)
print("---")
print("Model:", model_name, "has mean jaccard index avg:", mean_intersection_over_union_avg[model_name])
print("Model:", model_name, "has weighted jaccard index avg:", weighted_mean_intersection_over_union_avg[model_name])
print("Model:", model_name, "has classwise iou's:", [i for i in class_intersection_over_union_avg[model_name]])
print("---")
# calculate average over batches, depth estimation
accuracy_within_threshold_avg = {}
accuracy_within_threshold2_avg = {}
accuracy_within_threshold3_avg = {}
rmse_avg = {}
for model in depth_models:
model_name = model[0]
accuracy_within_threshold_avg[model_name] = accuracy_with_threshold_accumulated[model_name] / n_batches
accuracy_within_threshold2_avg[model_name] = accuracy_with_threshold2_accumulated[model_name] / n_batches
accuracy_within_threshold3_avg[model_name] = accuracy_with_threshold3_accumulated[model_name] / n_batches
rmse_avg[model_name] = rmse_accumulated[model_name] / n_batches
print("---")
print("Model:", model_name, "has accuracy within threshold avg:", accuracy_within_threshold_avg[model_name])
print("Model:", model_name, "has accuracy within threshold2 avg:", accuracy_within_threshold2_avg[model_name])
print("Model:", model_name, "has accuracy within threshold3 avg:", accuracy_within_threshold3_avg[model_name])
print("Model:", model_name, "has rmse avg:", rmse_avg[model_name])
print("---")
if __name__ == "__main__":
test = "test2"
    # location of the training, test1 and test2 data sets
data_folder = Path("data/perception") / test
predictions_folder = Path("data/perception/predictions")
    # stored in the format (name, location)
segmentation_models = [("unet_resnet50", predictions_folder / "semseg/unet_resnet50" / test),
("unet_resnet50_weighted_2.5", predictions_folder / "semseg/unet_resnet50_weighted_2.5" / test),
("unet_resnet50_weighted_5", predictions_folder / "semseg/unet_resnet50_weighted_5" / test),
("fcn_resnet101", predictions_folder / "semseg/fcn_resnet101" / test),
("deeplabv3-mobilenet", predictions_folder / "semseg/deeplabv3_mobilenet" / test),
("deeplabv3-resnet50", predictions_folder / "semseg/deeplabv3_resnet50" / test),
("deeplabv3-resnet101", predictions_folder / "semseg/deeplabv3_resnet101" / test),
]
#("semantic-test1 (ground truf)", predictions_folder / "semantic_test1"),
#("semantic-test2 (ground truf)", predictions_folder / "semantic_test2")]
    # stored in the format (name, location, invert_pixels_in_loading)
# ("test1-depth", data_folder / "depth", False)
depth_models = [("midas-small", predictions_folder / "depth/midas_small" / test, True),
("midas-large", predictions_folder / "depth/midas_large" / test, True),
("UNet", predictions_folder / "depth/unet" / test, False),
("UNet-resnet34", predictions_folder / "depth/unet_resnet34" / test, False)
]
#("depth-test1", predictions_folder / "depth_test1", False),
#("depth-test2", predictions_folder / "depth_test2", False)
#]
compare_models(data_folder, segmentation_models, depth_models, batch_size=20, max_n_instances=None)
```
#### File: masters-thesis/perception/perception_model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class MobileNetUNet(nn.Module):
def __init__(self, n_semantic_classes):
super(MobileNetUNet, self).__init__()
self.encoder = MobileNetEncoder()
self.rgb_decoder = UNetDecoder(3)
self.semantic_seg_decoder = UNetDecoder(n_semantic_classes)
self.depth_decoder = UNetDecoder(1)
self.use_rgb_decoder = True
def forward(self, x: torch.Tensor):
# Encode input
x = self.encoder(x)
# Decode into RGB, semantic segmentation, and depth map.
semantic_seg_pred = F.softmax(self.semantic_seg_decoder(x), dim=1)
depth_pred = torch.sigmoid(self.depth_decoder(x))
if self.use_rgb_decoder:
rgb_pred = F.relu(self.rgb_decoder(x))
return rgb_pred, semantic_seg_pred, depth_pred
return semantic_seg_pred, depth_pred
def set_rgb_decoder(self, use_rgb_decoder: bool):
self.use_rgb_decoder = use_rgb_decoder
def get_encoder(self):
return self.encoder
class MobileNetEncoder(nn.Module):
def __init__(self):
super(MobileNetEncoder, self).__init__()
# Input Conv layer
self.input_conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=(1, 1))
        self.input_batch_norm1 = nn.BatchNorm2d(32)
self.input_conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=2, padding=(1, 1))
        self.input_batch_norm2 = nn.BatchNorm2d(32)
# For every second depthwise separable conv operation, the resolution is downsampled by using stride = (2,2)
self.depthwise_separable1 = DepthwiseSeparableConv(32, 64)
self.depthwise_separable2 = DepthwiseSeparableConv(64, 128, depthwise_stride=(2, 2))
self.depthwise_separable3 = DepthwiseSeparableConv(128, 128)
self.depthwise_separable4 = DepthwiseSeparableConv(128, 256, depthwise_stride=(2, 2))
self.depthwise_separable5 = DepthwiseSeparableConv(256, 256)
self.depthwise_separable6 = DepthwiseSeparableConv(256, 512, depthwise_stride=(2, 2))
# Block of five repeated depthwise separable conv operations
self.depthwise_separable7 = DepthwiseSeparableConv(512, 512)
self.depthwise_separable8 = DepthwiseSeparableConv(512, 512)
self.depthwise_separable9 = DepthwiseSeparableConv(512, 512)
self.depthwise_separable10 = DepthwiseSeparableConv(512, 512)
self.depthwise_separable11 = DepthwiseSeparableConv(512, 512)
# The two final depthwise separable conv operations, outputting 1024 feature maps
self.depthwise_separable12 = DepthwiseSeparableConv(512, 512, depthwise_stride=(2, 2))
self.depthwise_separable13 = DepthwiseSeparableConv(512, 1024)
def forward(self, x: torch.Tensor):
        x = F.relu(self.input_batch_norm1(self.input_conv1(x)))
f1 = x
        x = F.relu(self.input_batch_norm2(self.input_conv2(x)))
x = self.depthwise_separable1(x)
f2 = x
x = self.depthwise_separable2(x)
x = self.depthwise_separable3(x)
f3 = x
x = self.depthwise_separable4(x)
x = self.depthwise_separable5(x)
f4 = x
x = self.depthwise_separable6(x)
x = self.depthwise_separable7(x)
x = self.depthwise_separable8(x)
x = self.depthwise_separable9(x)
x = self.depthwise_separable10(x)
x = self.depthwise_separable11(x)
f5 = x
x = self.depthwise_separable12(x)
x = self.depthwise_separable13(x)
f6 = x
return [f1, f2, f3, f4, f5, f6]
class DepthwiseSeparableConv(nn.Module):
def __init__(self, input_channels, output_channels, depthwise_kernel_size=3, depthwise_stride=(1, 1),
padding=(1, 1),
padding_mode="zeros"):
super(DepthwiseSeparableConv, self).__init__()
self.depthwise = nn.Conv2d(in_channels=input_channels, out_channels=input_channels,
kernel_size=depthwise_kernel_size, stride=depthwise_stride,
padding=padding, padding_mode=padding_mode, groups=input_channels)
self.batchnorm1 = nn.BatchNorm2d(input_channels)
self.pointwise = nn.Conv2d(in_channels=input_channels, out_channels=output_channels, kernel_size=1)
self.batchnorm2 = nn.BatchNorm2d(output_channels)
def forward(self, x: torch.Tensor):
x = F.relu(self.batchnorm1(self.depthwise(x)))
x = F.relu(self.batchnorm2(self.pointwise(x)))
return x
class UNetDecoder(nn.Module):
def __init__(self, n_classes):
"""
UNet consists of an encoder (contracting path) and a decoder (expansive path).
        This class implements only the decoder (the expansive path); the encoder features come from MobileNetEncoder.
"""
super(UNetDecoder, self).__init__()
self.init_conv = nn.Conv2d(1024, 512, kernel_size=3, padding=1)
self.init_batch_norm = nn.BatchNorm2d(512)
self.up1 = UpConv(1024, 256)
self.up2 = UpConv(512, 128)
self.up3 = UpConv(256, 64)
self.up4 = UpConv(128, 32)
self.up5 = UpConv(64, 32)
self.conv = nn.Conv2d(32, 32, kernel_size=3, padding=1)
self.final_conv = nn.Conv2d(32, n_classes, kernel_size=1)
def forward(self, features):
f1, f2, f3, f4, f5, f6 = features
x = F.relu(self.init_batch_norm(self.init_conv(f6)))
x = self.up1(x, f5)
x = self.up2(x, f4)
x = self.up3(x, f3)
x = self.up4(x, f2)
x = self.up5(x, f1)
x = self.conv(x)
x = self.final_conv(x)
return x
class UpConv(nn.Module):
def __init__(self, input_channels, output_channels):
super(UpConv, self).__init__()
self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
self.conv1 = nn.Conv2d(in_channels=input_channels, out_channels=output_channels, kernel_size=3, padding=1)
self.batch_norm1 = nn.BatchNorm2d(output_channels)
self.conv2 = nn.Conv2d(in_channels=output_channels, out_channels=output_channels, kernel_size=3, padding=1)
self.batch_norm2 = nn.BatchNorm2d(output_channels)
def forward(self, x1: torch.Tensor, x2: torch.Tensor):
x1 = self.upsample(x1)
# dim 0 is batch dimension, dim 1 is channel dimension.
x1 = torch.cat([x2, x1], dim=1)
x1 = F.relu(self.batch_norm1(self.conv1(x1)))
x1 = F.relu(self.batch_norm2(self.conv2(x1)))
return x1
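

# Hedged smoke test (not part of the original file): runs a random batch through
# MobileNetUNet and prints the three decoder output shapes. The 9 semantic classes
# and the 256x288 input resolution are illustrative assumptions (the resolution only
# needs to be divisible by 32 for the skip connections to line up).
if __name__ == "__main__":
    model = MobileNetUNet(n_semantic_classes=9)
    model.eval()
    with torch.no_grad():
        rgb_pred, semantic_pred, depth_pred = model(torch.randn(2, 3, 256, 288))
    # Expected shapes: (2, 3, 256, 288), (2, 9, 256, 288) and (2, 1, 256, 288)
    print(rgb_pred.shape, semantic_pred.shape, depth_pred.shape)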
```
#### File: perception/training/models.py
```python
import torch
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
from torchvision.models.segmentation.fcn import FCNHead
from torchvision import models
def createDeepLabv3(outputchannels=1, backbone="resnet50", pretrained=True):
"""DeepLabv3 class with custom head
Args:
outputchannels (int, optional): The number of output channels
in your dataset masks. Defaults to 1.
Returns:
        model: The DeepLabv3 model with the requested backbone (resnet50, mobilenet or resnet101).
"""
if backbone == "resnet50":
print("DeepLabv3: Using resnet50 as backbone")
model = models.segmentation.deeplabv3_resnet50(pretrained=pretrained,
progress=True)
model.classifier = DeepLabHead(2048, outputchannels)
elif backbone == "mobilenet":
print("DeepLabv3: Using mobilenet as backbone")
model = models.segmentation.deeplabv3_mobilenet_v3_large(pretrained=pretrained, progress=True)
model.classifier = DeepLabHead(960, outputchannels)
else:
print("DeepLabv3: Using resnet101 as backbone")
model = models.segmentation.deeplabv3_resnet101(pretrained=pretrained,
progress=True)
model.classifier = DeepLabHead(2048, outputchannels)
model.aux_classifier = None
#for param in model.parameters():
# param.requires_grad = False
return model
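
# Hedged usage sketch (not part of the original file): torchvision's segmentation
# models return a dict, so the class logits live under the 'out' key. The 9-class
# output and the 1x3x256x288 input below are illustrative assumptions.
#
#     model = createDeepLabv3(outputchannels=9, backbone="resnet50", pretrained=False)
#     model.eval()
#     with torch.no_grad():
#         logits = model(torch.randn(1, 3, 256, 288))["out"]  # shape (1, 9, 256, 288)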
def createFCN(outputchannels=1, backbone="resnet50", pretrained=True):
if backbone == "resnet50":
print("FCN: Using resnet50 as backbone")
model = models.segmentation.fcn_resnet50(pretrained=pretrained, progress=True,
num_classes=21, aux_loss=False)
else:
print("FCN: Using resnet101 as backbone")
model = models.segmentation.fcn_resnet101(pretrained=pretrained, progress=True,
num_classes=21, aux_loss=False)
model.aux_classifier = None
#for param in model.parameters():
# param.requires_grad = False
model.classifier = FCNHead(2048, outputchannels)
return model
def createUNet():
from perception.unet.unet_model import UNet
model = UNet(n_channels=3, n_classes=1, bilinear=True)
return model
def createUNetResNet():
import segmentation_models_pytorch as smp
model = smp.Unet(encoder_name="resnet34", encoder_weights="imagenet", in_channels=3, classes=1,
activation="sigmoid")
return model
def createUNetResNetSemSeg(n_classes):
import segmentation_models_pytorch as smp
model = smp.Unet(encoder_name="resnet50", encoder_weights="imagenet", in_channels=3, classes=n_classes,
activation="softmax2d")
return model
def createMidas(use_large_model=True):
if use_large_model:
midas = torch.hub.load("intel-isl/MiDaS", "MiDaS")
else:
midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small")
return midas
"""
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
if use_large_model:
transform = midas_transforms.default_transform
else:
transform = midas_transforms.small_transform
import cv2
import urllib.request
import numpy as np
import matplotlib.pyplot as plt
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
midas.to(device)
midas.eval()
img = cv2.imread("data/perception/test1/rgb/clear_noon_1823_463.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
input_batch = transform(img).to(device)
with torch.no_grad():
prediction = midas(input_batch)
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=img.shape[:2],
mode="bicubic",
align_corners=False,
).squeeze()
output = prediction.cpu().numpy()
plt.imshow(output)
plt.show()
"""
if __name__ == "__main__":
#createDeepLabv3(outputchannels=9, backbone="resnet50", pretrained=True)
createMidas()
#createFCN(outputchannels=9, backbone="resnet101", pretrained=True)
```
#### File: perception/utils/helpers.py
```python
import os
import time
from multiprocessing.dummy import Pool
from pathlib import Path
import cv2
import numpy as np
def convert_segmentation_images(input_folder, output_folder, classes_array):
filenames = os.listdir(input_folder)
verify_folder_exists(output_folder)
i = 0
for filename in filenames:
# In BGR format
img = cv2.cvtColor(cv2.imread(input_folder + "/" + filename), cv2.COLOR_BGR2RGB)
output_img = np.zeros(img.shape)
for j, class_colors in enumerate(classes_array):
for color in class_colors:
mask = (img == color).all(axis=2)
output_img[mask] = [j + 1, 0, 0]
write_path = str(output_folder + "/" + filename)
cv2.imwrite(write_path, output_img)
i += 1
if i % 100 == 0:
print("Progress: ", i, " of ", len(filenames))
def read_and_crop_image(input_dir, output_dir, filename, counter):
try:
file_path = input_dir + "/" + filename
write_path = os.path.join(output_dir, filename)
if ".npz" in filename:
try:
img = np.load(file_path)["arr_0"]
img = img.reshape((img.shape[0], img.shape[1], 1)) * 255 * 3 # 1 Channel
img = img.astype("uint8")
except Exception as e:
print("Exception:", e)
raise e
else:
img = cv2.imread(file_path)
try:
cv2.imwrite(write_path.replace("npz", "jpg"), crop_and_resize_img(img))
except Exception as e:
print("Exception on write:", e)
raise e
if counter % 500 == 0:
print("Progress:", counter)
except Exception as e:
print("Exception:", e)
def crop_and_resize(input_dir, output_dir):
verify_folder_exists(Path(output_dir))
filenames = list(os.listdir(input_dir))
print("output_dir", output_dir)
print("Processing {} images".format(len(filenames)))
with Pool(processes=8) as pool: # this should be the same as your processor cores (or less)
chunksize = 56 # making this larger might improve speed (less important the longer a single function call takes)
print("chunksize", chunksize)
result = pool.starmap_async(read_and_crop_image, # function to send to the worker pool
((input_dir, output_dir, file, i) for i, file in enumerate(filenames)),
# generator to fill in function args
chunksize) # how many jobs to submit to each worker at once
while not result.ready(): # print out progress to indicate program is still working.
# with counter.get_lock(): #you could lock here but you're not modifying the value, so nothing bad will happen if a write occurs simultaneously
# just don't `time.sleep()` while you're holding the lock
time.sleep(.1)
print('\nCompleted all images')
def crop_and_resize_img(img):
side_len = min(img.shape[0], img.shape[1])
side_len -= side_len % 32
cropped_img = img[0:side_len, 0:side_len]
return cv2.resize(cropped_img, (288, 256), interpolation=cv2.INTER_NEAREST)
def verify_folder_exists(path):
if not os.path.exists(str(path)):
os.makedirs(str(path))
def get_segmentation_tensor(image: np.ndarray, classes):
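    """
    Converts a semantic image whose class index is stored in the first (red) channel
    into an (H, W, len(classes) + 1) one-hot array; the last channel marks every
    pixel that belongs to none of the requested classes.
    """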
n_classes = len(classes) + 1
height, width, _ = image.shape
segmentation_labels = np.zeros((height, width, n_classes))
image = image[:, :, 0]
for i, c in enumerate(classes):
        class_mask = (image == c)
        segmentation_labels[:, :, i] = class_mask
        segmentation_labels[:, :, -1] = np.logical_or(segmentation_labels[:, :, -1], class_mask)
segmentation_labels[:, :, -1] = np.logical_not(segmentation_labels[:, :, -1])
return segmentation_labels
if __name__ == '__main__':
semantic_image = "data/perception/test1/segmentation/clear_noon_1228_163.png"
rgb_image = "data/perception/test1/rgb/clear_noon_1228_163.png"
rgb = cv2.cvtColor(cv2.imread(rgb_image), cv2.COLOR_BGR2RGB)
classes = [8, 7, 6, 4, 10, 5, 18, 14]
semantic_img = get_segmentation_tensor(cv2.cvtColor(cv2.imread(semantic_image), cv2.COLOR_BGR2RGB),
classes=classes)
import matplotlib.pyplot as plt
from perception.utils.visualization import get_rgb_segmentation, get_segmentation_colors
# plot_segmentation(semantic_img)
colors = get_segmentation_colors(len(classes) + 1, class_indxs=classes)
rgb_segmentation = get_rgb_segmentation(semantic_img, colors) / 255
plt.imshow(rgb_segmentation)
plt.show()
plt.imshow(rgb)
plt.show()
```
#### File: perception/utils/visualization.py
```python
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
from perception.utils.segmentation_labels import CARLA_CLASSES, DEFAULT_CLASSES
def get_segmentation_colors(n_classes, only_random=False, class_indxs=None, color_seed=73):
assert only_random or class_indxs
random.seed(color_seed)
class_colors = []
if only_random:
for _ in range(n_classes):
class_colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
return class_colors
elif class_indxs:
for c in class_indxs:
class_colors.append(CARLA_CLASSES[c][1])
class_colors.append((0, 0, 0))
return class_colors
def get_rgb_segmentation(semantic_image: np.ndarray, class_colors):
"""
    Creates an RGB image from a semantic image. The semantic image must have shape (Height, Width, #Semantic Classes).
"""
height, width, n_classes = semantic_image.shape
semantic_image_rgb = np.zeros((height, width, 3))
semantic_pred_argmax = semantic_image.argmax(axis=2)
for c in range(n_classes):
semantic_image_rgb[:, :, 0] += ((semantic_pred_argmax[:, :] == c) * (class_colors[c][0])).astype('uint8')
semantic_image_rgb[:, :, 1] += ((semantic_pred_argmax[:, :] == c) * (class_colors[c][1])).astype('uint8')
semantic_image_rgb[:, :, 2] += ((semantic_pred_argmax[:, :] == c) * (class_colors[c][2])).astype('uint8')
return semantic_image_rgb
def display_images_horizontally(images, fig_width, fig_height, display=True, title=None, subplot_titles=None):
    # Inspired by Hands-On Machine Learning with SciKit-learn, Keras and TensorFlow, page 574
# Displays the list of images horizontally.
def plot_image(image, cmap="binary"):
# todo: https://stackoverflow.com/questions/49643907/clipping-input-data-to-the-valid-range-for-imshow-with-rgb-data-0-1-for-floa
plt.imshow(image, cmap=cmap)
plt.axis("off")
# plt.show()
n_images = len(images)
if subplot_titles is not None:
assert len(subplot_titles) == n_images, "need a subtitle for every image"
if n_images > 0:
fig = plt.figure(figsize=(fig_width, fig_height))
for image_index in range(n_images):
image = images[image_index]
ax = plt.subplot(1, n_images, 1 + image_index)
if subplot_titles is not None:
ax.set_title(subplot_titles[image_index])
cmap = "binary" if len(images[image_index].shape) == 3 else "gray"
plot_image(image, cmap=cmap)
if title is not None:
fig.suptitle(title, fontsize="x-large")
if display:
fig.show()
array = get_np_array_from_figure(fig)
plt.close()
return array
def get_np_array_from_figure(fig):
"""Returns numpy rgb array from matplotlib figure"""
fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def display_originals_with_decoded(original_images, decoded_images, title=""):
# Inspired by Hands-On Machine Learning with SciKit-learn, Keras and TensorFlow, page 574.
# Meant to be used for visualization of target images and predicted images in multi-task learning.
# Target images displayed in top row, predicted images in row below.
def plot_image(image, cmap="binary"):
# todo: https://stackoverflow.com/questions/49643907/clipping-input-data-to-the-valid-range-for-imshow-with-rgb-data-0-1-for-floa
plt.imshow(image, cmap=cmap)
plt.axis("off")
# plt.show()
n_images = len(original_images)
if n_images > 0:
fig = plt.figure(figsize=(n_images * 1.2, 3))
fig.suptitle(title, fontsize=10)
for image_index in range(n_images):
cmap = "binary" if original_images[image_index].shape[-1] == 3 else "gray"
plt.subplot(2, n_images, 1 + image_index)
plot_image(original_images[image_index], cmap=cmap)
plt.subplot(2, n_images, 1 + n_images + image_index)
plot_image(decoded_images[image_index], cmap=cmap)
fig.show()
def show_predictions(model, inputs, device, semantic_classes, n_displays=1, title=""):
# input_image has size (Height, Width, N-Channels).
# Have to add batch dimension, and transpose it to able to make predictions
rgb_inputs, rgb_targets, semantic_targets, depth_targets = inputs[0].to(device), inputs[1].to(device), inputs[2].to(
device), inputs[3].to(device)
model.eval()
with torch.no_grad():
predictions = model(rgb_inputs)
# Send all predictions and target tensors to cpu
n_displays = min(n_displays, len(rgb_inputs))
rgb_preds, semantic_preds, depth_preds = [pred.cpu().numpy().transpose(0, 2, 3, 1)[:n_displays] for pred in
predictions]
rgb_targets = rgb_targets.cpu().numpy().transpose(0, 2, 3, 1)[:n_displays]
depth_targets = depth_targets.cpu().numpy().transpose(0, 2, 3, 1)[:n_displays]
semantic_targets = semantic_targets.cpu().numpy().transpose(0, 2, 3, 1)[:n_displays]
for i in range(n_displays):
rgb_pred = rgb_preds[i]
semantic_pred = semantic_preds[i]
depth_pred = depth_preds[i]
rgb_target = rgb_targets[i]
semantic_target = semantic_targets[i]
depth_target = depth_targets[i]
class_colors = get_segmentation_colors(n_classes=len(semantic_classes) + 1, class_indxs=semantic_classes)
semantic_pred_rgb = get_rgb_segmentation(semantic_image=semantic_pred, class_colors=class_colors)
semantic_target_rgb = get_rgb_segmentation(semantic_image=semantic_target, class_colors=class_colors)
semantic_pred_rgb = semantic_pred_rgb / 255
semantic_target_rgb = semantic_target_rgb / 255
# Setup original images for display
original_images = [rgb_target, semantic_target_rgb, depth_target]
# Setup decoded images for display
decoded_images = [rgb_pred, semantic_pred_rgb, depth_pred]
# Show rgb, semantic segmentation and depth images with corresponding predictions
display_originals_with_decoded(original_images=original_images, decoded_images=decoded_images, title=title)
def plot_image(image, title="", cmap="binary"):
# todo: https://stackoverflow.com/questions/49643907/clipping-input-data-to-the-valid-range-for-imshow-with-rgb-data-0-1-for-floa
plt.imshow(image, cmap=cmap)
plt.title(title)
plt.show()
def plot_segmentation(image: np.ndarray, title=None):
_, _, n_classes = image.shape
class_colors = get_segmentation_colors(n_classes=n_classes, class_indxs=DEFAULT_CLASSES)
semantic_image_rgb = get_rgb_segmentation(image, class_colors=class_colors) / 255
plot_image(semantic_image_rgb, title=title)
plt.show()
```
#### File: masters-thesis/perception/vehicle_spawner.py
```python
import math
import random
from typing import List
import glob
import os
import sys
from typing import Dict
import carla
class VehicleSpawner(object):
def __init__(self, client: carla.Client, world: carla.World, safe_mode=True):
self.client = client
self.world = world
self.spawn_points = self.world.get_map().get_spawn_points()
self.blueprints = self.world.get_blueprint_library().filter("vehicle.*")
self.blueprintsWalkers = world.get_blueprint_library().filter("walker.pedestrian.*")
self.vehicles_list: List[int] = []
self.walkers_list = []
self.all_id = []
self.all_actors = []
self.safe_mode = safe_mode
self._bad_colors = [
"255,255,255", "183,187,162", "237,237,237",
"134,134,134", "243,243,243", "127,130,135",
"109,109,109", "181,181,181", "140,140,140",
"181,178,124", "171,255,0", "251,241,176",
"158,149,129", "233,216,168", "233,216,168",
"108,109,126", "193,193,193", "227,227,227",
"151,150,125", "206,206,206", "255,222,218",
"211,211,211", "191,191,191"
] if safe_mode else []
def init_traffic_manager(self):
traffic_manager = self.client.get_trafficmanager(8000)
traffic_manager.set_global_distance_to_leading_vehicle(2.0)
traffic_manager.global_percentage_speed_difference(25.0)
traffic_manager.set_hybrid_physics_mode(True)
traffic_manager.set_synchronous_mode(True)
def spawn_nearby(self, hero_spawn_point_index, number_of_vehicles_min, number_of_vehicles_max,
number_of_walkers_min, number_of_walkers_max, radius):
number_of_vehicles = random.randint(number_of_vehicles_min, number_of_vehicles_max)
number_of_walkers = random.randint(number_of_walkers_min, number_of_walkers_max)
print(f"Attempting to spawn {number_of_vehicles} vehicles, {number_of_walkers} walkers")
valid_spawn_points = self.get_valid_spawn_points(hero_spawn_point_index, radius)
if self.safe_mode:
self.blueprints = [x for x in self.blueprints if int(x.get_attribute('number_of_wheels')) == 4]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('isetta')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('carlacola')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('cybertruck')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('t2')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('coupe')]
number_of_spawn_points = len(valid_spawn_points)
if number_of_spawn_points > number_of_vehicles:
random.shuffle(valid_spawn_points)
elif number_of_vehicles > number_of_spawn_points:
            msg = 'requested %d vehicles, but could only find %d spawn points'
            print(msg % (number_of_vehicles, number_of_spawn_points))
            number_of_vehicles = number_of_spawn_points
# @todo cannot import these directly.
SpawnActor = carla.command.SpawnActor
SetAutopilot = carla.command.SetAutopilot
FutureActor = carla.command.FutureActor
batch = []
for n, transform in enumerate(valid_spawn_points):
if n >= number_of_vehicles:
break
blueprint = random.choice(self.blueprints)
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
while color in self._bad_colors:
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
blueprint.set_attribute('role_name', 'autopilot')
batch.append(SpawnActor(blueprint, transform).then(SetAutopilot(FutureActor, True)))
for response in self.client.apply_batch_sync(batch, True):
if response.error:
print(f"Vehicle spawn error: {response.error}")
else:
self.vehicles_list.append(response.actor_id)
# -------------
# Spawn Walkers
# -------------
# some settings
percentagePedestriansRunning = 0.0 # how many pedestrians will run
percentagePedestriansCrossing = 0.0 # how many pedestrians will walk through the road
# 1. take all the random locations to spawn
spawn_points = []
for i in range(number_of_walkers):
spawn_point = carla.Transform()
loc = self.world.get_random_location_from_navigation()
if (loc != None):
spawn_point.location = loc
spawn_points.append(spawn_point)
# 2. we spawn the walker object
batch = []
walker_speed = []
for spawn_point in spawn_points:
walker_bp = random.choice(self.blueprintsWalkers)
# set as not invincible
if walker_bp.has_attribute('is_invincible'):
walker_bp.set_attribute('is_invincible', 'false')
# set the max speed
if walker_bp.has_attribute('speed'):
if (random.random() > percentagePedestriansRunning):
# walking
walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1])
else:
# running
walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2])
else:
print("Walker has no speed")
walker_speed.append(0.0)
batch.append(SpawnActor(walker_bp, spawn_point))
results = self.client.apply_batch_sync(batch, True)
walker_speed2 = []
for i in range(len(results)):
if results[i].error:
print(results[i].error)
else:
self.walkers_list.append({"id": results[i].actor_id})
walker_speed2.append(walker_speed[i])
walker_speed = walker_speed2
# 3. we spawn the walker controller
batch = []
walker_controller_bp = self.world.get_blueprint_library().find('controller.ai.walker')
for i in range(len(self.walkers_list)):
batch.append(SpawnActor(walker_controller_bp, carla.Transform(), self.walkers_list[i]["id"]))
results = self.client.apply_batch_sync(batch, True)
for i in range(len(results)):
if results[i].error:
print(results[i].error)
else:
self.walkers_list[i]["con"] = results[i].actor_id
        # 4. put the walker and controller ids together so we can retrieve the actor objects from their ids
for i in range(len(self.walkers_list)):
self.all_id.append(self.walkers_list[i]["con"])
self.all_id.append(self.walkers_list[i]["id"])
self.all_actors = self.world.get_actors(self.all_id)
# tick to ensure client receives the last transform of the walkers we have just created
self.world.tick()
        # 5. initialize each controller and set target to walk to (list is [controller, actor, controller, actor ...])
# set how many pedestrians can cross the road
self.world.set_pedestrians_cross_factor(percentagePedestriansCrossing)
for i in range(0, len(self.all_id), 2):
# start walker
self.all_actors[i].start()
# set walk to random point
self.all_actors[i].go_to_location(self.world.get_random_location_from_navigation())
# max speed
self.all_actors[i].set_max_speed(float(walker_speed[int(i / 2)]))
        print(f'Spawned {len(self.vehicles_list):d} vehicles and {len(self.walkers_list):d} walkers.')
def get_valid_spawn_points(self, hero_spawn_point_index, radius):
hero_spawn_point = self.spawn_points[hero_spawn_point_index]
hero_x = hero_spawn_point.location.x
hero_y = hero_spawn_point.location.y
valid_spawn_points = []
for spawn_point in self.spawn_points:
            # Distance between spawn points
loc = hero_spawn_point.location
dx = spawn_point.location.x - loc.x
dy = spawn_point.location.y - loc.y
distance = math.sqrt(dx * dx + dy * dy)
min_distance = 10
if spawn_point == hero_spawn_point or distance < min_distance:
continue
if radius != 0:
x = spawn_point.location.x
y = spawn_point.location.y
yaw = spawn_point.rotation.yaw
angle_diff = hero_spawn_point.rotation.yaw - yaw
angle_diff = abs((angle_diff + 180) % 360 - 180)
if abs(hero_x - x) <= radius and abs(hero_y - y) <= radius and angle_diff < 50:
valid_spawn_points.append(spawn_point)
else:
valid_spawn_points.append(spawn_point)
return valid_spawn_points
def destroy_vehicles(self):
print(f'Destroying {len(self.vehicles_list):d} vehicles.\n')
self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in self.vehicles_list], True)
self.vehicles_list.clear()
# stop walker controllers (list is [controller, actor, controller, actor ...])
for i in range(0, len(self.all_id), 2):
self.all_actors[i].stop()
print('\ndestroying %d walkers' % len(self.walkers_list))
self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in self.all_id], True)
self.walkers_list = []
self.all_id = []
self.all_actors = []
```
#### File: training/ppo_utils/critic.py
```python
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from bird_view.models import common
from bird_view.models.birdview import spatial_softmax_base
STEPS = 5
SPEED_STEPS = 3
COMMANDS = 4
DT = 0.1
CROP_SIZE = 192
PIXELS_PER_METER = 5
class BirdViewCritic(common.ResnetBase):
def __init__(self, device, backbone='resnet18', input_channel=7, n_step=5, all_branch=False, **kwargs):
super().__init__(backbone=backbone, input_channel=input_channel, bias_first=False)
self.deconv = spatial_softmax_base()
self.value_pred = nn.ModuleList([
nn.Sequential(
nn.BatchNorm2d(64),
nn.Flatten(),
nn.Linear(64 * 48 * 48, 200),
nn.BatchNorm1d(200),
nn.ReLU(),
nn.Linear(200, 25),
nn.BatchNorm1d(25),
nn.ReLU(),
nn.Linear(25, 1)
) for _ in range(4)
])
self.all_branch = all_branch
self.transform = transforms.ToTensor()
self.one_hot = torch.FloatTensor(torch.eye(4))
self.device = device
def forward(self, bird_view, velocity, command):
h = self.conv(bird_view)
b, c, kh, kw = h.size()
# Late fusion for velocity
velocity = velocity[..., None, None, None].repeat((1, 128, kh, kw))
h = torch.cat((h, velocity), dim=1)
h = self.deconv(h)
value_preds = [value_pred(h) for value_pred in self.value_pred]
value_preds = torch.stack(value_preds, dim=1)
value_pred = common.select_branch(value_preds, command)
if self.all_branch:
return value_pred, value_preds
return value_pred
def evaluate(self, birdview, speed, command):
self.eval()
with torch.no_grad():
if self.all_branch:
state_value, _ = self.forward(birdview, speed, command)
else:
state_value = self.forward(birdview, speed, command)
return state_value.squeeze()
def prepare_data(self, birdview, speed, command):
_birdview = self.transform(birdview).to(self.device).unsqueeze(0)
_speed = torch.FloatTensor([speed]).to(self.device)
_command = self.one_hot[int(command) - 1].to(self.device).unsqueeze(0)
return _birdview, _speed, _command
class BirdViewCritic2(common.ResnetBase):
def __init__(self, device, backbone='resnet18', input_channel=7, n_step=5, all_branch=False, **kwargs):
super().__init__(backbone=backbone, input_channel=input_channel, bias_first=False)
self.value_pred = nn.ModuleList([
nn.Sequential(
nn.Linear(6400, 200),
nn.BatchNorm1d(200),
nn.ReLU(),
nn.Linear(200, 25),
nn.BatchNorm1d(25),
nn.ReLU(),
nn.Linear(25, 1)
) for _ in range(4)
])
self.flatten = nn.Flatten()
self.conv1 = nn.Conv2d(640, 256, 2)
self.batchnorm1 = nn.BatchNorm2d(256)
self.all_branch = all_branch
self.transform = transforms.ToTensor()
self.one_hot = torch.FloatTensor(torch.eye(4))
self.device = device
def forward(self, bird_view, velocity, command):
h = self.conv(bird_view)
b, c, kh, kw = h.size()
# Late fusion for velocity
velocity = velocity[..., None, None, None].repeat((1, 128, kh, kw))
h = torch.cat((h, velocity), dim=1)
h = self.flatten(nn.functional.relu(self.batchnorm1(self.conv1(h))))
value_preds = [value_pred(h) for value_pred in self.value_pred]
value_preds = torch.stack(value_preds, dim=1)
value_pred = common.select_branch(value_preds, command)
if self.all_branch:
return value_pred, value_preds
return value_pred
def evaluate(self, birdview, speed, command):
self.eval()
with torch.no_grad():
if self.all_branch:
state_value, _ = self.forward(birdview, speed, command)
else:
state_value = self.forward(birdview, speed, command)
return state_value.squeeze()
def prepare_data(self, birdview, speed, command):
_birdview = self.transform(birdview).to(self.device).unsqueeze(0)
_speed = torch.FloatTensor([speed]).to(self.device)
_command = self.one_hot[int(command) - 1].to(self.device).unsqueeze(0)
return _birdview, _speed, _command
class BirdViewCritic3(common.ResnetBase):
def __init__(self, device, backbone='resnet18', input_channel=7, n_step=5, all_branch=False, **kwargs):
super().__init__(backbone=backbone, input_channel=input_channel, bias_first=False)
self.deconv = spatial_softmax_base()
self.value_pred = nn.ModuleList([
nn.Sequential(
nn.BatchNorm2d(64),
nn.Conv2d(64, 32, kernel_size=3),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Conv2d(32, 16, kernel_size=3),
nn.MaxPool2d(kernel_size=(2, 2), padding=(1, 1)),
nn.ReLU(),
nn.Flatten(),
nn.Linear(16 * 11 * 11, 200),
nn.BatchNorm1d(200),
nn.ReLU(),
nn.Linear(200, 25),
nn.BatchNorm1d(25),
nn.ReLU(),
nn.Linear(25, 1)
) for _ in range(4)
])
self.all_branch = all_branch
self.transform = transforms.ToTensor()
self.one_hot = torch.FloatTensor(torch.eye(4))
self.device = device
def forward(self, bird_view, velocity, command):
h = self.conv(bird_view)
b, c, kh, kw = h.size()
# Late fusion for velocity
velocity = velocity[..., None, None, None].repeat((1, 128, kh, kw))
h = torch.cat((h, velocity), dim=1)
h = self.deconv(h)
value_preds = [value_pred(h) for value_pred in self.value_pred]
value_preds = torch.stack(value_preds, dim=1)
value_pred = common.select_branch(value_preds, command)
if self.all_branch:
return value_pred, value_preds
return value_pred
def evaluate(self, birdview, speed, command):
self.eval()
with torch.no_grad():
if self.all_branch:
state_value, _ = self.forward(birdview, speed, command)
else:
state_value = self.forward(birdview, speed, command)
return state_value.squeeze()
def prepare_data(self, birdview, speed, command):
_birdview = self.transform(birdview).to(self.device).unsqueeze(0)
_speed = torch.FloatTensor([speed]).to(self.device)
_command = self.one_hot[int(command) - 1].to(self.device).unsqueeze(0)
return _birdview, _speed, _command
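# Sketch of single-sample use shared by the critics in this file (illustrative only):
#   critic = BirdViewCritic2(device)
#   bv, spd, cmd = critic.prepare_data(birdview, speed, command)
#   value = critic.evaluate(bv, spd, cmd)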
```
#### File: jostl/masters-thesis/view_benchmark_results_modified.py
```python
import re
from collections import defaultdict
import numpy as np
import pandas as pd
from terminaltables import DoubleTable
from pathlib import Path
def weather_table(weather_dict, path):
table_data = []
for weather, seeds in weather_dict.items():
successes = []
totals = []
collisions = []
collided_and_success = []
total_lights = []
total_lights_ran = []
for seed in seeds:
successes.append(seeds[seed]["success"])
totals.append(seeds[seed]["total"])
collisions.append(seeds[seed]["collided"])
collided_and_success.append(seeds[seed]["collided_and_success"])
total_lights.append(seeds[seed]["total_lights"])
total_lights_ran.append(seeds[seed]["total_lights_ran"])
successes = np.array(successes)
totals = np.array(totals)
collisions = np.array(collisions)
collided_and_success = np.array(collided_and_success)
total_lights = np.array(total_lights)
total_lights_ran = np.array(total_lights_ran)
success_rates = successes / totals * 100
lights_ran_rates = total_lights_ran / total_lights * 100
timeouts = totals - successes - collisions + collided_and_success
collision_rates = collisions / totals * 100
timeout_rates = timeouts / totals * 100
collided_and_success_rates= collided_and_success / totals * 100
for elem in abs(timeout_rates + collision_rates + success_rates - collided_and_success_rates):
assert 99.9 < elem < 100.1, "rates do not sum to 100"
if len(seeds) > 1:
table_data.append([weather, "%.1f ± %.1f" % (np.mean(success_rates), np.std(success_rates, ddof=1)),
"%d/%d" % (sum(successes), sum(totals)), ','.join(sorted(seeds.keys())),
"%.1f ± %.1f" % (np.mean(collision_rates), np.std(collision_rates, ddof=1)),
"%.1f ± %.1f" % (np.mean(timeout_rates), np.std(timeout_rates, ddof=1)),
"%.1f ± %.1f" % (np.mean(lights_ran_rates), np.std(lights_ran_rates, ddof=1)),
"%d" % np.sum(collided_and_success)])
else:
table_data.append([weather, "%.1f" % np.mean(success_rates), "%d/%d" % (sum(successes), sum(totals)),
','.join(sorted(seeds.keys())),
"%.1f" % collision_rates, "%.1f" % timeout_rates, "%.1f" % lights_ran_rates,
"%.d" % collided_and_success])
table_data = sorted(table_data, key=lambda row: row[0])
table_data = [('Weather', 'Success Rate %', 'Total', 'Seeds', "Collision %", "Timeout %", "Lights ran %",
"Collided+Success")] + table_data
table = DoubleTable(table_data, "Performance of %s" % path.name)
print(table.table)
def main(path_name, separate_seeds=False, create_weather_table=False):
performance = dict()
weather_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
path = Path(path_name)
for summary_path in path.glob('*/summary.csv'):
name = summary_path.parent.name
match = re.search('^(?P<suite_name>.*Town.*-v[0-9]+.*)_seed(?P<seed>[0-9]+)', name)
suite_name = match.group('suite_name')
seed = match.group('seed')
summary = pd.read_csv(summary_path)
if suite_name not in performance:
performance[suite_name] = dict()
collided_and_success_dataframe = np.logical_and(summary["success"], summary["collided"])
performance[suite_name][seed] = (summary['success'].sum(), len(summary), summary["collided"].sum(),
collided_and_success_dataframe.sum(),
summary["total_lights"].sum(), summary["total_lights_ran"].sum())
if create_weather_table:
# need to iterate over each route
for i in range(len(summary)):
weather_dict[summary["weather"][i]][seed]["success"] += summary["success"][i]
weather_dict[summary["weather"][i]][seed]["total"] += 1
weather_dict[summary["weather"][i]][seed]["collided"] += summary["collided"][i]
weather_dict[summary["weather"][i]][seed]["collided_and_success"] += np.logical_and(summary["success"][i], summary["collided"][i])
weather_dict[summary["weather"][i]][seed]["total_lights"] += summary["total_lights"][i]
weather_dict[summary["weather"][i]][seed]["total_lights_ran"] += summary["total_lights_ran"][i]
if create_weather_table:
weather_table(weather_dict, path)
return
table_data = []
for suite_name, seeds in performance.items():
if separate_seeds:
for seed in seeds:
successes, totals, collisions, collided_and_success, total_lights, total_lights_ran = np.array(seeds[seed])
success_rates = successes / totals * 100
lights_ran_rates = total_lights_ran / total_lights * 100
timeouts = totals - successes - collisions + collided_and_success
collision_rates = collisions / totals * 100
timeout_rates = timeouts / totals * 100
table_data.append(
[suite_name+"-seed-"+seed, "%.1f" % success_rates, "%d/%d" % (successes, totals),
                     seed,
"%.1f" % collision_rates, "%.1f" % timeout_rates, "%.1f" % lights_ran_rates,
"%d" % collided_and_success])
else:
successes, totals, collisions, collided_and_success, total_lights, total_lights_ran = np.array(list(zip(*seeds.values())))
success_rates = successes / totals * 100
lights_ran_rates = total_lights_ran / total_lights * 100
timeouts = totals - successes - collisions + collided_and_success
collision_rates = collisions / totals * 100
timeout_rates = timeouts / totals * 100
if len(seeds) > 1:
table_data.append([suite_name, "%.1f ± %.1f"%(np.mean(success_rates), np.std(success_rates, ddof=1)),
"%d/%d"%(sum(successes),sum(totals)), ','.join(sorted(seeds.keys())),
"%.1f ± %.1f"%(np.mean(collision_rates), np.std(collision_rates, ddof=1)),
"%.1f ± %.1f"%(np.mean(timeout_rates), np.std(timeout_rates, ddof=1)),
"%.1f ± %.1f"%(np.mean(lights_ran_rates), np.std(lights_ran_rates, ddof=1)),
"%d"%np.sum(collided_and_success)])
else:
table_data.append([suite_name, "%.1f"%np.mean(success_rates), "%d/%d"%(sum(successes),sum(totals)), ','.join(sorted(seeds.keys())),
"%.1f"%collision_rates, "%.1f"%timeout_rates, "%.1f"%lights_ran_rates, "%d"%collided_and_success])
table_data = sorted(table_data, key=lambda row: row[0])
table_data = [('Suite Name', 'Success Rate %', 'Total', 'Seeds', "Collision %", "Timeout %", "Lights ran %", "Collided+Success")] + table_data
table = DoubleTable(table_data, "Performance of %s"%path.name)
print(table.table)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='path of benchmark folder')
parser.add_argument("--separate-seeds", action="store_true")
parser.add_argument("--weather", action="store_true")
args = parser.parse_args()
main(args.path, args.separate_seeds, create_weather_table=args.weather)
``` |
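As a rough usage sketch, the script above can also be driven directly from Python; the results folder below and its layout (one `<suite>_seed<N>/summary.csv` per run, which is what `main` globs for) are assumptions for illustration:

```python
from view_benchmark_results_modified import main

main("results/benchmark_run")                              # per-suite table, seeds aggregated
main("results/benchmark_run", separate_seeds=True)         # one row per suite/seed pair
main("results/benchmark_run", create_weather_table=True)   # per-weather breakdown
```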
{
"source": "jostmey/RWA",
"score": 2
} |
#### File: reber_grammar/dataset/input_data.py
```python
import os
import numpy as np
##########################################################################################
# Settings
##########################################################################################
# Reber grammar
#
states = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
transitions = {
1: [2, 7],
2: [3, 4],
3: [3, 4],
4: [5, 6],
5: [12],
6: [8, 9],
7: [8, 9],
8: [8, 9],
9: [10, 11],
10: [5, 6],
11: [12],
}
aliases = {
1: 'B', 2: 'T', 3: 'S', 4: 'X', 5: 'S', 6: 'X',
7: 'P', 8: 'T', 9: 'V', 10: 'P', 11: 'V', 12: 'E',
}
encoding = {'B': 0, 'E': 1, 'P': 2, 'S': 3, 'T': 4, 'V': 5, 'X': 6}
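# For example, the chain 1 -> 2 -> 4 -> 5 -> 12 follows `transitions` at every step,
# so valid_chain() accepts it and convert_chain() maps it to 'BTXSE'; likewise
# 1 -> 7 -> 9 -> 11 -> 12 yields 'BPVVE'. Invalid examples are produced further down
# by splicing two chains together until the hybrid breaks the grammar.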
# Data dimensions
#
num_train = 10000
num_test = 10000
max_length = 50
num_features = 7
##########################################################################################
# Utilities
##########################################################################################
def make_chain():
chain = [1]
while chain[-1] != states[-1]:
choices = transitions[chain[-1]]
j = np.random.randint(len(choices))
chain.append(choices[j])
return chain
def valid_chain(chain):
if len(chain) == 0:
return False
if chain[0] != states[0]:
return False
for i in range(1, len(chain)):
if chain[i] not in transitions[chain[i-1]]:
return False
return True
def convert_chain(chain):
sequence = ''
for value in chain:
sequence += aliases[value]
return sequence
##########################################################################################
# Generate/Load dataset
##########################################################################################
# Make directory
#
_path = '/'.join(__file__.split('/')[:-1])
os.makedirs(_path+'/bin', exist_ok=True)
# Training data
#
if not os.path.isfile(_path+'/bin/xs_train.npy') or \
not os.path.isfile(_path+'/bin/ls_train.npy') or \
not os.path.isfile(_path+'/bin/ys_train.npy'):
xs_train = np.zeros((num_train, max_length, num_features))
ls_train = np.zeros(num_train)
ys_train = np.zeros(num_train)
for i in range(num_train):
chain = make_chain()
valid = 1.0
        if np.random.rand() >= 0.5: # Randomly insert a single typo with probability 0.5
hybrid = chain
while valid_chain(hybrid):
chain_ = make_chain()
j = np.random.randint(len(chain))
j_ = np.random.randint(len(chain_))
hybrid = chain[:j]+chain_[j_:]
chain = hybrid
valid = 0.0
sequence = convert_chain(chain)
for j, symbol in enumerate(sequence):
k = encoding[sequence[j]]
xs_train[i,j,k] = 1.0
ls_train[i] = len(sequence)
ys_train[i] = valid
np.save(_path+'/bin/xs_train.npy', xs_train)
np.save(_path+'/bin/ls_train.npy', ls_train)
np.save(_path+'/bin/ys_train.npy', ys_train)
else:
xs_train = np.load(_path+'/bin/xs_train.npy')
ls_train = np.load(_path+'/bin/ls_train.npy')
ys_train = np.load(_path+'/bin/ys_train.npy')
# Test data
#
if not os.path.isfile(_path+'/bin/xs_test.npy') or \
not os.path.isfile(_path+'/bin/ls_test.npy') or \
not os.path.isfile(_path+'/bin/ys_test.npy'):
xs_test = np.zeros((num_test, max_length, num_features))
ls_test = np.zeros(num_test)
ys_test = np.zeros(num_test)
for i in range(num_test):
chain = make_chain()
valid = 1.0
        if np.random.rand() >= 0.5: # Randomly insert a single typo with probability 0.5
hybrid = chain
while valid_chain(hybrid):
chain_ = make_chain()
j = np.random.randint(len(chain))
                j_ = np.random.randint(len(chain_))
hybrid = chain[:j]+chain_[j_:]
chain = hybrid
valid = 0.0
sequence = convert_chain(chain)
for j, symbol in enumerate(sequence):
k = encoding[sequence[j]]
xs_test[i,j,k] = 1.0
ls_test[i] = len(sequence)
ys_test[i] = valid
np.save(_path+'/bin/xs_test.npy', xs_test)
np.save(_path+'/bin/ls_test.npy', ls_test)
np.save(_path+'/bin/ys_test.npy', ys_test)
else:
xs_test = np.load(_path+'/bin/xs_test.npy')
ls_test = np.load(_path+'/bin/ls_test.npy')
ys_test = np.load(_path+'/bin/ys_test.npy')
``` |
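A small sketch of how the generated arrays fit together; the shapes follow from the settings above, and the relative `bin/` path is an assumption (the script saves next to its own location):

```python
import numpy as np

xs = np.load("bin/xs_train.npy")  # (10000, 50, 7) one-hot symbols, zero-padded to max_length
ls = np.load("bin/ls_train.npy")  # (10000,) true sequence lengths
ys = np.load("bin/ys_train.npy")  # (10000,) 1.0 = valid Reber string, 0.0 = corrupted

symbols = "BEPSTVX"  # index order of the `encoding` dict
i = 0
decoded = "".join(symbols[k] for k in xs[i, :int(ls[i])].argmax(axis=1))
print(decoded, ys[i])
```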
{
"source": "JostMigenda/hop-SNalert-app",
"score": 2
} |
#### File: hop-SNalert-app/snews/generate.py
```python
from collections import namedtuple
import datetime
import logging
import os
import random
import time
import uuid
from dotenv import load_dotenv
from hop import Stream
from hop.plugins.snews import SNEWSHeartbeat, SNEWSObservation
logger = logging.getLogger("snews")
Detector = namedtuple("Detector", "detector_id location")
def generate_message(time_string_format, detectors, alert_probability=0.1):
"""Generate fake SNEWS alerts/heartbeats.
"""
detector = detectors[random.randint(0, len(detectors) - 1)]
if random.random() > alert_probability:
logging.debug(f"generating heartbeat from {detector.location} at {detector.detector_id}")
return SNEWSHeartbeat(
message_id=str(uuid.uuid4()),
detector_id=detector.detector_id,
sent_time=datetime.datetime.utcnow().strftime(time_string_format),
machine_time=datetime.datetime.utcnow().strftime(time_string_format),
location=detector.location,
status="On",
content="For testing",
)
else:
logging.debug(f"generating alert from {detector.location} at {detector.detector_id}")
return SNEWSObservation(
message_id=str(uuid.uuid4()),
detector_id=detector.detector_id,
sent_time=datetime.datetime.utcnow().strftime(time_string_format),
neutrino_time=datetime.datetime.utcnow().strftime(time_string_format),
machine_time=datetime.datetime.utcnow().strftime(time_string_format),
location=detector.location,
p_value=0.5,
status="On",
content="For testing",
)
def _add_parser_args(parser):
"""Parse arguments for broker, configurations and options
"""
parser.add_argument('-v', '--verbose', action='count', default=0, help="Be verbose.")
parser.add_argument('-f', '--env-file', type=str, help="The path to the .env file.")
parser.add_argument("--no-auth", action="store_true", help="If set, disable authentication.")
parser.add_argument('-d', '--detector', type=str,
help=("Set a specific detector:location pair to simulate messages from. "
"If not set, generates messages from multiple random locations."))
parser.add_argument('--rate', type=float, default=0.5,
help="Rate to send alerts, default=0.5s")
parser.add_argument('--alert-probability', type=float, default=0.1,
help="Probability of generating an alert. Default = 0.1.")
parser.add_argument('-p',
'--persist',
action="store_true",
help="If set, persist and send messages indefinitely. "
"Otherwise send a single message.")
def main(args):
"""generate synthetic observation/heartbeat messages
"""
# set up logging
verbosity = [logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(
level=verbosity[min(args.verbose, 2)],
format="%(asctime)s | model : %(levelname)s : %(message)s",
)
# load environment variables
load_dotenv(dotenv_path=args.env_file)
# choose set of detector/location pairs
if args.detector:
detectors = [Detector(*args.detector.split(":"))]
else:
detectors = [
Detector("DETECTOR 1", "Houston"),
Detector("DETECTOR 2", "Seattle"),
Detector("DETECTOR 3", "Los Angeles"),
]
# configure and open observation stream
logger.info("starting up stream")
stream = Stream(auth=(not args.no_auth))
source = stream.open(os.getenv("OBSERVATION_TOPIC"), "w")
# generate messages
logger.info(f"publishing messages to {os.getenv('OBSERVATION_TOPIC')}")
try:
# send one message, then persist if specified
message = generate_message(
os.getenv("TIME_STRING_FORMAT"),
detectors,
alert_probability=args.alert_probability,
)
source.write(message)
time.sleep(args.rate)
while args.persist:
message = generate_message(
os.getenv("TIME_STRING_FORMAT"),
detectors,
alert_probability=args.alert_probability,
)
source.write(message)
time.sleep(args.rate)
except KeyboardInterrupt:
pass
finally:
logger.info("shutting down")
source.close()
```
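For a quick look at the generator output without a running broker, `generate_message` can be called directly; the import path and the time format string below are assumptions (the app normally reads `TIME_STRING_FORMAT` from its .env file):

```python
from snews.generate import Detector, generate_message

detectors = [Detector("DETECTOR 1", "Houston")]
# roughly 10% of calls return an SNEWSObservation, the rest SNEWSHeartbeat
msg = generate_message("%y/%m/%d %H:%M:%S", detectors, alert_probability=0.1)
print(type(msg).__name__)
```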
#### File: hop-SNalert-app/snews/model.py
```python
import datetime
import logging
import os
import uuid
from dotenv import load_dotenv
import jsonschema
from jsonschema import validate
from hop import Stream
from hop.plugins.snews import SNEWSAlert, SNEWSHeartbeat, SNEWSObservation
from . import decider
from . import msgSchema
logger = logging.getLogger("snews")
def _add_parser_args(parser):
"""Parse arguments for broker, configurations and options
"""
parser.add_argument('-v', '--verbose', action='count', default=0, help="Be verbose.")
parser.add_argument('-f', '--env-file', type=str, help="The path to the .env file.")
parser.add_argument("--no-auth", action="store_true", help="If set, disable authentication.")
def validateJson(jsonData, jsonSchema):
"""
    Validate JSON data against a JSON schema.
    :param jsonData: the data to validate.
    :param jsonSchema: the schema assumed to be correct.
    :return: True if the data matches the schema, False otherwise.
"""
try:
validate(instance=jsonData, schema=jsonSchema)
except jsonschema.exceptions.ValidationError:
return False
return True
class Model(object):
def __init__(self, args):
"""
The constructor of the model class.
:param args: the command line arguments
"""
# load environment variables
load_dotenv(dotenv_path=args.env_file)
self.args = args
self.gcnFormat = "json"
self.coinc_threshold = int(os.getenv("COINCIDENCE_THRESHOLD"))
self.msg_expiration = int(os.getenv("MSG_EXPIRATION"))
self.db_server = os.getenv("DATABASE_SERVER")
self.drop_db = bool(os.getenv("NEW_DATABASE"))
self.regularMsgSchema = msgSchema.regularMsgSchema
logger.info(f"setting up decider at: {self.db_server}")
self.myDecider = decider.Decider(
self.coinc_threshold,
self.msg_expiration,
os.getenv("TIME_STRING_FORMAT"),
os.getenv("DATABASE_SERVER"),
self.drop_db
)
if self.drop_db:
logger.info("clearing out decider cache")
self.deciderUp = False
# specify topics
self.observation_topic = os.getenv("OBSERVATION_TOPIC")
self.alert_topic = os.getenv("ALERT_TOPIC")
# open up stream connections
self.stream = Stream(auth=(not args.no_auth), persist=True)
self.source = self.stream.open(self.observation_topic, "r")
self.sink = self.stream.open(self.alert_topic, "w")
# message types and processing algorithms
self.mapping = {
SNEWSObservation.__name__: self.processObservationMessage,
SNEWSHeartbeat.__name__: self.processHeartbeatMessage
}
def run(self):
"""
Execute the model.
:return: none
"""
self.deciderUp = True
logger.info("starting decider")
logger.info(f"processing messages from {self.observation_topic}")
for msg, meta in self.source.read(batch_size=1, metadata=True, autocommit=False):
self.processMessage(msg)
self.source.mark_done(meta)
def close(self):
"""
Close stream connections.
"""
logger.info("shutting down")
self.deciderUp = False
self.source.close()
self.sink.close()
def addObservationMsg(self, message):
self.myDecider.addMessage(message)
def processMessage(self, message):
message_type = type(message).__name__
logger.debug(f"processing {message_type}")
if message_type in self.mapping:
self.mapping[message_type](message)
def processObservationMessage(self, message):
self.addObservationMsg(message)
alert = self.myDecider.deciding()
if alert:
# publish alert message to ALERT_TOPIC
logger.info("found coincidence, sending alert")
self.sink.write(self.writeAlertMsg())
def processHeartbeatMessage(self, message):
pass
def writeAlertMsg(self):
return SNEWSAlert(
message_id=str(uuid.uuid4()),
sent_time=datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT")),
machine_time=datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT")),
content="SNEWS Alert: a coincidence between detectors has been observed.",
)
def main(args):
"""main function
"""
# set up logging
verbosity = [logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(
level=verbosity[min(args.verbose, 2)],
format="%(asctime)s | model : %(levelname)s : %(message)s",
)
# start up
model = Model(args)
try:
model.run()
except KeyboardInterrupt:
pass
finally:
model.close()
```
#### File: hop-SNalert-app/tests/test_decider.py
```python
from unittest.mock import patch
import mongomock
from snews import decider
@mongomock.patch(servers=(('localhost', 27017),))
def test_decider(mongodb):
decide = decider.Decider(
coinc_threshold=10,
msg_expiration=120,
datetime_format="%y/%m/%d %H:%M:%S",
mongo_server="mongodb://localhost:27017/",
drop_db=False,
)
with patch.object(decide.db, 'cache', mongodb.cache):
# check messages in cache
messages = list(decide.getCacheMessages())
assert mongodb.cache.count() == len(messages)
# check deciding functionality, should determine coincidence
assert decide.deciding()
```
#### File: hop-SNalert-app/tests/test.py
```python
import subprocess
from hop import Stream
from hop.auth import Auth
from hop import auth
from hop.io import StartPosition
from hop.models import GCNCircular
import argparse
import random
import threading
import time
from functools import wraps
import datetime
import numpy
import uuid
from dotenv import load_dotenv
import os
from unittest.mock import Mock
import unittest
from mongoengine import connect, disconnect
# from hypothesis import given
# from hypothesis.strategies import lists, integers
# from hop.apps.SNalert import model as M
# from hop.apps.SNalert import decider
# from hop.apps.SNalert import db_storage
# from . import demo
# from .. import test_anything
test_locations = ["Houston", "New York", "Boston", "Not Texas"]
# load environment variables
load_dotenv(dotenv_path='./../.env')
# for measuring function execution time
# https://stackoverflow.com/questions/3620943/measuring-elapsed-time-with-the-time-module
PROF_DATA = {}
def profile(fn):
@wraps(fn)
def with_profiling(*args, **kwargs):
start_time = time.time()
ret = fn(*args, **kwargs)
elapsed_time = time.time() - start_time
if fn.__name__ not in PROF_DATA:
PROF_DATA[fn.__name__] = [0, []]
PROF_DATA[fn.__name__][0] += 1
PROF_DATA[fn.__name__][1].append(elapsed_time)
return ret
return with_profiling
def print_prof_data():
for fname, data in PROF_DATA.items():
max_time = max(data[1])
avg_time = sum(data[1]) / len(data[1])
print("Function %s called %d times. " % (fname, data[0]))
print('Execution time max: %.3f, average: %.3f' % (max_time, avg_time))
def clear_prof_data():
global PROF_DATA
PROF_DATA = {}
def exponentialDistribution(mean):
"""
Produce exponential distribution data.
:param mean: Mean of exponential distribution.
:return:
"""
return numpy.random.exponential(mean)
class integrationTest(object):
# @given(
# timeout=integers(min_value=1),
# mean=integers(min_value=1),
# totalTime=integers(min_value=1)
# )
def __init__(self, timeout, mean, totalTime):
"""
The constructor.
:param timeout: Time expiration parameter
:param mean:
:param totalTime:
"""
self.count = 0
self.topic = os.getenv("OBSERVATION_TOPIC")
self.mean = mean
self.totalTime = totalTime
# self.minTime = min
# self.maxTime = max
self.timeOut = timeout
self.auth = Auth(os.getenv("USERNAME"), os.getenv("PASSWORD"), method=auth.SASLMethod.PLAIN)
def run(self):
"""
Run the model for the integration test.
:return: none
"""
t1 = threading.Thread(target=self.readNumMsg, args=(self.topic,))
t1.start()
m = subprocess.Popen(['python3',
'../hop/apps/SNalert/model.py',
'--f',
'./../config.env',
'--no-auth'
])
startTime = time.monotonic()
# randomly publish messages
while time.monotonic() - startTime < self.totalTime:
# randomTime = random.randint(self.minTime, self.maxTime)
randomTime = exponentialDistribution(self.mean)
start2 = time.monotonic()
while True:
if time.monotonic() - start2 > randomTime:
break
# write message with current time
now = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
# newFileName = self.writeMessage(now)
stream = Stream(auth=self.auth)
with stream.open(os.getenv("TESTING_TOPIC"), "w") as s:
s.write(self.writeMessage(now))
m.kill()
def readNumMsg(self, topic):
"""
Read the number of alert messages.
:param topic:
:param configFilePath:
:return:
"""
# gcnFormat = "json"
stream = Stream(persist=True, auth=self.auth)
# print("===")
# print(topic)
with stream.open(topic, "r") as s:
for msg in s: # set timeout=0 so it doesn't stop listening to the topic
print("====")
# if gcn_dict['header']['subject'] == "TEST":
# self.count += 1
self.count += 1
def getCount(self):
return self.count
def writeMessage(self, time):
msg = {}
msg["header"] = {}
msg["header"]["MESSAGE ID"] = str(uuid.uuid4())
msg["header"]["DETECTOR"] = "Test Detector"
msg["header"]["SUBJECT"] = "Test"
msg["header"]["MESSAGE SENT TIME"] = time
msg["header"]["NEUTRINO TIME"] = time
msg["header"]["LOCATION"] = test_locations[random.randint(0, 3)]
msg["header"]["P VALUE"] = "0.5"
msg["header"]["STATUS"] = "On"
msg["header"]["MESSAGE TYPE"] = "Observation"
msg["header"]["FROM"] = "<NAME> <<EMAIL>>"
msg["body"] = "This is an alert message generated at run time for testing purposes."
return msg
# def functionalTest():
#
# pass
class latencyTest(object):
def __init__(self, topic, numDetector=50, time=3000):
"""
The constructor.
"""
self.numMsgPublished = 0
self.numMsgReceived = 0
self.totalLatency = 0
self.numDetector = numDetector
self.detectorThreads = {}
self.countMsg = {}
self.totalTime = time
self.topic = topic
self.auth = Auth(os.getenv("USERNAME"), os.getenv("PASSWORD"), method=auth.SASLMethod.PLAIN)
self.idsWritten = set()
self.idsReceived = set()
self.lock = threading.Lock()
def oneDetectorThread(self, uuid):
# lock = threading.Lock()
print(uuid)
# print(timeout)
startTime = time.monotonic()
# randomly publish messages
while time.monotonic() - startTime < self.totalTime:
# print(time.monotonic() - startTime)
# print(self.totalTime)
# msg = self.writeMessage(uuid)
stream = Stream(auth=self.auth)
with stream.open(self.topic, "w") as s:
msg = self.writeMessage(uuid)
s.write(msg)
with self.lock:
self.numMsgPublished += 1
self.idsWritten.add(msg["header"]["MESSAGE ID"])
# def countWrittenMsgThread(self):
def runTest(self):
"""
Run the latency test.
:return:
"""
# create the topic if doesn't exist
stream = Stream(auth=self.auth)
# with stream.open(self.topic, "w") as s:
# s.write({"TEST": "TEST"})
# first run the thread that logs every message received
logThread = threading.Thread(target=self.logMsgs)
logThread.start()
# wait a few seconds
startTime = time.monotonic()
# randomly publish messages
while time.monotonic() - startTime < 10:
foo = 1
for i in range(self.numDetector):
# print(i)
id = uuid.uuid4()
# print(id)
t = threading.Thread(target=self.oneDetectorThread, args=(str(id),))
# self.oneDetectorThread(id)
self.detectorThreads[id] = t
t.start()
# # first run the thread that logs every message received
# logThread = threading.Thread(target=self.logMsgs)
# logThread.start()
def countMsgThread(self, msg_dict):
"""
        Process a single received message for the latency test.
:param msg_dict:
:return:
"""
# msg_dict = msg.asdict()['content']
id = msg_dict['header']['DETECTOR']
msg_id = msg_dict["header"]["MESSAGE ID"]
receivedTime = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
sentTime = msg_dict['header']['MESSAGE SENT TIME']
timeDiff = datetime.datetime.strptime(receivedTime, os.getenv("TIME_STRING_FORMAT")) - datetime.datetime.strptime(sentTime, os.getenv("TIME_STRING_FORMAT"))
timeDiff_inSeconds = timeDiff.total_seconds()
# print("HERE")
with self.lock:
# print("____")
self.numMsgReceived += 1
self.totalLatency += timeDiff_inSeconds
self.idsReceived.add(msg_id)
def logMsgs(self):
# stream = Stream(persist=True, auth=self.auth, start_at=StartPosition.EARLIEST)
stream = Stream(persist=True, auth=self.auth)
with stream.open(self.topic, "r") as s:
for msg in s: # set timeout=0 so it doesn't stop listening to the topic
t = threading.Thread(target=self.countMsgThread, args=(msg.asdict()['content'],))
t.start()
def calculateAvgLatency(self):
"""
Calculate the latency.
:return:
"""
return self.totalLatency * 1.0 / self.numMsgReceived
def writeMessage(self, detector_id):
"""
Return a dictionary of the message in the required format.
:param uuid:
:return:
"""
now = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
msg = {}
msg["header"] = {}
msg["header"]["MESSAGE ID"] = str(uuid.uuid4())
msg["header"]["DETECTOR"] = detector_id
msg["header"]["SUBJECT"] = "Test"
msg["header"]["MESSAGE SENT TIME"] = now
msg["header"]["NEUTRINO TIME"] = now
msg["header"]["LOCATION"] = test_locations[random.randint(0, 3)]
msg["header"]["P VALUE"] = "0.5"
msg["header"]["STATUS"] = "On"
msg["header"]["MESSAGE TYPE"] = "Latency Testing"
msg["header"]["FROM"] = "<NAME> <<EMAIL>>"
msg["body"] = "This is an alert message generated at run time for testing message latency."
return msg
def check(self):
assert self.numMsgReceived == self.numMsgPublished
if __name__ == '__main__':
print("Latency Test")
print("----------------------------------------")
print("Integration Test #1")
test = latencyTest("kafka://dev.hop.scimma.org:9092/snews-latencyTest", 5, 50)
print(test.totalTime)
test.runTest()
print("------")
startTime = time.monotonic()
# randomly publish messages
while time.monotonic() - startTime < 100:
foo = 1
# print(time.monotonic() - startTime)
print(test.calculateAvgLatency())
print(" %d messages written." % test.numMsgPublished)
print(" %d messages received and read." % test.numMsgReceived)
# print(" %d messages written." % len(test.idsWritten))
# print(" %d messages received and read." % len(test.idsReceived))
# print(" %d messages read in written." % len(test.idsReceived.intersection(test.idsWritten)))
assert test.numMsgPublished == test.numMsgReceived
``` |
{
"source": "JosTorre/chordentlich",
"score": 3
} |
#### File: code/helpers/openssl.py
```python
import subprocess
import os
import time
def makeSha256FromPem(inputFile, passphrase="<PASSWORD>"):
#time.sleep(0.1)
pipe = subprocess.Popen("openssl pkey -pubout -passin pass:"+passphrase+" -inform PEM -outform DER -in "+inputFile+" -out "+inputFile+"temp.der | openssl dgst -sha256 -hex "+inputFile+"temp.der | sed 's/^.* //'", shell=True, close_fds=True, stdout=subprocess.PIPE).stdout
output = pipe.read().rstrip().decode("utf-8")
#try:
# os.remove("temp.der")
#except:
# pass
if len(output) != 64:
print("WARNING: OPENSSL OUTPUT IS: ", output)
# echo the same without regex to get full error message
pipe = subprocess.Popen("openssl pkey -pubout -passin pass:"+passphrase+" -inform PEM -outform DER -in "+inputFile+" -out "+inputFile+"temp.der | openssl dgst -sha256 -hex "+inputFile+"temp.der", shell=True, close_fds=True, stdout=subprocess.PIPE).stdout
output = pipe.read().rstrip().decode("utf-8")
print(output)
return None
else:
return int(str(output), 16) # convert to int
```
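A hedged usage sketch for the helper above; the key file name and passphrase are placeholders:

```python
from helpers.openssl import makeSha256FromPem

# Returns the SHA-256 digest of the DER-encoded public key as an int, or None on failure.
node_id = makeSha256FromPem("node_key.pem", passphrase="secret")
if node_id is not None:
    print(hex(node_id))
```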
#### File: code/helpers/test_replica.py
```python
import unittest
import imp
from helpers.replica import Replica
class TestReplica(unittest.TestCase):
def test_property_get(self):
replica = Replica(10000)
k1 = replica.get_key(51, 1)
k2 = replica.get_key(23, 2)
self.assertEqual((replica.get_key_list(8, 8)[0:3]), (replica.get_key_list(8, 4)[0:3]))
self.assertEqual(len(replica.get_key_list(8, 3)), 3)
self.assertNotEqual(k1, k2) # keys do not collide hopefully at a ring size of 10000
self.assertEqual(k1, 7195)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jostosh/gan",
"score": 2
} |
#### File: gan/dcganmnist/mnist_ssl.py
```python
import tensorflow as tf
import tensorflow.contrib.distributions as tfd
from official.utils.flags import core as flags_core
import numpy as np
from absl import flags, app as absl_app
import os.path as opth
import tqdm
import os
from sklearn.utils import shuffle
layers = tf.keras.layers
def define_flags():
flags_core.define_base() # Defines batch_size and train_epochs
flags_core.define_image()
flags_core.set_defaults(
batch_size=32, train_epochs=100)
flags_core.flags.DEFINE_float(
name="lr", default=2e-4,
help="Learning rate")
flags_core.flags.DEFINE_float(
name="stddev", default=1e-2,
help="z standard deviation")
flags_core.flags.DEFINE_integer(
name="num_classes", default=10,
help="Number of classes")
flags_core.flags.DEFINE_integer(
name="z_dim_size", default=100,
help="Dimension of noise vector")
flags_core.flags.DEFINE_integer(
name="num_labeled_examples", default=400,
help="Number of labeled examples per class")
flags_core.flags.DEFINE_bool(
name="man_reg", default=False,
help="Manifold regularization")
def define_generator():
def conv2d_block(filters, upsample=True, activation=tf.nn.relu, index=0):
if upsample:
model.add(layers.UpSampling2D(name="UpSampling" + str(index), size=(2, 2)))
model.add(layers.Conv2D(
filters=filters, kernel_size=5, padding='same', name="Conv2D" + str(index),
activation=activation))
# From flat noise to spatial
model = tf.keras.models.Sequential(name="Generator")
model.add(layers.Dense(7 * 7 * 64, activation=tf.nn.relu, name="NoiseToSpatial"))
model.add(layers.Reshape((7, 7, 64)))
# Four blocks of convolutions, 2 that upsample and convolve, and 2 more that
# just convolve
conv2d_block(filters=128, upsample=True, index=0)
conv2d_block(filters=64, upsample=True, index=1)
conv2d_block(filters=64, upsample=False, index=2)
conv2d_block(filters=1, upsample=False, activation=tf.nn.tanh, index=3)
return model
class Discriminator:
def __init__(self):
"""The discriminator network. Split up in a 'tail' and 'head' network, so that we can
easily get the """
self.tail = self._define_tail()
self.head = self._define_head()
def _define_tail(self, name="Discriminator"):
"""Defines the network until the intermediate layer that can be used for feature-matching
loss."""
feature_model = tf.keras.models.Sequential(name=name)
def conv2d_dropout(filters, strides, index=0):
# Adds a convolution followed by a Dropout layer
suffix = str(index)
feature_model.add(layers.Conv2D(
filters=filters, strides=strides, name="Conv{}".format(suffix), padding='same',
kernel_size=5, activation=tf.nn.leaky_relu))
feature_model.add(layers.Dropout(name="Dropout{}".format(suffix), rate=0.3))
# Three blocks of convs and dropouts. They all have 5x5 kernels, leaky ReLU and 0.3
# dropout rate.
conv2d_dropout(filters=32, strides=2, index=0)
conv2d_dropout(filters=64, strides=2, index=1)
conv2d_dropout(filters=64, strides=1, index=2)
# Flatten it and build logits layer
feature_model.add(layers.Flatten(name="Flatten"))
return feature_model
def _define_head(self):
# Defines the remaining layers after the 'tail'
head_model = tf.keras.models.Sequential(name="DiscriminatorHead")
head_model.add(layers.Dense(units=10, activation=None, name="Logits"))
return head_model
@property
def trainable_variables(self):
# Return both tail's parameters a well as those of the head
return self.tail.trainable_variables + self.head.trainable_variables
def __call__(self, x, *args, **kwargs):
# By adding this, the code below can treat a Discriminator instance as a
# tf.keras.models.Sequential instance
features = self.tail(x, *args, **kwargs)
return self.head(features, *args, **kwargs), features
def accuracy(logits, labels):
"""Compute accuracy for this mini-batch """
preds = tf.argmax(logits, axis=1)
return tf.reduce_mean(tf.to_float(tf.equal(preds, labels)))
def main(_):
flags_obj = flags.FLAGS
with tf.Graph().as_default():
        # Set up the input pipeline
(images_lab, labels_lab), (images_unl, labels_unl), (images_unl2, labels_unl2), \
(images_test, labels_test) = prepare_input_pipeline(flags_obj)
with tf.name_scope("BatchSize"):
batch_size_tensor = tf.shape(images_lab)[0]
# Get the noise vectors
z, z_perturbed = define_noise(batch_size_tensor, flags_obj)
# Generate images from noise vector
with tf.name_scope("Generator"):
g_model = define_generator()
images_fake = g_model(z)
images_fake_perturbed = g_model(z_perturbed)
# Discriminate between real and fake, and try to classify the labeled data
with tf.name_scope("Discriminator") as discriminator_scope:
d_model = Discriminator()
logits_fake, features_fake = d_model(images_fake, training=True)
logits_fake_perturbed, _ = d_model(images_fake_perturbed, training=True)
logits_real_unl, features_real_unl = d_model(images_unl, training=True)
logits_real_lab, features_real_lab = d_model(images_lab, training=True)
logits_train, _ = d_model(images_lab, training=False)
# Set the discriminator losses
with tf.name_scope("DiscriminatorLoss"):
# Supervised loss, just cross-entropy. This normalizes p(y|x) where 1 <= y <= K
loss_supervised = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels_lab, logits=logits_real_lab))
# Sum of unnormalized log probabilities
logits_sum_real = tf.reduce_logsumexp(logits_real_unl, axis=1)
logits_sum_fake = tf.reduce_logsumexp(logits_fake, axis=1)
loss_unsupervised = 0.5 * (
tf.negative(tf.reduce_mean(logits_sum_real)) +
tf.reduce_mean(tf.nn.softplus(logits_sum_real)) +
tf.reduce_mean(tf.nn.softplus(logits_sum_fake)))
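            # With L = logsumexp(logits), exp(L) acts as the unnormalized probability
            # that the input is real, i.e. D(x) = exp(L) / (exp(L) + 1). Then
            # -log D(x) = softplus(L) - L for real images and
            # -log(1 - D(G(z))) = softplus(L_fake) for generated ones, which is
            # exactly the three mean terms above, averaged by the 0.5 factor.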
loss_d = loss_supervised + loss_unsupervised
if flags_obj.man_reg:
loss_d += 1e-3 * tf.nn.l2_loss(logits_fake - logits_fake_perturbed) \
/ tf.to_float(batch_size_tensor)
# Configure discriminator training ops
with tf.name_scope("Train") as train_scope:
optimizer = tf.train.AdamOptimizer(flags_obj.lr * 0.25)
optimize_d = optimizer.minimize(loss_d, var_list=d_model.trainable_variables)
train_accuracy_op = accuracy(logits_train, labels_lab)
with tf.name_scope(discriminator_scope):
with tf.control_dependencies([optimize_d]):
# Build a second time, so that new variables are used
logits_fake, features_fake = d_model(images_fake, training=True)
logits_real_unl, features_real_unl = d_model(images_unl2, training=True)
# Set the generator loss and the actual train op
with tf.name_scope("GeneratorLoss"):
feature_mean_real = tf.reduce_mean(features_real_unl, axis=0)
feature_mean_fake = tf.reduce_mean(features_fake, axis=0)
# L1 distance of features is the loss for the generator
loss_g = tf.reduce_mean(tf.abs(feature_mean_real - feature_mean_fake))
with tf.name_scope(train_scope):
optimizer = tf.train.AdamOptimizer(flags_obj.lr, beta1=0.5)
train_op = optimizer.minimize(loss_g, var_list=g_model.trainable_variables)
with tf.name_scope(discriminator_scope):
with tf.name_scope("Test"):
logits_test, _ = d_model(images_test, training=False)
test_accuracy_op = accuracy(logits_test, labels_test)
# Setup summaries
with tf.name_scope("Summaries"):
summary_op = tf.summary.merge([
tf.summary.scalar("LossDiscriminator", loss_d),
tf.summary.scalar("LossGenerator", loss_g),
tf.summary.image("GeneratedImages", images_fake),
tf.summary.scalar("ClassificationAccuracyTrain", train_accuracy_op),
tf.summary.scalar("ClassificationAccuracyTest", test_accuracy_op)])
writer = tf.summary.FileWriter(_next_logdir("tensorboard/mnist_ssl"))
writer.add_graph(tf.get_default_graph())
# Run training
steps_per_epoch = 50_000 // flags_obj.batch_size
steps_per_test = 10_000 // flags_obj.batch_size
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
step = 0
for epoch in range(flags_obj.train_epochs):
losses_d, losses_g, accuracies = [], [], []
print("Epoch {}".format(epoch))
pbar = tqdm.trange(steps_per_epoch)
for _ in pbar:
if step % 1000 == 0:
# Look Ma, no feed_dict!
_, loss_g_batch, loss_d_batch, summ, accuracy_batch = sess.run(
[train_op, loss_g, loss_d, summary_op, train_accuracy_op])
writer.add_summary(summ, global_step=step)
else:
_, loss_g_batch, loss_d_batch, accuracy_batch = sess.run(
[train_op, loss_g, loss_d, train_accuracy_op])
pbar.set_description("Discriminator loss {0:.3f}, Generator loss {1:.3f}"
.format(loss_d_batch, loss_g_batch))
losses_d.append(loss_d_batch)
losses_g.append(loss_g_batch)
accuracies.append(accuracy_batch)
step += 1
print("Discriminator loss: {0:.4f}, Generator loss: {1:.4f}, "
"Train accuracy: {2:.4f}"
.format(np.mean(losses_d), np.mean(losses_g), np.mean(accuracies)))
# Classify test data
accuracies = [sess.run(test_accuracy_op) for _ in range(steps_per_test)]
print("Test accuracy: {0:.4f}".format(np.mean(accuracies)))
def define_noise(batch_size_tensor, flags_obj):
# Setup noise vector
with tf.name_scope("LatentNoiseVector"):
z = tfd.Normal(loc=0.0, scale=flags_obj.stddev).sample(
sample_shape=(batch_size_tensor, flags_obj.z_dim_size))
z_perturbed = z + tfd.Normal(loc=0.0, scale=flags_obj.stddev).sample(
sample_shape=(batch_size_tensor, flags_obj.z_dim_size)) * 1e-5
return z, z_perturbed
def prepare_input_pipeline(flags_obj):
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data(
"/home/jos/datasets/mnist/mnist.npz")
def reshape_and_scale(x, img_shape=(-1, 28, 28, 1)):
return x.reshape(img_shape).astype(np.float32) / 255. * 2.0 - 1.0
# Reshape data and rescale to [-1, 1]
train_x = reshape_and_scale(train_x)
test_x = reshape_and_scale(test_x)
# Shuffle train data
train_x_unlabeled, train_y_unlabeled = shuffle(train_x, train_y)
# Select subset as supervised
train_x_labeled, train_y_labeled = [], []
for i in range(flags_obj.num_classes):
train_x_labeled.append(
train_x_unlabeled[train_y_unlabeled == i][:flags_obj.num_labeled_examples])
train_y_labeled.append(
train_y_unlabeled[train_y_unlabeled == i][:flags_obj.num_labeled_examples])
train_x_labeled = np.concatenate(train_x_labeled)
train_y_labeled = np.concatenate(train_y_labeled)
with tf.name_scope("InputPipeline"):
def train_pipeline(data, shuffle_buffer_size):
return tf.data.Dataset.from_tensor_slices(data)\
.cache()\
.shuffle(buffer_size=shuffle_buffer_size)\
.batch(flags_obj.batch_size)\
.repeat()\
.make_one_shot_iterator()
# Setup pipeline for labeled data
train_ds_lab = train_pipeline(
(train_x_labeled, train_y_labeled.astype(np.int64)),
flags_obj.num_labeled_examples * flags_obj.num_classes)
images_lab, labels_lab = train_ds_lab.get_next()
# Setup pipeline for unlabeled data
train_ds_unl = train_pipeline(
(train_x_unlabeled, train_y_unlabeled.astype(np.int64)), len(train_x_labeled))
images_unl, labels_unl = train_ds_unl.get_next()
# Setup another pipeline that also uses the unlabeled data, so that we use a different
# batch for computing the discriminator loss and the generator loss
train_x_unlabeled, train_y_unlabeled = shuffle(train_x_unlabeled, train_y_unlabeled)
train_ds_unl2 = train_pipeline(
(train_x_unlabeled, train_y_unlabeled.astype(np.int64)), len(train_x_labeled))
images_unl2, labels_unl2 = train_ds_unl2.get_next()
# Setup pipeline for test data
test_ds = tf.data.Dataset.from_tensor_slices((test_x, test_y.astype(np.int64)))\
.cache()\
.batch(flags_obj.batch_size)\
.repeat()\
.make_one_shot_iterator()
images_test, labels_test = test_ds.get_next()
return (images_lab, labels_lab), (images_unl, labels_unl), (images_unl2, labels_unl2), \
(images_test, labels_test)
def _next_logdir(path):
os.makedirs(path, exist_ok=True)
subdirs = [d for d in os.listdir(path) if opth.isdir(opth.join(path, d))]
logdir = opth.join(path, "run" + str(len(subdirs)).zfill(4))
os.makedirs(logdir)
return logdir
if __name__ == "__main__":
define_flags()
absl_app.run(main)
``` |
{
"source": "jostrm/azure-enterprise-scale-ml",
"score": 2
} |
#### File: esml/common/baselayer_python.py
```python
import inspect
import types
# https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
# https://github.com/dabeaz/python-cookbook/blob/master/src/9/multiple_dispatch_with_function_annotations/example1.py
class MultiMethod:
'''
Represents a single multimethod.
'''
def __init__(self, name):
self._methods = {}
self.__name__ = name
def register(self, meth):
'''
Register a new method as a multimethod
'''
sig = inspect.signature(meth)
# Build a type-signature from the method's annotations
types = []
for name, parm in sig.parameters.items():
if name == 'self':
continue
if parm.annotation is inspect.Parameter.empty:
raise TypeError(
'Argument {} must be annotated with a type'.format(name)
)
if not isinstance(parm.annotation, type):
raise TypeError(
'Argument {} annotation must be a type'.format(name)
)
if parm.default is not inspect.Parameter.empty:
self._methods[tuple(types)] = meth
types.append(parm.annotation)
self._methods[tuple(types)] = meth
def __call__(self, *args):
'''
Call a method based on type signature of the arguments
'''
types = tuple(type(arg) for arg in args[1:])
meth = self._methods.get(types, None)
if meth:
return meth(*args)
else:
raise TypeError('No matching method for types {}'.format(types))
def __get__(self, instance, cls):
'''
Descriptor method needed to make calls work in a class
'''
if instance is not None:
return types.MethodType(self, instance)
else:
return self
class MultiDict(dict):
'''
Special dictionary to build multimethods in a metaclass
'''
def __setitem__(self, key, value):
if key in self:
# If key already exists, it must be a multimethod or callable
current_value = self[key]
if isinstance(current_value, MultiMethod):
current_value.register(value)
else:
mvalue = MultiMethod(key)
mvalue.register(current_value)
mvalue.register(value)
super().__setitem__(key, mvalue)
else:
super().__setitem__(key, value)
class MultipleMeta(type):
'''
Metaclass that allows multiple dispatch of methods
'''
def __new__(cls, clsname, bases, clsdict):
return type.__new__(cls, clsname, bases, dict(clsdict))
@classmethod
def __prepare__(cls, clsname, bases):
return MultiDict()
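# Illustrative use of the metaclass, following the cookbook example linked above:
#   class Spam(metaclass=MultipleMeta):
#       def bar(self, x: int, y: int):
#           print('Bar 1:', x, y)
#       def bar(self, s: str, n: int = 0):
#           print('Bar 2:', s, n)
#   Spam().bar(2, 3)     # dispatches on (int, int) -> 'Bar 1: 2 3'
#   Spam().bar('hello')  # dispatches on (str,)     -> 'Bar 2: hello 0'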
import json
@staticmethod
def dump_jsonl(data, output_path, append=False):
"""
Write list of objects to a JSON lines file.
"""
mode = 'a+' if append else 'w'
with open(output_path, mode, encoding='utf-8') as f:
for line in data:
json_record = json.dumps(line, ensure_ascii=False)
f.write(json_record + '\n')
print('Wrote {} records to {}'.format(len(data), output_path))
@staticmethod
def load_jsonl(input_path) -> list:
"""
Read list of objects from a JSON lines file.
"""
data = []
with open(input_path, 'r', encoding='utf-8') as f:
for line in f:
data.append(json.loads(line.rstrip('\n|\r')))
print('Loaded {} records from {}'.format(len(data), input_path))
return data
@staticmethod
def dump_json_newlines(path_to_save, data):
with open(path_to_save, 'w') as outfile:
json.dump(data, outfile, indent=2)
from scipy.sparse import issparse
import pandas as pd
import numpy as np
@staticmethod
def convert_to_list(df_series_or_ndarray):
if issparse(df_series_or_ndarray):
# if array.shape[1] > 1000:
# raise ValueError("Exceeds maximum number of features for visualization (1000)")
return df_series_or_ndarray.toarray().tolist()
if (isinstance(df_series_or_ndarray, pd.DataFrame)):
return df_series_or_ndarray.values.tolist()
if (isinstance(df_series_or_ndarray, pd.Series)):
return df_series_or_ndarray.values.tolist()
if (isinstance(df_series_or_ndarray, np.ndarray)):
return df_series_or_ndarray.tolist()
return df_series_or_ndarray
```
#### File: esml/common/storage_factory.py
```python
from azureml.core import Datastore
#from azureml.core import Dataset
from azureml.data.dataset_factory import FileDatasetFactory
import json
import sys
import os
sys.path.append(os.path.abspath(".")) # NOQA: E402
from baselayer_azure import AzureBase
from pathlib import Path
#import repackage
#repackage.up()
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
#class LakeAccess(metaclass=Singleton):
class LakeAccess():
ws = None
storage_config = None
suppress_logging = False
datastore = None
service_client = None # WRITE to adls gen 2
project = None
def __init__(self,ws, esml_project): # **datasetNameKey_PathValue
self.ws = ws
self.project = esml_project
self.suppress_logging = esml_project._suppress_logging
self.ReloadConfiguration()
def ReloadConfiguration(self): # overwrite = True
old_loc = os.getcwd()
try:
os.chdir(os.path.dirname(__file__))
user_settings = ""
if(self.project.demo_mode == False):
user_settings = "../../"
with open("{}../settings/project_specific/security_config.json".format(user_settings)) as f:
self.storage_config = json.load(f)
except Exception as e:
print(e)
print("LakeAccess.ReloadConfiguration - could not open security_config.json ")
finally:
os.chdir(old_loc)
# ACL vs RBAC - https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control-model
def GetLakeAsDatastore(self, setAsDefault=True):
datastore_name = self.storage_config['lake_datastore_name']
sp_id_key = self.storage_config['kv-secret-esml-projectXXX-sp-id']
sp_secret_key = self.storage_config['kv-secret-esml-projectXXX-sp-secret']
tenant = self.storage_config['tenant']
url = self.storage_config['external_keyvault_url'].format(self.project.dev_test_prod)
sa_name, rg_name, sub_id = self.project.getLakeForActiveEnvironment()
if(self.project.verbose_logging == True):
print("GetLakeAsDatastore: ws.name", self.ws.name)
print("GetLakeAsDatastore: tenant", tenant)
print("GetLakeAsDatastore: sp_id_key", sp_id_key)
#print("GetLakeAsDatastore: sp_secret_key", sp_secret_key)
print("GetLakeAsDatastore: get_external_keyvault", url)
print("GetLakeAsDatastore: sa_name", sa_name)
print("GetLakeAsDatastore: rg_name", rg_name)
print("GetLakeAsDatastore: sub_id", sub_id)
print("GetLakeAsDatastore.setAsDefault: setAsDefault: {}".format(setAsDefault))
external_kv, tenantId = AzureBase.get_external_keyvault(self.ws, sp_id_key, sp_secret_key, tenant, url)
file_system_name = self.storage_config['lake_fs']
if(self.suppress_logging==False):
print("Register ES-ML lake as Datastore, as {}".format(datastore_name))
        # COMMON SP - used to mount the Datastore, and to work around the "upload files" limitation on GEN 2
secret_bundle1 = external_kv.get_secret(self.storage_config['esml-common-sp-id'], "")
secret_bundle2 = external_kv.get_secret(self.storage_config['esml-common-sp-secret'], "")
# ESML-fix: GA API to WRITE to GEN2 via Azure Storage SDK
self.service_client = AzureBase.initialize_storage_account_ad(sa_name, secret_bundle1.value, secret_bundle2.value, tenantId)
try:
ds = Datastore(self.ws, datastore_name) # Return if already exists
self.datastore = ds
if(ds != None):
if(setAsDefault):
ds.set_as_default()
return ds # Return Datastore....based on Authentication (Interactive or SP) in p.ws
except Exception as ex:
print("Datastore to common lake does not exists in AMLS workspace {}, creating it...{}".format(self.ws.name, datastore_name))
print(ex)
# Error & CONTINUE...No Datastore Lets create one. We need IAM: BLOB STORAGE CONTRIBUTOR, which COMMON-SP should have (Interactive Admin might alos have this, but not project-SP)
#2 API to READ
datastore = Datastore.register_azure_data_lake_gen2(workspace=self.ws,
datastore_name=datastore_name,
filesystem=file_system_name,
account_name=sa_name,
tenant_id=tenantId,
client_id=secret_bundle1.value, # COMMON-SP
client_secret=secret_bundle2.value, # COMMON-SP
grant_workspace_access=True,
subscription_id=sub_id,
resource_group=rg_name)
if(setAsDefault == True):
datastore.set_as_default()
self.datastore = datastore
return datastore
def GetBlobAsDatastore(self,setAsDefault=False):
datastore_name = self.storage_config['blob_datastore_name']
if(self.suppress_logging==False):
print("Register ES-ML BLOB as Datastore, as {}".format(datastore_name))
#if (self.project.RecreateDatastore):
try:
ds = Datastore(self.ws, datastore_name) # Return if already exists
self.datastore = ds
return ds
except:
print("No datastore with name {} exists, creating one...".format(datastore_name))
sa_name, rg_name, sub_id = self.project.getLakeForActiveEnvironment()
container_name = self.storage_config['lake_fs']
#2 Get SECRET from KeyVault - accesst to TEMP storage account
keyvault = self.ws.get_default_keyvault()
temp_blob_secret_key = self.storage_config['temp_blob_secret_key']
saKey = keyvault.get_secret(name=temp_blob_secret_key)
rg_name = self.project.common_rg_name
subscription_id = self.project._subscriptionId
datastore = Datastore.register_azure_blob_container(workspace=self.ws,
datastore_name=datastore_name, # ....usually = my_blob_name
container_name=container_name, # = data_store
account_name=sa_name,
account_key=saKey,
create_if_not_exists=False,
grant_workspace_access=True,
skip_validation=True,
subscription_id=sub_id,
resource_group=rg_name)
if(setAsDefault):
self.ws.set_default_datastore(datastore)
self.datastore = datastore
return datastore
def upload(self, file_name, local_folder_path, bronze_silver_gold_target_path,overwrite=True,use_dataset_factory = True):
storage_type_blob = self.storage_config["storage_type_blob"] # BLOB vs GEN2 # BLOB vs GEN2
data_folder = Path(local_folder_path) # path only to folder
local_file_fullpath = data_folder / file_name # Full filename
# BLOB
if(storage_type_blob):
self.datastore.upload(src_dir=local_folder_path, target_path=bronze_silver_gold_target_path, overwrite=overwrite) # BLOB
else: # GEN 2 (2 options)
use_dataset_factory = False
if(use_dataset_factory): # GEN 2 Alt 1: from azureml.data.dataset_factory import FileDatasetFactory
self.upload2_exp(local_folder_path, bronze_silver_gold_target_path,overwrite)
else: # GEN 2 Alt 2: from azure.storage.filedatalake import DataLakeServiceClient
filesystem = self.storage_config['lake_fs']
AzureBase.upload_file_to_directory(self.service_client, filesystem,file_name, local_file_fullpath, bronze_silver_gold_target_path,overwrite)
def upload2_exp(self, local_parent_folder, bronze_silver_gold_target_path,overwrite=True):
#print("Uploading to datastore: {} ".format(self.datastore))
#print("From: {} ".format(local_parent_folder))
#print("To: {} ".format(bronze_silver_gold_target_path))
FileDatasetFactory.upload_directory(src_dir=local_parent_folder, target=(self.datastore, bronze_silver_gold_target_path), pattern=None, overwrite=overwrite, show_progress=False)
#Dataset.File.upload_directory(src_dir=”./local/data”, target=(adlsgen2_datastore, path))
``` |
{
"source": "jostster/PythonTwitchBotFramework",
"score": 2
} |
#### File: twitchbot/builtin_commands/currency_commands.py
```python
from datetime import datetime, timedelta
from secrets import randbelow
from typing import Dict
from twitchbot import (
Arena,
ARENA_DEFAULT_ENTRY_FEE,
Command,
Message,
set_currency_name,
get_currency_name,
set_balance,
get_balance,
session,
get_balance_from_msg,
add_balance,
cfg,
InvalidArgumentsError,
Balance,
duel_expired,
add_duel,
accept_duel,
subtract_balance, get_duel)
PREFIX = cfg.prefix
MANAGE_CURRENCY_PERMISSION = 'manage_currency'
@Command('setcurrencyname', permission=MANAGE_CURRENCY_PERMISSION, syntax='<new_name>',
help='sets the channels currency name')
async def cmd_set_currency_name(msg: Message, *args):
if len(args) != 1:
raise InvalidArgumentsError(reason='missing required arguments', cmd=cmd_set_currency_name)
set_currency_name(msg.channel_name, args[0])
await msg.reply(
f"this channel's currency name is now \"{get_currency_name(msg.channel_name).name}\"")
@Command('getcurrencyname', help='get the channels current currency name')
async def cmd_get_currency_name(msg: Message, *ignored):
await msg.reply(
f'this channel\'s current currency name is "{get_currency_name(msg.channel_name).name}"')
@Command('bal', syntax='(target)', help='gets the caller\'s (or target\'s if specified) balance')
async def cmd_get_bal(msg: Message, *args):
if args:
target = args[0].lstrip('@')
if target not in msg.channel.chatters:
raise InvalidArgumentsError(reason=f'no viewer found by the name of "{target}"', cmd=cmd_get_bal)
else:
target = msg.author
currency_name = get_currency_name(msg.channel_name).name
balance = get_balance(msg.channel_name, target).balance
await msg.reply(whisper=True, msg=f'@{target} has {balance} {currency_name}')
@Command('setbal', permission=MANAGE_CURRENCY_PERMISSION, syntax='<new_balance> (target)',
help='sets the callers or targets balance')
async def cmd_set_bal(msg: Message, *args):
if not len(args):
raise InvalidArgumentsError(reason='missing required arguments', cmd=cmd_set_bal)
elif len(args) == 2:
target = args[1].lstrip('@')
if target not in msg.channel.chatters:
raise InvalidArgumentsError(reason=f'no viewer found by the name of "{args[1]}"', cmd=cmd_set_bal)
else:
target = msg.author
try:
new_balance = int(args[0])
if new_balance < 0:
raise InvalidArgumentsError(reason='new balance cannot be negative', cmd=cmd_set_bal)
set_balance(msg.channel_name, target, new_balance)
except ValueError:
        raise InvalidArgumentsError(reason=f'target balance must be an integer. example: 100')
await msg.reply(
f'@{target} now has {args[0]} '
f'{get_currency_name(msg.channel_name).name}')
@Command('give', syntax='<target> <amount>',
help='gives the target the specified amount from the callers currency balance')
async def cmd_give(msg: Message, *args):
if len(args) != 2:
raise InvalidArgumentsError(reason='missing required arguments', cmd=cmd_give)
if not msg.mentions or msg.mentions[0] not in msg.channel.chatters:
raise InvalidArgumentsError(reason=f'no viewer found by the name "{(msg.mentions or args)[0]}"')
caller = get_balance_from_msg(msg)
target = get_balance(msg.channel_name, msg.mentions[0])
try:
give = int(args[1])
except ValueError:
        raise InvalidArgumentsError(reason='give amount must be an integer, example: 100', cmd=cmd_give)
if give <= 0:
raise InvalidArgumentsError(reason='give amount must be 1 or higher', cmd=cmd_give)
cur_name = get_currency_name(msg.channel_name).name
if caller.balance < give:
raise InvalidArgumentsError(reason=f"{msg.mention} you don't have enough {cur_name}", cmd=cmd_give)
caller.balance -= give
target.balance += give
session.commit()
await msg.reply(
f"@{msg.author} you gave @{args[0]} {give} {cur_name}, @{args[0]}'s balance is now {target.balance}")
@Command('gamble', syntax='<dice_sides> <bet>',
help='throws a X sided die and if it rolls on 1 you get twice your bet + (bet*(6/<dice_sides>)), '
'if the dice sides are more than 6 you get more payout, '
'but it is also a lower chance to roll a 1.')
async def cmd_gamble(msg: Message, *args):
if len(args) != 2:
raise InvalidArgumentsError(reason='missing required arguments', cmd=cmd_gamble)
try:
sides = int(args[0])
bet = int(args[1])
except ValueError:
raise InvalidArgumentsError(reason='invalid value for sides or bet', cmd=cmd_gamble)
if bet < 10:
        raise InvalidArgumentsError(reason='bet cannot be less than 10', cmd=cmd_gamble)
elif sides < 2:
raise InvalidArgumentsError(reason='sides cannot be less than 2', cmd=cmd_gamble)
bal = get_balance_from_msg(msg)
cur_name = get_currency_name(msg.channel_name).name
if bal.balance < bet:
raise InvalidArgumentsError(reason=f"{msg.mention} you don't have enough {cur_name}", cmd=cmd_gamble)
n = randbelow(sides) + 1
if n == 1:
if sides >= 6:
bet *= 2
gain = bet + int(bet * (sides / 6))
bal.balance += gain
await msg.reply(f'you rolled {n} and won {gain} {cur_name}')
else:
bal.balance -= bet
await msg.reply(f'you rolled {n} and lost your bet of {bet} {cur_name}')
session.commit()
last_mine_time = {}
mine_gain = 50
@Command('mine', help='mines for currency, gives you a predefined amount (default 50)')
async def cmd_mine(msg: Message, *args):
key = (msg.author, msg.channel_name)
diff = (datetime.now() - last_mine_time.get(key, datetime.now())).total_seconds()
if key not in last_mine_time or diff >= 0:
bal = get_balance_from_msg(msg)
bal.balance += mine_gain
session.commit()
last_mine_time[key] = datetime.now() + timedelta(minutes=5)
await msg.reply(
f'@{msg.author} you went to work at the mines and came out with '
f'{mine_gain} {get_currency_name(msg.channel_name).name} worth of gold',
whisper=True)
else:
await msg.reply(f'you cannot mine again for {int(abs(diff))} seconds', whisper=True)
@Command('top', help="lists the top 10 balance holders")
async def cmd_top(msg: Message, *args):
results = (session.query(Balance)
.filter(Balance.channel == msg.channel_name, Balance.user != msg.channel_name,
Balance.user != cfg.nick.lower())
.order_by(Balance.balance.desc())
.limit(10))
b: Balance
message = ' | '.join(f'{i}: {b.user} => {b.balance}' for i, b in enumerate(results, 1))
await msg.reply(message or 'no users found')
running_arenas: Dict[str, Arena] = {}
@Command('arena', syntax='<entry_fee>',
         help='starts an arena match and waits a set amount of time for people to enter; '
              'if not enough people enter, the arena is cancelled and everyone is refunded, '
              'otherwise the winner gets all of the entry fees paid')
async def cmd_arena(msg: Message, *args):
def _can_pay_entry_fee(fee):
return get_balance(msg.channel_name, msg.author).balance >= fee
def _remove_running_arena_entry(arena: Arena):
try:
del running_arenas[arena.channel.name]
except KeyError:
pass
arena = running_arenas.get(msg.channel_name)
curname = get_currency_name(msg.channel_name).name
# arena is already running for this channel
if arena:
if msg.author in arena.users:
return await msg.reply(
whisper=True,
                msg='you have already entered the arena')
elif not _can_pay_entry_fee(arena.entry_fee):
await msg.reply(
whisper=True,
msg=f'{msg.mention} you do not have enough {curname} '
f'to join the arena, entry_fee is {arena.entry_fee} {curname}')
return
arena.add_user(msg.author)
add_balance(msg.channel_name, msg.author, -arena.entry_fee)
await msg.reply(
whisper=True,
msg=f'{msg.mention} you have been added to the arena, '
f'you were charged {arena.entry_fee} {curname} for entry')
# start a new arena as one is not already running for this channel
else:
if args:
try:
entry_fee = int(args[0])
except ValueError:
raise InvalidArgumentsError(reason='invalid value for entry fee, example: 100', cmd=cmd_arena)
else:
entry_fee = ARENA_DEFAULT_ENTRY_FEE
if entry_fee and entry_fee < ARENA_DEFAULT_ENTRY_FEE:
raise InvalidArgumentsError(reason=f'entry fee cannot be less than {ARENA_DEFAULT_ENTRY_FEE}',
cmd=cmd_arena)
if not _can_pay_entry_fee(entry_fee):
await msg.reply(
whisper=True,
msg=f'{msg.mention} you do not have {entry_fee} {curname}')
return
arena = Arena(msg.channel, entry_fee, on_arena_ended_func=_remove_running_arena_entry)
arena.start()
arena.add_user(msg.author)
subtract_balance(msg.channel_name, msg.author, arena.entry_fee)
running_arenas[msg.channel_name] = arena
@Command('duel', syntax='<target_user> (amount, default: 10)',
help='challenges a user to a duel with the bid as the reward')
async def cmd_duel(msg: Message, *args):
if not args:
raise InvalidArgumentsError(reason='missing required arguments', cmd=cmd_duel)
target = args[0].lstrip('@')
if target == msg.author:
raise InvalidArgumentsError(reason='you cannot duel yourself', cmd=cmd_duel)
if target not in msg.channel.chatters:
raise InvalidArgumentsError(reason=f'{msg.mention} {target} is not in this channel', cmd=cmd_duel)
duel = get_duel(msg.channel_name, msg.author, target)
if duel and not duel_expired(duel):
raise InvalidArgumentsError(reason=f'{msg.mention} you already have a pending duel with {target}', cmd=cmd_duel)
try:
bet = int(args[1])
except ValueError:
raise InvalidArgumentsError(reason=f'invalid bet: {args[1]}, bet must be a number with no decimals, ex: 12',
cmd=cmd_duel)
except IndexError:
bet = 10
add_duel(msg.channel_name, msg.author, target, bet)
currency_name = get_currency_name(msg.channel_name).name
await msg.reply(
f'{msg.mention} has challenged @{target} to a duel for {bet} {currency_name}'
f', do "{cfg.prefix}accept {msg.mention}" to accept the duel')
@Command('accept', syntax='<challenger>', help='accepts a duel issued by the challenger that is passed to this command')
async def cmd_accept(msg: Message, *args):
if len(args) != 1:
raise InvalidArgumentsError('missing required arguments')
challenger = args[0].lstrip('@')
winner, bet = accept_duel(msg.channel_name, challenger, msg.author)
if not winner:
raise InvalidArgumentsError(
reason=f'{msg.mention}, you have not been challenged by {challenger}, or the duel might have expired',
cmd=cmd_accept)
    loser = challenger if winner == msg.author else msg.author
add_balance(msg.channel_name, winner, bet)
subtract_balance(msg.channel_name, loser, bet)
currency_name = get_currency_name(msg.channel_name).name
await msg.reply(f'@{winner} has won the duel, {bet} {currency_name} went to the winner')
```
#### File: PythonTwitchBotFramework/twitchbot/command.py
```python
import os
import typing
from datetime import datetime
from importlib import import_module
from typing import Dict, Callable, Optional, List, Tuple
from .util import temp_syspath
from twitchbot.database import CustomCommand
from twitchbot.message import Message
from .config import cfg
from .enums import CommandContext
from .util import get_py_files, get_file_name
if typing.TYPE_CHECKING:
from .modloader import Mod
__all__ = (
'Command', 'commands', 'command_exist', 'load_commands_from_directory', 'DummyCommand', 'CustomCommandAction',
'ModCommand', 'SubCommand', 'get_command', 'CUSTOM_COMMAND_PLACEHOLDERS')
class Command:
def __init__(self, name: str, prefix: str = None, func: Callable = None, global_command: bool = True,
context: CommandContext = CommandContext.CHANNEL, permission: str = None, syntax: str = None,
help: str = None):
"""
:param name: name of the command (without the prefix)
        :param prefix: prefix required before the command name (defaults to the config's prefix if None)
:param func: the function that the commands executes
:param global_command: should the command be registered globally?
:param context: the context through which calling the command is allowed
"""
self.help: str = help
self.syntax: str = syntax
self.permission: str = permission
self.context: CommandContext = context
self.prefix: str = (prefix if prefix is not None else cfg.prefix).lower()
self.func: Callable = func
self.name: str = name.lower()
self.fullname: str = self.prefix + self.name
self.sub_cmds: Dict[str, Command] = {}
self.parent: Command = None
if global_command:
commands[self.fullname] = self
def _get_cmd_func(self, args) -> Tuple['Callable', List[str]]:
"""returns a tuple of the final commands command function and the remaining argument"""
if not self.sub_cmds or not args or args[0].lower() not in self.sub_cmds:
return self.func, args
return self.sub_cmds[args[0].lower()]._get_cmd_func(args[1:])
# while verison:
# cmd = self
# while cmd.sub_cmds and args and args[0].lower() in cmd.sub_cmds:
# cmd = cmd.sub_cmds[args[0].lower()]
# args = args[1:]
#
# return cmd.func, args
async def execute(self, msg: Message):
func, args = self._get_cmd_func(msg.parts[1:])
await func(msg, *args)
# decorator support
def __call__(self, func) -> 'Command':
self.func = func
return self
def __str__(self):
return f'<{self.__class__.__name__} fullname={repr(self.fullname)} parent={self.parent}>'
def __getitem__(self, item):
return self.sub_cmds.get(item.lower()) or self.sub_cmds.get(item.lower()[1:])
class SubCommand(Command):
def __init__(self, parent: Command, name: str, func: Callable = None, permission: str = None, syntax: str = None,
help: str = None):
super().__init__(name=name, prefix='', func=func, permission=permission, syntax=syntax, help=help,
global_command=False)
self.parent: Command = parent
self.parent.sub_cmds[self.name] = self
class DummyCommand(Command):
def __init__(self, name: str, prefix: str = None, global_command: bool = True,
context: CommandContext = CommandContext.CHANNEL, permission: str = None, syntax: str = None,
help: str = None):
super().__init__(name=name, prefix=prefix, func=self.exec, global_command=global_command,
context=context, permission=permission, syntax=syntax, help=help)
async def exec(self, msg: Message, *args):
"""the function called when the dummy command is executed"""
if self.sub_cmds:
await msg.reply(f'command options: {", ".join(self.sub_cmds)}')
else:
await msg.reply('no sub-commands were found for this command')
def add_sub_cmd(self, name: str) -> 'DummyCommand':
"""adds a new DummyCommand to the current DummyCommand as a sub-command, then returns the new DummyCommand"""
cmd = DummyCommand(name, prefix='', global_command=False)
self.sub_cmds[cmd.fullname] = cmd
return cmd
def _calc_channel_live_time(msg) -> str:
if msg.channel.live:
        return format((datetime.now() - msg.channel.stats.started_at).total_seconds() / 3600, '.1f')
return '[NOT LIVE]'
CUSTOM_COMMAND_PLACEHOLDERS = (
(
'%user',
lambda msg: f'@{msg.author}'
),
(
'%uptime',
_calc_channel_live_time
),
(
'%channel',
lambda msg: msg.channel_name
),
)
class CustomCommandAction(Command):
def __init__(self, cmd):
super().__init__(cmd.name, prefix='', func=self.execute, global_command=False)
self.cmd: CustomCommand = cmd
async def execute(self, msg: Message):
resp = self.cmd.response
for placeholder, func in CUSTOM_COMMAND_PLACEHOLDERS:
if placeholder in resp:
resp = resp.replace(placeholder, func(msg))
await msg.channel.send_message(resp)
class ModCommand(Command):
def __init__(self, mod_name: str, name: str, prefix: str = None, func: Callable = None, global_command: bool = True,
context: CommandContext = CommandContext.CHANNEL, permission: str = None, syntax: str = None,
help: str = None):
super().__init__(name, prefix, func, global_command, context, permission, syntax, help)
self.mod_name = mod_name
self.mod: 'Mod' = None
async def execute(self, msg: Message):
# circular dependency hack
from .modloader import mods
if self.mod is None:
self.mod = mods[self.mod_name]
func, args = self._get_cmd_func(msg.parts[1:])
if 'self' in func.__code__.co_varnames:
await func(self.mod, msg, *args)
else:
await func(msg, *args)
commands: Dict[str, Command] = {}
def load_commands_from_directory(path):
print(f'loading commands from {path}...')
path = os.path.abspath(path)
if not os.path.exists(path):
return
with temp_syspath(path):
for file in get_py_files(path):
fname = get_file_name(file)
mod = import_module(fname)
def command_exist(name: str) -> bool:
"""
returns a bool indicating if a command exists,
    tries adding the config's prefix to the name if it is not found initially,
does not check for custom commands
"""
return any(cmd in commands for cmd in (name, cfg.prefix + name))
def get_command(name: str) -> Optional[Command]:
"""
    gets a command,
    tries adding the config's prefix to the name if it is not found initially,
    returns None if it does not exist; does not get custom commands
"""
return commands.get(name) or commands.get(cfg.prefix + name)
```
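For orientation, here is a minimal sketch of how the `Command` and `SubCommand` classes above are typically used through their decorator support; the command name, reply text, and the top-level `twitchbot` import path are illustrative assumptions that mirror the balance commands shown earlier, not part of the framework's shipped commands.
```python
# Hypothetical example command (not part of the framework) built with the
# decorator support shown above; 'greet' and its reply text are made up.
from twitchbot import Command, SubCommand, Message


@Command('greet', syntax='(name)', help='greets the caller, or the given name')
async def cmd_greet(msg: Message, *args):
    # fall back to the message author when no name is passed
    name = args[0].lstrip('@') if args else msg.author
    await msg.reply(f'hello @{name}!')


@SubCommand(cmd_greet, 'loud', help='greets the caller in upper case')
async def cmd_greet_loud(msg: Message, *args):
    await msg.reply(f'HELLO @{msg.author.upper()}!')
```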
#### File: PythonTwitchBotFramework/twitchbot/message.py
```python
from typing import List, Tuple, TYPE_CHECKING, Optional
from .util import get_message_mentions
from twitchbot.channel import Channel, channels
from .irc import Irc
from .regex import RE_PRIVMSG, RE_WHISPER, RE_JOINED_CHANNEL, RE_USERNOTICE
from .enums import MessageType
from .util import split_message
from .tags import Tags
from .emote import emotes, Emote
if TYPE_CHECKING:
from .bots import BaseBot
class Message:
def __init__(self, msg, irc=None, bot=None):
self.channel: Optional[Channel] = None
self.author: Optional[str] = None
self.content: Optional[str] = None
self.parts: List[str] = []
self.type: MessageType = MessageType.NONE
self.raw_msg: str = msg
self.receiver: Optional[str] = None
self.irc: Irc = irc
self.tags: Optional[Tags] = None
self.emotes: List[Emote] = []
self.mentions: Tuple[str] = ()
self.bot: 'BaseBot' = bot
self._parse()
def _parse(self):
m = RE_USERNOTICE.search(self.raw_msg)
if m:
self.tags = Tags(m['tags'])
self.channel = channels[m['channel']]
self.author = self.tags.all_tags.get('login')
self.content = m['content']
if self.tags.msg_id in {'sub', 'resub', 'subgift', 'anonsubgift', 'submysterygift'}:
self.type = MessageType.SUBSCRIPTION
m = RE_PRIVMSG.search(self.raw_msg)
if m:
self.channel = channels[m['channel']]
self.author = m['user']
self.content = m['content']
self.type = MessageType.PRIVMSG
self.parts = split_message(self.content)
self.tags = Tags(m['tags'])
self.mentions = get_message_mentions(self)
m = RE_WHISPER.search(self.raw_msg)
if m:
self.author = m['user']
self.receiver = m['receiver']
self.content = m['content']
self.type = MessageType.WHISPER
self.parts = split_message(self.content)
m = RE_JOINED_CHANNEL.search(self.raw_msg)
if m:
self.channel = channels[m['channel']]
self.author = m['user']
self.type = MessageType.JOINED_CHANNEL
elif self.raw_msg == 'PING :tmi.twitch.tv':
self.type = MessageType.PING
if self.parts and any(p in emotes for p in self.parts):
self.emotes = tuple(emotes[p] for p in self.parts if p in emotes)
@property
def is_user_message(self):
return self.type in (MessageType.WHISPER, MessageType.PRIVMSG)
@property
def is_privmsg(self):
return self.type is MessageType.PRIVMSG
@property
def is_whisper(self):
return self.type is MessageType.WHISPER
@property
def is_subscription(self):
return self.type is MessageType.SUBSCRIPTION
@property
def mention(self):
return f'@{self.author}' if self.author else ''
@property
def channel_name(self):
if self.channel:
return self.channel.name
if self.author:
return self.author
return ''
async def reply(self, msg: str = '', whisper=False):
if not msg:
raise ValueError('msg is empty, msg must be a non-empty string')
if not isinstance(msg, str):
msg = str(msg)
if self.type is MessageType.PRIVMSG and not whisper:
await self.channel.send_message(msg)
elif self.type is MessageType.WHISPER or (whisper and self.type is MessageType.PRIVMSG):
if self.irc is None:
raise ValueError('no irc instance set for this message')
await self.irc.send_whisper(self.author, msg)
# else:
# raise ValueError(f'invalid message type to reply, expected PRIVMSG or WHISPER, current: {self.type}')
def __str__(self):
if self.type is MessageType.PRIVMSG:
return f'{self.author}({self.channel.name}): {self.content}'
elif self.type is MessageType.WHISPER:
return f'{self.author} -> {self.receiver}: {self.content}'
elif self.type is MessageType.PING:
return 'PING'
return self.raw_msg
def __len__(self):
"""
:return: the len() of self.parts
"""
return len(self.parts)
```
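The handler below is a short, hypothetical sketch exercising the `Message` helpers defined above (`is_privmsg`, `mentions`, `mention`, `channel_name`, and whispered replies); the command name and reply texts are placeholders, and the package-level import is assumed.
```python
# Illustrative only: exercises the Message properties defined above.
from twitchbot import Command, Message


@Command('whoami', help='replies with caller info via whisper')
async def cmd_whoami(msg: Message, *args):
    if not msg.is_privmsg:
        return  # only handle regular channel messages
    mentioned = ', '.join(msg.mentions) if msg.mentions else 'nobody'
    # whisper=True routes the reply through irc.send_whisper(), as shown in reply()
    await msg.reply(f'you are {msg.mention} in #{msg.channel_name}, you mentioned: {mentioned}',
                    whisper=True)
```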
#### File: PythonTwitchBotFramework/twitchbot/modloader.py
```python
import os
import traceback
from inspect import isclass
from typing import Dict
from .util import temp_syspath, get_py_files, get_file_name
from .channel import Channel
from .command import Command
from .config import cfg
from .enums import Event
from .message import Message
from .disabled_mods import is_mod_disabled
from importlib import import_module
__all__ = ('ensure_mods_folder_exists', 'Mod', 'register_mod', 'trigger_mod_event', 'mods',
'load_mods_from_directory', 'mod_exists')
# noinspection PyMethodMayBeStatic
class Mod:
name = 'DEFAULT'
# region events
async def on_enable(self, channel: str):
"""
triggered when the mod is enabled
:param channel: the channel the mod is enabled in
"""
async def on_disable(self, channel: str):
"""
triggered when the mod is disabled
:param channel: the channel the mod is disabled in
"""
async def on_connected(self):
"""
triggered when the bot connects to all the channels specified in the config file
"""
async def on_raw_message(self, msg: Message):
"""
triggered the instant a message is received,
this message can be any message received,
        including Twitch's messages that do not have any useful information
"""
async def on_privmsg_sent(self, msg: str, channel: str, sender: str):
"""
triggered when the bot sends a privmsg
"""
async def on_privmsg_received(self, msg: Message):
"""
triggered when a privmsg is received, is not triggered if the msg is a command
"""
async def on_whisper_sent(self, msg: str, receiver: str, sender: str):
"""
triggered when the bot sends a whisper to someone
"""
async def on_whisper_received(self, msg: Message):
"""
triggered when a user sends the bot a whisper
"""
async def on_permission_check(self, msg: Message, cmd: Command) -> bool:
"""
triggered when a command permission check is requested
:param msg: the message the command was found from
:param cmd: the command that was found
:return: bool indicating if the user has permission to call the command, True = yes, False = no
"""
return True
async def on_before_command_execute(self, msg: Message, cmd: Command) -> bool:
"""
triggered before a command is executed
:return bool, if return value is False, then the command will not be executed
"""
return True
async def on_after_command_execute(self, msg: Message, cmd: Command):
"""
triggered after a command has executed
"""
async def on_bits_donated(self, msg: Message, bits: int):
"""
triggered when a bit donation is posted in chat
"""
async def on_channel_joined(self, channel: Channel):
"""
triggered when the bot joins a channel
"""
async def on_channel_subscription(self, channel: Channel, msg: Message):
"""
triggered when a user subscribes
"""
# endregion
mods: Dict[str, Mod] = {}
def register_mod(mod: Mod) -> bool:
"""
registers a mod globally
:param mod: the mod to register
:return: if registration was successful
"""
if mod.name in mods:
return False
mods[mod.name] = mod
return True
async def trigger_mod_event(event: Event, *args, channel: str = None) -> list:
"""
    triggers an event on all mods
    if a channel is passed, it is checked whether the mod is enabled for that channel,
    if not, the event for that mod is skipped
:param event: the event to raise on all the mods
:param args: the args to pass to the event
:param channel: the channel the event is being raised from
:return: the result of all the mod event calls in a list
"""
async def _missing_function(*ignored):
pass
output = []
for mod in mods.values():
if channel and is_mod_disabled(channel, mod.name):
continue
try:
output.append(await getattr(mod, event.value, _missing_function)(*args))
except Exception as e:
            print(f'\nerror has occurred while triggering an event on a mod, details:\n'
f'mod: {mod.name}\n'
f'event: {event}\n'
f'error: {type(e)}\n'
f'reason: {e}\n'
f'stack trace:')
traceback.print_exc()
return output
def ensure_mods_folder_exists():
"""
    creates the mod folder if it does not exist
"""
if not os.path.exists(cfg.mods_folder):
os.mkdir(cfg.mods_folder)
def load_mods_from_directory(fullpath):
"""
loads all mods from the given directory, only .py files are loaded
:param fullpath: the path to search for mods to load
"""
print('loading mods from:', fullpath)
with temp_syspath(fullpath):
for file in get_py_files(fullpath):
# we need to import the module to get its attributes
module = import_module(get_file_name(file))
for obj in module.__dict__.values():
# verify the obj is a class, is a subclass of Mod, and is not Mod class itself
if not isclass(obj) or not issubclass(obj, Mod) or obj is Mod:
continue
                # create an instance of the mod subclass, then register it
register_mod(obj())
def mod_exists(mod: str) -> bool:
"""
    returns whether a mod exists
:param mod: the mod to check for
:return: bool indicating if the mod exists
"""
return mod in mods
```
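A minimal sketch of a `Mod` subclass follows; any such class found by `load_mods_from_directory` is instantiated and registered through `register_mod`. The mod name, trigger phrase, and the assumption that `Mod`, `Message`, and `Channel` are re-exported at package level are illustrative, not taken from the library.
```python
# Hypothetical mod for illustration; placing a file like this in the mods folder
# lets load_mods_from_directory() register an instance of it.
from twitchbot import Mod, Message, Channel


class GreeterMod(Mod):
    name = 'greeter'

    async def on_channel_joined(self, channel: Channel):
        print(f'greeter mod active in #{channel.name}')

    async def on_privmsg_received(self, msg: Message):
        # runs for normal chat messages that are not commands (see the base class above)
        if msg.content and msg.content.lower() == 'hello bot':
            await msg.reply(f'hello {msg.mention}!')
```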
#### File: PythonTwitchBotFramework/twitchbot/overrides.py
```python
from .enums import Event
overrides = {}
def override_event(event: Event):
    def _func(func):
        overrides[event] = func
        return func  # return the function unchanged so the decorated name still refers to it
    return _func
```
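Below is a sketch of how `override_event` could be applied. Which members the `Event` enum actually exposes is not shown in this dump, so `Event.on_privmsg_received` is an assumption based on the event names listed in the `Mod` base class, as is the package-level import.
```python
# Illustrative only: register a replacement handler for one bot event.
from twitchbot import Event, override_event


@override_event(Event.on_privmsg_received)  # assumed Event member name
async def my_privmsg_override(msg):
    print(f'overridden handler saw: {msg.content}')
```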
#### File: twitchbot/util/misc_util.py
```python
import os
import sys
from contextlib import contextmanager
from glob import iglob
from typing import List
__all__ = ('get_py_files', 'get_file_name', 'temp_syspath')
def get_py_files(path: str) -> List[str]:
"""gets all python (.py) files in a folder"""
yield from iglob(os.path.join(path, '*.py'))
def get_file_name(path: str):
"""gets the files name without the extension"""
return os.path.basename(path).split('.')[0]
@contextmanager
def temp_syspath(fullpath):
"""
temporarily appends the fullpath to sys.path, yields, then removes it from sys.path
if the fullpath is already in sys.path the append/remove is skipped
"""
if not os.path.isabs(fullpath):
fullpath = os.path.abspath(fullpath)
if fullpath not in sys.path:
sys.path.insert(0, fullpath)
yield
sys.path.remove(fullpath)
else:
yield
``` |
{
"source": "JostTim/custom_libs",
"score": 3
} |
#### File: custom_libs/LibrairieVideoAna/ExtractFromDB.py
```python
import mysql.connector
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import calendar
import datetime
from datetime import timedelta
import random
def SessionsList(mouse, trainingset):
cnx = mysql.connector.connect(host="127.0.0.1",user="Tim",passwd="<PASSWORD>!",db="maze")
cursor = cnx.cursor()
query = ("""
SELECT distinct(session_id) FROM maze.mouses_sessions as ms
INNER JOIN mouses as mo ON mo.mouses_id = ms.mouses_id
where training_set_id = %s and mouse_number = %s
order by session_date
""")
Add= (trainingset,mouse)
cursor.execute(query, Add)
result=cursor.fetchall()
cursor.close()
SList=[y for x in result for y in x]
return SList
def SessionDetail(sessionid,arglist):
query = ("""
SELECT event_value FROM session_detail sd
WHERE session_id = %s AND event_value IN(""")
i = 0
for I in arglist :
if i >= len(arglist)-1:
query = query + str(I)
else:
query = query + str(I) + ", "
i = i+1
query = query + ") ORDER BY timestamp"
cnx = mysql.connector.connect(host="127.0.0.1",user="Tim",passwd="<PASSWORD>!",db="maze")
cursor = cnx.cursor()
Add= (sessionid,)
cursor.execute(query, Add)
result=cursor.fetchall()
cursor.close()
Dlist = [y for x in result for y in x]
return Dlist
def MouseList(trainingset):
cnx = mysql.connector.connect(host="127.0.0.1",user="Tim",passwd="<PASSWORD>!",db="maze")
cursor = cnx.cursor()
query = ("""
SELECT distinct(mouse_number) FROM maze.mouses_sessions as ms
INNER JOIN mouses as mo ON mo.mouses_id = ms.mouses_id
where training_set_id = %s
order by mouse_number""")
Add= (trainingset,)
cursor.execute(query, Add)
result=cursor.fetchall()
cursor.close()
Mlist = [y for x in result for y in x]
return Mlist
def TrainingsetName(trainingset):
cnx = mysql.connector.connect(host="127.0.0.1",user="Tim",passwd="<PASSWORD>!",db="maze")
    cursor = cnx.cursor()
query = ("""
SELECT training_set_name, training_set_description FROM maze.training_set_def as ms
where training_set_id = %s""")
Add= (trainingset,)
cursor.execute(query, Add)
result=cursor.fetchall()
cursor.close()
Mlist = [y for x in result for y in x]
print("Select: {} : {} : {}".format(trainingset,Mlist[0],Mlist[1]))
return Mlist[0]
def _test():
assert SessionDetail(0,[1]) == []
if __name__ == '__main__':
_test()
```
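A small, hypothetical driver for the query helpers above; the training set id and the event codes passed to `SessionDetail` are placeholder values, and the script assumes the same local MySQL server used by the functions is reachable.
```python
# Hypothetical usage of the helpers above; 3, 10 and 11 are placeholder values.
from ExtractFromDB import MouseList, SessionsList, SessionDetail

training_set = 3
for mouse in MouseList(training_set):
    for session in SessionsList(mouse, training_set):
        # fetch only the event codes of interest, ordered by timestamp
        events = SessionDetail(session, [10, 11])
        print(mouse, session, len(events), 'events')
```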
#### File: LibrairieVideoCompress/VideoCompression/HirisSeqReader_V2.py
```python
import struct
import os
from termcolor import colored
import sys
import gc
import logging
import re
##Unused imports
#import csv
#import mmap
#import datetime
#import array
#import glob
#import time
##Extension libraries imports
import configparser
import numpy as np
import pyprind
from cv2 import VideoWriter, VideoWriter_fourcc, imread
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#from cv2 import cv
def Seq_to_Video(seq_path,output_folder,**kwargs):
"""
    Reads the sqb sequence index binary file.
    The data is laid out according to the following C structure:
    typedef struct
    {
    long offset; // 4 bytes -> + 4 padding bytes because the memory is aligned
    double TimeStamp; // 8 bytes
    int binfile; // 4 bytes -> + 4 padding bytes because the memory is aligned
    } IMGDATA;
"""
logger = logging.getLogger("Seq_to_Video")
logger.setLevel(logging.INFO)
if not os.path.exists(seq_path):
print(colored("INFO : File do not exist, in folder : {}".format(seq_path),"red"))
logger.error("INFO : File do not exist, in folder : {}".format(seq_path))
return False, "input file do not exist"
logger.info("Opening file {}".format(seq_path))
if "alerts" in kwargs:
alerts = kwargs.get("alerts")
else:
alerts = True
if "output_name" in kwargs:
output_name = kwargs.get("output_name")
else :
input_path = seq_path
path,file = os.path.split(input_path)
if file.endswith(".seq") and path != "":
output_name = os.path.basename(path)
else:
if path == "" or path == None:
output_name = file
# sys.exit("ERROR 2 INVALID_PATH : You must either specify a filename with : output_name = ""Nameofyourfile"" (better practice is doing it iteratively) or the path input to get the seq file, used in HirisSeqReader, with input_path = ""pathtoyourvideo"" ")
else :
if "\\" not in path:
output_name = path
else :
output_name = os.path.basename(path)
if "extension" in kwargs:
extension = kwargs.get("extension")
else:
if alerts :
print(colored("Using default extension (.avi) as none was specified","blue"))
logger.debug("Using default extension (.avi) as none was specified")
extension = ".avi"
if "fps" in kwargs:
fps = kwargs.get("fps")
else:
fps = 30
if alerts :
print(colored("Using default framerate (30 fps) as none was specified","blue"))
logger.debug("Using default framerate (30 fps) as none was specified")
if "codec" in kwargs:
codec = kwargs.get("codec")
else:
codec = "MJPG"
if alerts :
print(colored("Using default codec (MJPG) as none was specified","blue"))
logger.debug("Using default codec (MJPG) as none was specified")
if "color" in kwargs:
color = kwargs.get("color")
else:
color = False
if alerts :
print(colored("Interpreting data as greyscale images as no color info was specified","blue"))
logger.debug("Interpreting data as greyscale images as no color info was specified")
FullOutputPathname = os.path.join(output_folder,output_name+extension)
logger.debug(output_folder)
logger.debug(output_name)
logger.debug(FullOutputPathname)
if os.path.exists(FullOutputPathname):
print("Video {} Already Exist, searching next".format(output_name+".avi"))
logger.info("File {} already exist, skipping".format(FullOutputPathname))
return False, "output file already exist"
cfg = configparser.ConfigParser()
cfg.read(seq_path)
try:
width = int(cfg.get('Sequence Settings', 'Width'))
height = int(cfg.get('Sequence Settings', 'Height'))
bpp = int(cfg.get('Sequence Settings', 'BytesPerPixel'))
num_images = cfg.get('Sequence Settings', 'Number of files')
bin_file = cfg.get('Sequence Settings', 'Bin File')
sqb_path = seq_path.replace('.seq', '.sqb')
except Exception as e:
print(colored("Error : {} on file : {}".format(e,seq_path),"red"))
logger.error("Error : {} on file : {}".format(e,seq_path))
return False, "seq config read"
pathstr = os.path.dirname(seq_path)
if height < 10 or width < 10 :
logger.error("Error on file : {}".format(seq_path) + "Width or Heidth not compliant (<10)")
return False, "Dimension"
if int(num_images) < 10 :
#for files in os.path.dirname(seq_path) :
#QuickRegexp(files)
#if True:
# pass#ADD CODE HERE TO TEST IF FILE IS CORRUPTED OR SIMPLY END OF A SESSION
logger.error("Error on file : {}".format(seq_path) + "Number of frames not compliant (<10)")
return False, "Frames"
if not os.path.exists(output_folder):
try :
os.makedirs(output_folder)
except FileExistsError:
pass
size = width , height
fourcc = VideoWriter_fourcc(*codec)
vid = VideoWriter(FullOutputPathname, fourcc, fps, size, color)
# VideoArray = np.empty([height,width,int(num_images)])
print("Processing Sequence : {}".format(seq_path))
print("Video format : {} x {}".format(height,width))
print(colored("Writing to {}".format(FullOutputPathname),"green"))
bar = pyprind.ProgBar(int(num_images),bar_char='░')
with open(sqb_path,'rb') as f :
try :
for i in range(0, int(num_images)):
offset = struct.unpack('l', f.read(4))
                # These variables are unused but the file has to be read in a specific order to access the valuable data
# padding = f.read(4)
# timestamp = struct.unpack('d', f.read(8))
f.read(4)
struct.unpack('d', f.read(8))
#End of unused variables
binfile = struct.unpack('i', f.read(4))
                # These variables are unused but the file has to be read in a specific order to access the valuable data
# padding = f.read(4)
f.read(4)
#End of unused variables
# print(offset)
bin_path = "%s\\%s%0.5d.bin" % (pathstr, bin_file, binfile[0])
# tiff_file_path = "%s_%0.5d.tif" %(tiff_path, i)
f_bin = open(bin_path, 'rb')
f_bin.seek(offset[0], os.SEEK_SET)
bytes = f_bin.read(height*width*bpp)
if bpp == 2:
buffer = np.frombuffer(bytes, dtype=np.uint16)
else:
buffer = np.frombuffer(bytes, dtype=np.uint8)
nparr2 = buffer.reshape(height, width)
# cv2.imwrite(tiff_file_path, nparr2)
f_bin.close()
# imgplot = plt.imshow(nparr2,cmap='gray_r')
# plt.show(imgplot)
# print(np.shape(nparr2))
# input()
# VideoArray[:,:,i] = nparr2
vid.write(np.uint8(nparr2))
bar.update()
# for ImageIndex in range(np.size(VideoArray,2)):
# print(ImageIndex)
vid.release()
except Exception as e:
print(colored("Error : {} on file : {}".format(e,seq_path),"red"))
logger.error("Error : {} on file : {}".format(e,seq_path))
return False, "binary file I/O"
del bar
del cfg
# del VideoArray
gc.collect()
print()
print("Video compression {} sucessfull".format(seq_path))
logger.info("Video compression {} sucessfull".format(seq_path))
return True, "none"
def HirisSeqReader(seq_path):
"""
    Reads the sqb sequence index binary file.
    The data is laid out according to the following C structure:
    typedef struct
    {
    long offset; // 4 bytes -> + 4 padding bytes because the memory is aligned
    double TimeStamp; // 8 bytes
    int binfile; // 4 bytes -> + 4 padding bytes because the memory is aligned
    } IMGDATA;
"""
cfg = configparser.ConfigParser()
cfg.read(seq_path)
width = int(cfg.get('Sequence Settings', 'Width'))
height = int(cfg.get('Sequence Settings', 'Height'))
bpp = int(cfg.get('Sequence Settings', 'BytesPerPixel'))
num_images = cfg.get('Sequence Settings', 'Number of files')
bin_file = cfg.get('Sequence Settings', 'Bin File')
sqb_path = seq_path.replace('.seq', '.sqb')
pathstr = os.path.dirname(seq_path)
if height < 0 or width< 0 or int(num_images) < 0 :
return False
VideoArray = np.empty([height,width,int(num_images)])
print("Processing Sequence : {}".format(seq_path))
print("Video format : {} x {}".format(height,width))
bar = pyprind.ProgBar(int(num_images),bar_char='░')
with open(sqb_path,'rb') as f :
for i in range(0, int(num_images)):
offset = struct.unpack('l', f.read(4))
            # These variables are unused but the file has to be read in a specific order to access the valuable data
# padding = f.read(4)
# timestamp = struct.unpack('d', f.read(8))
f.read(4)
struct.unpack('d', f.read(8))
#End of unused variables
binfile = struct.unpack('i', f.read(4))
            # These variables are unused but the file has to be read in a specific order to access the valuable data
# padding = f.read(4)
f.read(4)
#End of unused variables
# print(offset)
bin_path = "%s\\%s%0.5d.bin" % (pathstr, bin_file, binfile[0])
# tiff_file_path = "%s_%0.5d.tif" %(tiff_path, i)
f_bin = open(bin_path, 'rb')
f_bin.seek(offset[0], os.SEEK_SET)
bytes = f_bin.read(height*width*bpp)
if bpp == 2:
buffer = np.frombuffer(bytes, dtype=np.uint16)
else:
buffer = np.frombuffer(bytes, dtype=np.uint8)
nparr2 = buffer.reshape(height, width)
# cv2.imwrite(tiff_file_path, nparr2)
f_bin.close()
# imgplot = plt.imshow(nparr2,cmap='gray_r')
# plt.show(imgplot)
# print(np.shape(nparr2))
# input()
VideoArray[:,:,i] = nparr2
bar.update()
del bar
del cfg
print("Video reading sucessfull")
return VideoArray
def VideoArrayWrite(VideoArray,output_folder,**kwargs):
if not os.path.exists(output_folder):
try :
os.makedirs(output_folder)
except FileExistsError:
pass
if "alerts" in kwargs:
alerts = kwargs.get("alerts")
else:
alerts = True
if "output_name" in kwargs:
output_name = kwargs.get("output_name")
else:
#ERROR 2 FLAG: LOOK FOR REASON HERE : BEGINING
if "input_path" in kwargs:
input_path = kwargs.get("input_path")
path,file = os.path.split(input_path)
if file.endswith(".seq") and path != "":
output_name = os.path.basename(path)
else:
if path == "" or path == None:
output_name = file
# sys.exit("ERROR 2 INVALID_PATH : You must either specify a filename with : output_name = ""Nameofyourfile"" (better practice is doing it iteratively) or the path input to get the seq file, used in HirisSeqReader, with input_path = ""pathtoyourvideo"" ")
else :
if "\\" not in path:
output_name = path
else :
output_name = os.path.basename(path)
else :
#ERROR 2 FLAG : LOOK FOR REASON HERE : END
sys.exit("ERROR 2 FILE_NOT_FOUND : You must either specify a filename with : output_name = ""Nameofyourfile"" (better practice is doing it iteratively) or the path input to get the seq file, used in HirisSeqReader, with input_path = ""pathtoyourvideo"" ")
print(output_name)
if "extension" in kwargs:
extension = kwargs.get("extension")
else:
if alerts :
print(colored("Using default extension (.avi) as none was specified","blue"))
extension = ".avi"
if "fps" in kwargs:
fps = kwargs.get("fps")
else:
fps = 30
if alerts :
print(colored("Using default framerate (30 fps) as none was specified","blue"))
if "codec" in kwargs:
codec = kwargs.get("codec")
else:
codec = "MJPG"
if alerts :
print(colored("Using default codec (MJPG) as none was specified","blue"))
if "color" in kwargs:
color = kwargs.get("color")
else:
color = False
if alerts :
print(colored("Interpreting data as greyscale images as no color info was specified","blue"))
FullOutputPathname = os.path.join(output_folder,output_name+extension)
size = np.shape(VideoArray)[1], np.shape(VideoArray)[0]
#np.size(VideoArray,1) , np.size(VideoArray,0)
bar = pyprind.ProgBar(np.shape(VideoArray)[2],bar_char='▓',title=f'\nWriting video at {FullOutputPathname}')
fourcc = VideoWriter_fourcc(*codec)
vid = VideoWriter(FullOutputPathname, fourcc, fps, size, True)
for ImageIndex in range(np.shape(VideoArray)[2]):
bar.update()
frame = VideoArray[:,:,ImageIndex].astype('uint8')
vid.write(np.repeat(frame[:,:,np.newaxis],3,axis = 2))
vid.release()
print(f"Video compression at {FullOutputPathname} sucessfull\n")
def Foldersearch(MainInputFolder,VideoName):
DirList = os.listdir(MainInputFolder)
NewDirlist=[]
for Subdir in DirList:
if os.path.exists(os.path.join(MainInputFolder,Subdir,VideoName)):
NewDirlist.append(os.path.join(MainInputFolder,Subdir,VideoName))
return NewDirlist
def RegFileSearch(MainInputFolder,regexp, ** kwargs):
File_List = os.listdir(MainInputFolder)
if "checkfile" in kwargs: #Care : this function considerably slows down the process and is only necessary in multifolder search
checkfile = kwargs.get("checkfile")
else :
checkfile = False
if checkfile:
check_list = []
for f in File_List:
if os.path.isfile(os.path.join(MainInputFolder, f)):
print("Checked")
check_list.append(f)
File_List = check_list
NewDirlist=[]
for File in File_List:
if QuickRegexp(File,regexp):
NewDirlist.append(os.path.join(MainInputFolder,File))
return NewDirlist
def BinarySearch(InputFolder,extension):
DirList = os.listdir(InputFolder)
NewDirlist=[]
try:
for Subdir in DirList:
FILE = os.path.join(InputFolder,Subdir)
if os.path.exists(FILE) and FILE.endswith(extension):
NewDirlist.append(Subdir)
except Exception as e:
print(e)
return NewDirlist
def QuickRegexp(Line,regex, **kwargs):
if "groups" in kwargs :
Groups = kwargs.get("groups")
else :
Groups = False
if Groups:
matches = re.match(regex, Line, re.MULTILINE)
if matches :
tempMATCH = matches.groups()
MATCH = []
for Element in tempMATCH:
if Element:
MATCH.append(Element)
return MATCH
else :
return False
else :
matches = re.finditer(regex, Line, re.MULTILINE)
for matchnum, match in enumerate(matches, start = 0):
MATCH = match.group()
return MATCH
return False
def AlphaNum_Sort(List):
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)',key)]
return sorted(List, key = alphanum_key)
def Repair_HIRIS(repairfolder,damagedVideopath,outputfolder,**kwargs):
logger = logging.getLogger("Repair_HIRIS")
logger.setLevel(logging.INFO)
if "expectedFrames" in kwargs:
expectedFrames = kwargs.get("expectedFrames")
else:
expectedFrames = 500
if "alerts" in kwargs:
alerts = kwargs.get("alerts")
else:
alerts = True
if "output_name" in kwargs:
output_name = kwargs.get("output_name")
else :
output_name = os.path.basename(os.path.dirname(damagedVideopath))
if "extension" in kwargs:
extension = kwargs.get("extension")
else:
extension = ".avi"
if alerts :
print(colored("Using default extension (.avi) as none was specified","blue"))
logger.debug("Using default extension (.avi) as none was specified")
if "fps" in kwargs:
fps = kwargs.get("fps")
else:
fps = 30
if alerts :
print(colored("Using default framerate (30 fps) as none was specified","blue"))
logger.debug("Using default framerate (30 fps) as none was specified")
if "codec" in kwargs:
codec = kwargs.get("codec")
else:
codec = "MJPG"
if alerts :
print(colored("Using default codec (MJPG) as none was specified","blue"))
logger.debug("Using default codec (MJPG) as none was specified")
if "color" in kwargs:
color = kwargs.get("color")
else:
color = False
if alerts :
print(colored("Interpreting data as greyscale images as no color info was specified","blue"))
logger.debug("Interpreting data as greyscale images as no color info was specified")
FullOutputvideo = os.path.join(outputfolder,output_name+extension)
if os.path.exists(FullOutputvideo):
return False, "out video exists"
cfg = configparser.ConfigParser()
cfg.read(damagedVideopath)
try:
width = int(cfg.get('Sequence Settings', 'Width'))
height = int(cfg.get('Sequence Settings', 'Height'))
bpp = int(cfg.get('Sequence Settings', 'BytesPerPixel'))
RepairSubFolder = str(width)+"x"+str(height)+"-"+str(expectedFrames)
TrialName = os.path.basename(damagedVideopath)
sqb_Name = TrialName.replace('.seq', '.sqb')
TrialName = TrialName[0:-4]
sqb_path = os.path.join(repairfolder,RepairSubFolder,sqb_Name)
pathstr = os.path.dirname(damagedVideopath)
except Exception as e:
print(colored("Error : {} on file : {}".format(e,damagedVideopath),"red"))
logger.error("Error : {} on file : {}".format(e,damagedVideopath))
return False, "seq config read"
ListDAMAGED_BINs = BinarySearch(pathstr,".bin")
ListCORRECTER_BINs = BinarySearch(os.path.join(repairfolder,RepairSubFolder),".bin")
try :
ListDAMAGED_BINs = AlphaNum_Sort(ListDAMAGED_BINs)
ListCORRECTER_BINs = AlphaNum_Sort(ListCORRECTER_BINs)
except Exception as e :
print(colored("Error : {} on file : {}".format(e,damagedVideopath),"red"))
logger.error("Error : {} on file : {}".format(e,damagedVideopath))
return False, "sorting failed"
if len(ListDAMAGED_BINs) != len(ListCORRECTER_BINs):
print(colored("Insufficient nb of binaries for file : {}".format(damagedVideopath),"red"))
logger.error("Insufficient nb of binaries for file : {}".format(damagedVideopath))
return False, "insufficient binary files"
try:
size = width , height
fourcc = VideoWriter_fourcc(*codec)
vid = VideoWriter(FullOutputvideo, fourcc, fps, size, color)
except Exception as e:
print(colored("Error : {} on file : {}".format(e,FullOutputvideo),"red"))
logger.error("Error : {} on file : {}".format(e,FullOutputvideo))
return False, "videowirte open fail"
print("Repairing Sequence : {}".format(damagedVideopath))
print("Video format : {} x {}".format(height,width))
print(colored("Writing to {}".format(FullOutputvideo),"green"))
bar = pyprind.ProgBar(int(expectedFrames),bar_char='░')
with open(sqb_path,'rb') as f :
try :
for i in range(0, int(expectedFrames)):
offset = struct.unpack('l', f.read(4))
                # These variables are unused but the file has to be read in a specific order to access the valuable data
# padding = f.read(4)
# timestamp = struct.unpack('d', f.read(8))
f.read(4)
struct.unpack('d', f.read(8))
#End of unused variables
binfile = struct.unpack('i', f.read(4))
                # These variables are unused but the file has to be read in a specific order to access the valuable data
# padding = f.read(4)
f.read(4)
#End of unused variables
bin_number = "_%0.5d.bin" % (binfile[0])
Index = ListCORRECTER_BINs.index(TrialName+bin_number)
# print(offset)
bin_path = os.path.join(pathstr,ListDAMAGED_BINs[Index])
# tiff_file_path = "%s_%0.5d.tif" %(tiff_path, i)
f_bin = open(bin_path, 'rb')
f_bin.seek(offset[0], os.SEEK_SET)
bytes = f_bin.read(height*width*bpp)
if bpp == 2:
buffer = np.frombuffer(bytes, dtype=np.uint16)
else:
buffer = np.frombuffer(bytes, dtype=np.uint8)
nparr2 = buffer.reshape(height, width)
# cv2.imwrite(tiff_file_path, nparr2)
f_bin.close()
# imgplot = plt.imshow(nparr2,cmap='gray_r')
# plt.show(imgplot)
# print(np.shape(nparr2))
# input()
# VideoArray[:,:,i] = nparr2
vid.write(np.uint8(nparr2))
bar.update()
# for ImageIndex in range(np.size(VideoArray,2)):
# print(ImageIndex)
except Exception as e:
vid.release()
print(colored("Error : {} on file : {}".format(e,damagedVideopath),"red"))
logger.error("Error : {} on file : {}".format(e,damagedVideopath))
return False, "binary file I/O"
vid.release()
del bar
del cfg
# del VideoArray
gc.collect()
print()
print("Video compression {} sucessfull".format(damagedVideopath))
logger.info("Video compression {} sucessfull".format(damagedVideopath))
return True, "none"
def Compress_Tiffvideo(TiffFiles,OutputVideo, ** kwargs):
logger = logging.getLogger("Compress_Tiffvideo")
logger.setLevel(logging.INFO)
TiffFiles = AlphaNum_Sort(TiffFiles)
print("Treating video : {} at {}".format(os.path.basename(OutputVideo),os.path.dirname(OutputVideo)))
if "alerts" in kwargs:
alerts = kwargs.get("alerts")
else:
alerts = True
if "fps" in kwargs:
fps = kwargs.get("fps")
else:
fps = 30
if alerts :
print(colored("Using default framerate (30 fps) as none was specified","blue"))
logger.debug("Using default framerate (30 fps) as none was specified")
if "codec" in kwargs:
codec = kwargs.get("codec")
else:
codec = "MJPG"
if alerts :
print(colored("Using default codec (MJPG) as none was specified","blue"))
logger.debug("Using default codec (MJPG) as none was specified")
if "color" in kwargs:
color = kwargs.get("color")
else:
color = False
if alerts :
print(colored("Interpreting data as greyscale images as no color info was specified","blue"))
logger.debug("Interpreting data as greyscale images as no color info was specified")
bar = pyprind.ProgBar(len(TiffFiles),bar_char='░')
print("Processing a {} frames video".format(len(TiffFiles)))
Index = 0
bar.update()
for File in TiffFiles:
image = imread(File, 0)
if Index == 0:
Index = 1
SIZE = np.shape(image)
size = SIZE[1] , SIZE[0]
fourcc = VideoWriter_fourcc(*codec)
vid = VideoWriter(OutputVideo, fourcc, fps, size, color)
vid.write(np.uint8(image))
else :
vid.write(np.uint8(image))
bar.update()
try :
del bar
vid.release()
return True
except Exception as e:
print(colored("Error Compress_Tiffvideo 2: {} on file : {}".format(e,OutputVideo),"red"))
logger.error("Error Compress_Tiffvideo 2: {} on file : {}".format(e,OutputVideo))
return False
if __name__ == "__main__":
input_video = r"D:\BehavioralVideos\Whisker_Video\Whisker_Topview\Expect_1\Mouse25\200210_1\2020-02-10T15.03.15\Trial.seq"
output_folder = r"C:\Users\Timothe\Downloads"
Status, LOG = Seq_to_Video(input_video, output_folder, output_name = "videodesouris" , extension = ".avi", codec = "MJPG" )
```
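The .sqb records above are read field by field with explicit padding skips; the same 24-byte IMGDATA layout described in the docstring (4-byte offset plus 4 padding bytes, 8-byte timestamp, 4-byte bin index plus 4 padding bytes) can also be unpacked in one call. A sketch, assuming little-endian files written by the acquisition software:
```python
# Sketch only: one struct.unpack per 24-byte IMGDATA record, assuming the
# little-endian layout described in the Seq_to_Video docstring above.
import struct

IMGDATA_FMT = '<l4xdi4x'                      # long + pad, double, int + pad
IMGDATA_SIZE = struct.calcsize(IMGDATA_FMT)   # 24 bytes


def iter_sqb_records(sqb_path):
    """Yields (offset, timestamp, bin_index) tuples from a .sqb index file."""
    with open(sqb_path, 'rb') as f:
        while True:
            record = f.read(IMGDATA_SIZE)
            if len(record) < IMGDATA_SIZE:
                break
            yield struct.unpack(IMGDATA_FMT, record)
```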
#### File: LibrairieWhisk/PythonWhiskCustom/py2_whisk.py
```python
import os
import numpy as np
import pandas as pd
import pickle
#sys.path.append(os.path.dirname(os.path.abspath("__filename__")))
from python import traj, trace
def Measurements_to_Pickle(InputPath):
mesureinput = InputPath + ".measurements"
data = load_measurements(mesureinput)
outputpath, outputname = os.path.split(InputPath)
output = os.path.join(outputpath,outputname + "#measurements.pickles")
f = file(output, 'wb')
pickle.dump(data, f)
    f.close()
return output
def load_measurements(measure_file):
"""Load measurements, such as curvature.
The data is taken from traj.MeasurementsTable
I had to guess at what the columns mean, based on example code in
features() in python/summary.py
The ordering of the result does not seem to match the ordering derived
from *.whiskers. I suspect too-small whiskers are being moved after good
whiskers. You must use 'frame' and 'wid' to line up with other data.
For this reason I set the index to be 'frame' and 'wid' in this function.
0 - "smask"? I think this may be a mask that is applied to filter out
whiskers that are too small. This seems to affect the ordering of
the results as well (good whiskers moved before bad).
1 - frame
2 - wid
3 - path_length
4 - median_score
5 - the "root angle", I think the angle of a few samples around follicle
6 - curvature
7, 8 - follicle x and y
9, 10 - tip x and y
measure_file : string
Path to *.measurements file from measure
convert_to_int : if True, then convert 'frame' and 'wid' columns to int
set_index : if True, then set index to ['frame', 'wid']
Don't do this if you want to maintain the ordering of the columns
Returns: DataFrame
Has one row for each whisker segment.
Columns: smask, frame, wid, path_length, score,
angle, curv, fol_x, fol_y, tip_x, tip_y
Then ['frame', 'wid'] are set to be the index (see above)
"""
# Measurements filename cannot be unicode, for some reason
tmt = traj.MeasurementsTable(str(measure_file))
tmt_arr = tmt.asarray()
tmtdf = pd.DataFrame(tmt_arr,
columns=['label', 'frame', 'wid', 'length', 'score',
'angle', 'curvature', 'fol_x', 'fol_y', 'tip_x', 'tip_y'])
# Convert to float32.
tmtdf = tmtdf.astype(np.float32)
# Convert index to int32.
for col in ['frame', 'wid']:
tmtdf[col] = tmtdf[col].astype(np.int32)
# Make index.
tmtdf = tmtdf.set_index(['frame', 'wid'],
verify_integrity=True).sort_index()
return tmtdf
def CreateWhiskPickles(path,**kwargs):
whiskersinput = path + ".whiskers"
wv = trace.Load_Whiskers(whiskersinput)
indxx = ["time","id","x","y","thick","scores"]
FrameDict = {}
cnt = 0
wvkeys = wv.keys()
for i in wvkeys :
subkeys = wv.get(i).keys()
for j in subkeys:
temp = wv.get(i).get(j)
temp = [temp.time,temp.id,temp.x,temp.y,temp.thick,temp.scores]
tempframe = pd.Series( temp , index = indxx)
FrameDict.update( [ ( cnt , tempframe ) ] )
cnt = cnt + 1
outfolder = kwargs.get( "outfolder" ,os.path.dirname(path))
name = os.path.basename(path).rstrip(".whiskers") + "#whiskers.pickles"
outpath = os.path.join(outfolder,name)
result = create_multi(pd.DataFrame(FrameDict).T)
result.to_pickle(outpath)
#print("written pickle at " + outpath)
return outpath
def create_multi(df):
multi= df.set_index(['time', 'id'], inplace=False)
return multi
if __name__ == "__main__":
path = r'\\172.16.31.10\EqShulz\Timothe\BehavioralVideos\Whisker_Video\Whisker_Topview\Expect_1\Mouse25\200303_VSD2\Mouse25_2020-03-03T11.53.01.whiskers'
CreateWhiskPickles(path,outfolder = r"C:\Users\Timothe\Desktop\Testzone" )
```
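A hedged usage sketch for `load_measurements` above: it needs the whisk `traj` bindings importable, and the measurements file name is a placeholder. Rows come back indexed by `(frame, wid)` as described in the docstring.
```python
# Hypothetical usage; 'Trial.measurements' is a placeholder path and the whisk
# 'traj' bindings must be importable for load_measurements() to work.
from py2_whisk import load_measurements

df = load_measurements('Trial.measurements')
# rows are indexed by (frame, wid); select all whisker angles in frame 0
print(df.loc[0, 'angle'].describe())
```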
#### File: LibrairieWhisk/PythonWhiskCustom/WhiskReadings.py
```python
import os
import sys
import numpy as np
import pandas as pd
import pickle
#sys.path.append(os.path.dirname(os.path.abspath("__filename__")))
from python import traj
def Measurements_to_Pickle(InputPath):
mesureinput = InputPath + ".measurements"
data = load_measurements(mesureinput)
outputpath, outputname = os.path.split(InputPath)
output = os.path.join(outputpath,outputname + ".pck")
f = file(output, 'wb')
pickle.dump(data, f)
    f.close()
return output
def load_measurements(measure_file):
"""Load measurements, such as curvature.
The data is taken from traj.MeasurementsTable
I had to guess at what the columns mean, based on example code in
features() in python/summary.py
The ordering of the result does not seem to match the ordering derived
from *.whiskers. I suspect too-small whiskers are being moved after good
whiskers. You must use 'frame' and 'wid' to line up with other data.
For this reason I set the index to be 'frame' and 'wid' in this function.
0 - "smask"? I think this may be a mask that is applied to filter out
whiskers that are too small. This seems to affect the ordering of
the results as well (good whiskers moved before bad).
1 - frame
2 - wid
3 - path_length
4 - median_score
5 - the "root angle", I think the angle of a few samples around follicle
6 - curvature
7, 8 - follicle x and y
9, 10 - tip x and y
measure_file : string
Path to *.measurements file from measure
convert_to_int : if True, then convert 'frame' and 'wid' columns to int
set_index : if True, then set index to ['frame', 'wid']
Don't do this if you want to maintain the ordering of the columns
Returns: DataFrame
Has one row for each whisker segment.
Columns: smask, frame, wid, path_length, score,
angle, curv, fol_x, fol_y, tip_x, tip_y
Then ['frame', 'wid'] are set to be the index (see above)
"""
# Measurements filename cannot be unicode, for some reason
tmt = traj.MeasurementsTable(str(measure_file))
tmt_arr = tmt.asarray()
tmtdf = pd.DataFrame(tmt_arr,
columns=['label', 'frame', 'wid', 'length', 'score',
'angle', 'curvature', 'fol_x', 'fol_y', 'tip_x', 'tip_y'])
# Convert to float32.
tmtdf = tmtdf.astype(np.float32)
# Convert index to int32.
for col in ['frame', 'wid']:
tmtdf[col] = tmtdf[col].astype(np.int32)
# Make index.
tmtdf = tmtdf.set_index(['frame', 'wid'],
verify_integrity=True).sort_index()
return tmtdf
if __name__ == "__main__":
pass
```
#### File: custom_libs/LibUtils/archi.py
```python
import os, yaml
class DataEnvironment():
def __init__(self,path):
self.yaml_path = os.path.join(path,"data_environment.yaml")
with open(self.yaml_path, 'r') as stream:
try:
print(yaml.safe_load(stream))
except yaml.YAMLError as exc:
print(exc)
```
#### File: custom_libs/LibUtils/fileio.py
```python
import os, sys
import pickle
import configparser, json
import numpy as np  # needed for the np.ndarray reconstruction in ConfigFile._getasvar
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath("__filename__"))))
#print(os.path.dirname(os.path.dirname(os.path.abspath("__name__"))))
from LibUtils import genpu
def pickleIN(path):
if os.path.isfile(path):
results = []
with open(path,"rb") as f :
while True :
try :
results.append(pickle.load(f))
except EOFError :
break
return results if len(results) > 1 else results[0]
return None
def pickleOUT(data,path,noiter = True):
with open(path,"wb") as f :
if isinstance(data, (list,tuple)) and not noiter:
for item in data :
pickle.dump(item,f)
return None
pickle.dump(data,f)
class ConfigFile(genpu.TwoLayerDict):
def __init__(self, path, **kwargs):
"""
A class to access config files through an object with indexing,
        either for getting or setting values.
Seamless and easy integration in code, ability to load or set multiple
variables at once for more readability when using in static environments
(e.g. functions or simple classes)
Parameters
----------
path : str
Path to the config file.
**kwargs : TYPE
DESCRIPTION.
Returns
-------
None.
"""
self.path = path
self.cfg = configparser.ConfigParser()
self.last_mtime = None
self.cursor = None
super(genpu.TwoLayerDict, self).__init__({})
self._read_if_changed()
def sections(self):
return self.cfg.sections()
def params(self,section = None):
return self.cfg.options(section)
def _read_if_changed(self):
if self._filechanged :
self._read()
def __getitem__(self,index):
self._read_if_changed()
return super().__getitem__(index)
def _read(self):
self.cfg.read(self.path)
super().clear()
for sec in self.sections():
super().__setitem__(sec , {param: self._getasvar(sec,param) for param in self.params(sec) } )
def _getasvar(self,section,param):
try :
#print(section,param)
#print(self.cfg.get(section,param))
val = json.loads(self.cfg.get(section,param))
except configparser.NoOptionError:
return None
if isinstance(val,str):
if val[0:1] == "f" :
val = val.replace("''",'"')
if isinstance(val,list):
if len(val) == 2 :
if val[0] == "np.ndarray":
val = np.array(val[1])
return val
@property
def _filechanged(self):
filestatus = os.stat(self.path).st_mtime
if self.last_mtime is None or self.last_mtime != filestatus:
self.last_mtime = filestatus
return True
return False
if __name__ == "__main__":
test = ConfigFile(r"\\192.168.3.11\EqShulz\Timothe\DATA\DataProcessing\Expect_3_mush\CrossAnimals\SpatialScale\scale.txt")
```
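A brief sketch of reading values through `ConfigFile` above, assuming `genpu.TwoLayerDict` behaves like a nested dict for lookups; the file name, section, and parameter are placeholders, and values are parsed with `json.loads` as shown in `_getasvar`.
```python
# Hypothetical usage; 'settings.ini', 'analysis' and 'threshold' are placeholders.
from LibUtils.fileio import ConfigFile

cfg = ConfigFile('settings.ini')
print(cfg.sections())                      # section headers found in the file
threshold = cfg['analysis']['threshold']   # indexing re-reads the file if it changed on disk
```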
#### File: custom_libs/LibUtils/traces.py
```python
import numpy as np
from scipy.signal import savgol_filter, filtfilt, butter, find_peaks, peak_prominences
from scipy.interpolate import interp1d
import logging, warnings
import scipy.ndimage as scpnd
########## 1D signal processing functions
def Polyfit1D( signal , order , **kwargs):
signal = np.array(signal)
if kwargs.get("x",None) is None :
x = np.arange(signal.shape[0])
params = np.polyfit(x,signal ,order)
func = np.poly1d(params)
if kwargs.get("ret_params",False):
return func(x) ,params, func
return func(x)
def Smooth1D(signal_trace, windows_size = None ,polynomial_order = 3):
if windows_size is None :
windows_size = 7
return savgol_filter(signal_trace, windows_size, polynomial_order)
def Interp1D(signal_trace, resample_rate = 2, method = 'quadratic'):
""" Expects a 1D np array as signal_trace
"""
x1 = np.linspace(0, 1, signal_trace.size)
interpolator = interp1d(x1, signal_trace, kind = method , axis=0)
x2 = np.linspace(0, 1, signal_trace.size * resample_rate)
return interpolator(x2)
def Filt1D(x, fcut , order = 3):
x_prime = x.copy()
x_prime = np.array(x_prime)
slices, values = DetectContiguity(x_prime,np.nan)
slicindex = None
for idx , val in enumerate(values) :
if val == 1 :
if slicindex is not None :
raise ValueError("Found two separate signals, cannot proceed")
slicindex = idx
if slicindex is None :
raise ValueError("Found no data, cannot proceed")
data_slice = slices[slicindex]
b, a = butter( order, fcut )
filtered_contiguous_data = filtfilt(b, a, x_prime[data_slice[0]:data_slice[1]] , padlen=3)
x_prime[data_slice[0]:data_slice[1]] = filtered_contiguous_data
return x_prime
def Peak1D(input_signal,**kwargs):
input_signal = np.array(input_signal)
peak_min_height = kwargs.get("height",None)
if peak_min_height is None :
sd_coeff = kwargs.get("sd_height",None)
if sd_coeff is None :
sd_coeff = 2
peak_min_height = input_signal.mean() + ( sd_coeff * input_signal.std())
pk_indx , values = find_peaks( input_signal, height = peak_min_height )
pk_values = values["peak_heights"]
prominence = peak_prominences(input_signal,pk_indx)[0]
min_prominence = kwargs.get("prom",None)
if min_prominence is None :
min_prominence = ( input_signal.max() - input_signal.min() ) / 3
pk_prominence = np.array([ (pk_indx[i],pk_values[i]) for i in range(prominence.shape[0]) if prominence[i] >= min_prominence ]).T
return pk_indx , pk_values , pk_prominence
def DetectContiguity(List,threshold = None):
import math
_List = np.asarray(List.copy())
if threshold is not None :
if np.isnan(threshold) :
for idx , val in enumerate(_List) :
if not np.isnan(val):
_List[idx] = 1
else :
for idx , val in enumerate(_List) :
if not np.isnan(val) and val >= threshold :
_List[idx] = 1
if not np.isnan(val) and val < threshold :
_List[idx] = 0
ranges = [i+1 for i in range(len(_List[1:])) if not ( ( _List[i] == _List[i+1] ) or ( math.isnan(_List[i]) and math.isnan(_List[i+1]) ) ) ]
ranges.append(len(_List))
ranges.insert(0, 0)
slices = []
values = []
for i in range(len(ranges)-1):
slices.append([ranges[i], ranges[i+ 1]])
if _List[ranges[i]] is None :
values.append(None)
else :
values.append(_List[ranges[i]])
return slices, values
def BinarizeList(valulist, threshold, up = True):
import math
valulist = np.asarray(valulist)
outlist = []
for i in range(valulist.shape[0]):
if valulist[i] is None :
outlist.append(None)
else :
if np.isnan(valulist[i]) or math.isnan(valulist[i]) :
outlist.append(np.nan)
else :
if up :
if valulist[i] >= threshold:
outlist.append(1)
else :
outlist.append(0)
else :
if valulist[i] <= threshold:
outlist.append(1)
else :
outlist.append(0)
return outlist
def Derivate(vlist):
out = []
vlist = np.asarray(vlist)
for i in range(vlist.shape[0]-1):
out.append(vlist[i]-vlist[i+1])
return out
########## ND signal processing functions
#TODO : ND savgol smoothing window filter function
#TODO : 2D image interpolation "reparation" based on mask ("snippetize" from ProcessData external)
#from rasterio.fill import fillnodata
def NDfilter_uni(signal,value,dimensions):
"""
Uniform filter on Ndim data
Parameters
----------
signal : TYPE
DESCRIPTION.
value : TYPE
DESCRIPTION.
dimensions : TYPE
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
dim = []
for i in range(len(signal.shape)):
if i < dimensions:
dim.append(value)
else :
dim.append(0)
return scpnd.uniform_filter( signal, dim )
def NDfilter_gauss(signal,sigma_value,dimensions):
"""
Gaussian filter on Ndim data
Parameters
----------
signal : TYPE
DESCRIPTION.
value : TYPE
DESCRIPTION.
dimensions : TYPE
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
dim = []
for i in range(len(signal.shape)):
if i < dimensions:
dim.append(sigma_value)
else :
dim.append(0)
return scpnd.gaussian_filter( signal, dim )
def FFT( signal_trace, sample_freq, part = 'all', interp = None, smooth = None):
""" Expect a one D numpy array, a sample freq ( samples per sec ), real,
imaginary or all part of the DFT, and optional tuples containing interpolation
rate and smooth window in case you wish to perform these operations. Else leave as default (None)
"""
if interp is not None :
signal_trace = Interp1D(signal_trace, *interp)
if len(interp) > 0 :
coeff = interp[0]
else :
coeff = 2
else :
coeff = 1
if smooth is not None :
signal_trace = Smooth1D(signal_trace , *smooth)
if part == 'real' :
p = 'r'
elif part == 'imaginary':
p = 'i'
else :
p = ''
FFTsignal_trace = eval( f"np.fft.{p}fft( signal_trace )" )
if p == 'i':
p = 'r'
FREQ = eval( f"np.fft.{p}fftfreq(signal_trace.size) * sample_freq * coeff" )
return FREQ , FFTsignal_trace
def MapSpline(image,spline,auto_linspace = None, auto_imgtranspose = False):
"""
Returns a slice of values correspunding to the interpolated values of an array when sliced by a curve or line in an arbitrary orientation
Parameters
----------
image : numpy array. REQUIRE TO BE .T transposed to be mapped correctly
DESCRIPTION.
spline : numpy array with shape :
[2,N] with N >= 2
with [0] of dimension 1 = x
and [1] of dimension 1 = y
Returns
-------
zi : TYPE
DESCRIPTION.
"""
import scipy.ndimage
if auto_imgtranspose :
image = image.T
    if auto_linspace is None and spline.shape[0] == 2 and spline.shape[1] == 2 :
pass
#TODO : transform spline to a linspace based on the terminal coordinates in spline
if spline.shape[1] == 2 and spline.shape[0] > 2:
spline = spline.T
zi = scipy.ndimage.map_coordinates(image, spline)
return zi
def DetectHighPulses(signal_trace,ThresholdHigh,ThresholdLow,samplerate):
if not isinstance(signal_trace,list):
if isinstance(signal_trace,np.ndarray):
signal_trace = signal_trace.tolist()
state = False
Pulses = 0
PulseLength = []
PulsesStarts = []
LastPulseStart = 0
for U in range(len(signal_trace)):
if signal_trace[U] > ThresholdHigh and not state:
Pulses = Pulses + 1
state = True
LastPulseStart = U
PulsesStarts.append(U)
if signal_trace[U] < ThresholdLow and state:
PulseLength.append(U-LastPulseStart)
state = False
logging.info(f"Test : pulses :{Pulses} mean pulseduration :{np.mean(np.asarray(PulseLength)) / (samplerate/1000)}" )
return {'count' : Pulses, 'indexes' : np.asarray(PulsesStarts) , 'durations' : np.asarray(PulseLength) / (samplerate/1000) , 'mean_duration' : np.mean(np.asarray(PulseLength)) / (samplerate/1000) }
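# --- Illustrative sketch, added for clarity; not part of the original module ---
# DetectHighPulses behaves like a Schmitt trigger: a pulse starts when the trace rises
# above ThresholdHigh and ends once it falls below ThresholdLow; durations are reported
# in milliseconds. The numbers below are made up; the helper is never called.
def _example_detect_high_pulses():
    trace = [0, 0, 5, 5, 0, 0, 5, 0]
    res = DetectHighPulses(trace, ThresholdHigh=4, ThresholdLow=1, samplerate=1000)
    # res['count'] -> 2, res['indexes'] -> [2, 6], res['durations'] -> [2.0, 1.0] (ms)
    return res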
###### Pending deprecation functions
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal_trace.
The signal_trace is prepared by introducing reflected copies of the signal_trace
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal_trace.
input:
x: the input signal_trace
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal_trace
example:
    t=arange(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal_trace.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
warnings.warn("deprecated use smooth1D instead (more reliable and well built)")
x = np.asarray(x)
if x.ndim != 1:
raise Exception("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise Exception("Input vector needs to be bigger than window size.")
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise Exception("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
        w = getattr(np, window)(window_len)
y=np.convolve(w/w.sum(),s,mode='valid')
return y
def DetectContingence(List,threshold = None):
warnings.warn("Deprecated, (bad naming) use DetectContiguity instead. Same syntax and same use case")
return DetectContiguity(List, threshold)
if __name__ == "__main__":
nonan_accel = filter_accel[ ~np.isnan(filter_accel) ]
freq, fft = signal_trace.FFT( nonan_accel , 500, 'real' , (2,), (7,))
plt.stem(freq, abs(fft) )
plt.xlim((-5,400))
plt.show()
``` |
{
"source": "jostv99/Bicikli",
"score": 3
} |
#### File: jostv99/Bicikli/preberi_podatke_spravi_csv.py
```python
import requests
import re
import os
import csv
import time
###############################################################################
# First, let's define a few helper tools for fetching data from the web.
###############################################################################
# URL of the main bolha.com page for the car listings
knjige_url1 = "https://www.bolha.com/avto-oglasi?page="
# directory where the data will be stored
knjige_dir = "fantazijski_romani"
# name of the file where the main page will be saved
frontpage_filename = "knjige"
# name of the CSV file where the data will be saved
csv_filename = "knjige"
def download_url_to_string(url):
"""Funkcija kot argument sprejme niz in poskusi vrniti vsebino te spletne
strani kot niz. V primeru, da med izvajanje pride do napake vrne None.
"""
try:
        # the part of the code that might raise an exception
r = requests.get(url)
except requests.exceptions.ConnectionError:
        # code that runs if an error occurs
        # it is enough to print a warning and stop the function
print("Napaka pri povezovanju do:", url)
return None
    # continue with the code if no error occurred
if r.status_code == requests.codes.ok:
return r.text
else:
print("Napaka pri prenosu strani:", url)
return None
def save_string_to_file(text, directory, filename):
"""Funkcija zapiše vrednost parametra "text" v novo ustvarjeno datoteko
locirano v "directory"/"filename", ali povozi obstoječo. V primeru, da je
niz "directory" prazen datoteko ustvari v trenutni mapi.
"""
os.makedirs(directory, exist_ok=True)
path = os.path.join(directory, filename)
with open(path, 'w', encoding='utf-8') as file_out:
file_out.write(text)
return None
# Define a function that downloads the listing pages and saves them to files.
l = 115
def save_frontpage(directory, d):  # last index 4596, i += 12
    """Downloads the listing pages 1 .. d-1 and saves each one to "directory"/knjige<i>.html."""
i = 1
while i < d:
knjige_url = knjige_url1 + str(i)
print(knjige_url)
text = download_url_to_string(knjige_url)
save_string_to_file(text, directory, f"knjige{str(i)}.html")
i += 1
return None
###############################################################################
# After fetching the data, we want to process it.
###############################################################################
def read_links_to_adds(dir, filename):
dat = read_file_to_string(dir, filename)
rx = re.compile(r"<a.*href=(\"/avto-oglasi.*)>.*</a></h3>")
ads = re.findall(rx, dat)
return ads
def open_add_link_return_info(url):
url = "https://www.bolha.com" + url[1:-1]
fl = download_url_to_string(url)
#print(fl)
rx = re.compile(r"<th scope=\"row\">(?P<stvar>.*)</th>\n.*<td>(?P<stvar_info>.*)<abbr|<th scope=\"row\">(?P<stvar1>.*):</th>\n.*<td>(?P<stvar_info1>.*)<|priceInEuros\":\"(?P<Cena>\d*.?\d*)&.*?;")
info = re.findall(rx, str(fl))
a = make_dict_from_list(info)
return a
def make_dict_from_list(l):
ret = {}
for t in l:
key = None
val = None
for item in t:
if item == "":
continue
else:
                if key is None:
                    key = item
                    val = item
                else:
                    val = item
ret[key] = val
return ret
def read_file_to_string(directory, filename):
"""Funkcija vrne celotno vsebino datoteke "directory"/"filename" kot niz"""
path = os.path.join(directory, filename)
with open(path, 'r', encoding='utf-8') as file_in:
return file_in.read()
def make_big_csv_from_small_csv(dir, fn):
keys = []
os.makedirs(dir, exist_ok=True)
path = os.path.join(dir, fn)
with open(path, "w", encoding="utf-8") as csv_file:
for file in os.listdir(dir):
if file.endswith(".csv") and file != "koncni.csv":
with open(dir + "\\" + file, "r", encoding="utf-8") as file:
dic = read_csv_return_dict(file)
for key, val in dic.items():
if key not in keys:
                            keys.append(key)  # yes, I know this is really inefficient, I could have done it earlier, but oh well
writer = csv.DictWriter(csv_file, keys)
writer.writeheader()
for file in os.listdir(dir):
if file.endswith(".csv"):
with open(dir + "\\" + file, "r", encoding="utf-8") as file:
dic = read_csv_return_dict(file)
writer.writerow(dic)
print("krneki")
def read_csv_return_dict(myfile):
f = True
dic = {}
for row in myfile:
if row == "\n":
continue
row = row.replace("\n", "")
row = row.split(",")
if f:
dic["Cena"] = row[1].replace(".", "")
f = False
else:
dic[row[0]] = row[1]
return dic
###############################################################################
# Now we want to save the processed data.
###############################################################################
def make_csv(dict, dir, filename):
os.makedirs(dir, exist_ok=True)
path = os.path.join(dir, filename)
with open(path, "w", encoding="utf-8") as csv_file:
writer = csv.writer(csv_file)
for key, val in dict.items():
writer.writerow([key, val])
return None
def main(redownload=True, reparse=True):
"""Funkcija izvede celoten del pridobivanja podatkov:
1. Oglase prenese iz bolhe
2. Lokalno html datoteko pretvori v lepšo predstavitev podatkov
3. Podatke shrani v csv datoteko
"""
    # First, save the main page to a local file
i = 0
#save_frontpage(knjige_dir, l)
print("konec lonec")
# for i in range(1, l + 1):
# links = read_links_to_adds(knjige_dir, f"knjige{i}.html")
# print(i)
# for j, link in enumerate(links):
# info = open_add_link_return_info(link)
# make_csv(info, knjige_dir, f"retc{i}{j}.csv")
make_big_csv_from_small_csv(knjige_dir, "koncni.csv")
    # Extra: use the parameters of the main function to control whether the whole
    # website is downloaded again on every run (even if it already exists),
    # and likewise for the conversion step
if __name__ == '__main__':
main()
``` |
{
"source": "Jostyck9/remote-robot-surveillance",
"score": 2
} |
#### File: remote-robot-surveillance/robot/video_stream.py
```python
from flask import Response
from flask import Flask
from flask import render_template
import threading
import time
import cv2
import jetson.inference
import jetson.utils
VIDEO_SOURCE = "http://192.168.0.48:4747/video"
APP_PORT = 5000
outputFrame = None
frame = None
lock = threading.Lock()
vid_lock = threading.Lock()
app = Flask(__name__)
vs = cv2.VideoCapture(VIDEO_SOURCE)
net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.8)
time.sleep(2.0)
@app.route("/")
def index():
return render_template("index.html")
def get_continuous_video():
global vs, frame, vid_lock
while True:
        ret, fr = vs.read()
        if ret:
fr = cv2.rotate(fr, cv2.ROTATE_90_COUNTERCLOCKWISE)
with vid_lock:
frame = fr.copy()
def detect_motion():
global outputFrame, lock, net, frame, vid_lock
while True:
with vid_lock:
if frame is None:
continue
tmp_frame = frame.copy()
colored = cv2.cvtColor(tmp_frame, cv2.COLOR_BGR2RGB)
img = jetson.utils.cudaFromNumpy(colored)
height, width = colored.shape[:2]
detections = net.Detect(img, width, height)
for detection in detections:
class_name = net.GetClassDesc(detection.ClassID)
tmp_frame = cv2.rectangle(tmp_frame,
(int(detection.Left), int(detection.Top)),
(int(detection.Right), int(detection.Bottom)),
(255,0,0), 2
)
cv2.putText(tmp_frame, class_name,
(int(detection.Left), int(detection.Top)),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
with lock:
outputFrame = tmp_frame.copy()
def generate():
# grab global references to the output frame and lock variables
global outputFrame, lock
# loop over frames from the output stream
while True:
# wait until the lock is acquired
with lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if outputFrame is None:
continue
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) + b'\r\n')
@app.route("/video_feed")
def video_feed():
# return the response generated along with the specific media
# type (mime type)
return Response(generate(), mimetype = "multipart/x-mixed-replace; boundary=frame")
if __name__ == '__main__':
t = threading.Thread(target=get_continuous_video, daemon=True)
t.start()
t = threading.Thread(target=detect_motion, daemon=True)
t.start()
app.run(host='0.0.0.0', port=APP_PORT, debug=True, threaded=True, use_reloader=False)
vs.release()
``` |
{
"source": "josua1111/K-mean-clustering",
"score": 3
} |
#### File: josua1111/K-mean-clustering/K-mean-clustering.py
```python
import pickle
import numpy as np
def eucl_dist(a, b, axis=1):
return np.linalg.norm(a - b, axis=axis)
def k_mean(x, k):
    # initializing the cluster assignment variable
    cluster = np.zeros(x.shape[0])
    # calculating min and max for every dimension of the data
    minv = np.min(x,axis=0)
    maxv = np.max(x,axis=0)
# for k in range(2,11):
error = 0
    # initializing the centroids of the k clusters
    center = np.zeros((k, x.shape[1]))
    for i in range(k):
        for j in range(x.shape[1]):
            center[i, j] = np.random.randint(minv[j], maxv[j])
    # assigning zeros to the old centroid values
center_old = np.zeros(center.shape)
# initial error
err = eucl_dist(center, center_old, None)
while err != 0:
        # calculating the distance of data points from the centroids and assigning each point to the closest centroid's cluster
for i in range(len(x)):
distances = eucl_dist(x[i], center)
clust = np.argmin(distances)
cluster[i] = clust
        # saving the old centroid values
center_old = np.copy(center)
# Finding the new centroids by taking the average value
for i in range(k):
points = [x[j] for j in range(len(x)) if cluster[j] == i]
if points:
center[i] = np.mean(points, axis=0)
        # calculating the difference between the new and old centroid values
err = eucl_dist(center, center_old, None)
    # calculating the total distance between cluster centroids and their data points
for i in range(k):
d = [eucl_dist(x[j],center[i],None) for j in range(len(x)) if cluster[j] == i]
error += np.sum(d)
# counting data points in all clusters
count = {key: 0.0 for key in range(k)}
for i in range(len(x)):
count[cluster[i]] += 1
    # displaying the cluster number, the average distance between centroids and data points, and the cluster counts
    print(k, error / len(x), count)
return cluster
if __name__ == '__main__':
# loading dataset of form [[data1],[data2], ....]
inp = pickle.load(open('test.pickle', 'rb'))
x = np.array([i[0] for i in inp])
    # returns a cluster index for every data point; k=3 is an arbitrary example choice
    cluster = k_mean(x, 3)
``` |
{
"source": "JosuaCarl/Script_Assisted_Modeling",
"score": 3
} |
#### File: JosuaCarl/Script_Assisted_Modeling/annotate_genes.py
```python
import sys
import os
import cobra
from tqdm import tqdm
import memote
import helper_functions as hf
from bioservices.kegg import KEGG
import gffpandas.gffpandas as gffpd
'''
Usage: annotate_genes.py <path_input_sbml-file> <path_output_sbml-file> <path-GFF File> <path_memote-report> <name_organism>
Adds annotations from KEGG and SBO Terms to genes.
'''
def main(args):
# console access
    if len(args) != 6:  # script name + 5 arguments
print(main.__doc__)
sys.exit(1)
infile = args[1]
outfile = args[2]
gff_file = args[3]
memote_report = args[4]
organism_name = args[5]
if not os.path.exists(infile):
print("[Error] %s : No such file." % infile)
sys.exit(1)
# Read in files
model = cobra.io.sbml.read_sbml_model(infile)
df = gffpd.read_gff3(gff_file)
df_attr = df.attributes_to_columns()
kegg = KEGG()
# find organism
req = kegg.lookfor_organism(organism_name)[0].split(' ')
entry = req[0] # 'T00661'
org_id = req[1] # 'fma'
sbo_nr = "SBO:0000243"
# Kegg genes extraction
genes = kegg.list(org_id).split("\n")[:-1]
genome_dict = {g.split("\t")[0].replace("fma:", ""): g.split("\t")[1] for g in genes}
# -- Cobra model annotation
for i in tqdm(range(len(model.genes))):
annotations = {"sbo": sbo_nr}
id_sbml = model.genes[i].id
refseq = id_sbml[:-2] + "." + id_sbml[-1]
gff_query = df_attr["Name"] == refseq
if gff_query.any():
matches = df_attr[gff_query]
for j, row in matches.iterrows():
if df_attr.loc[j - 1, "type"] == "gene" and df_attr.loc[j, "type"] == "CDS":
locus_tag = df_attr.loc[j - 1, "old_locus_tag"]
new_locus_tag = df_attr.loc[j - 1, "locus_tag"]
note = df_attr.loc[j, "Note"]
name = df_attr.loc[j, "Name"]
annotations = hf.dict_add_overlap_to_list(annotations, {"kegg.genes": f"{org_id}:{locus_tag}",
"refseq": refseq})
model.genes[i].annotation = \
hf.dict_add_overlap_to_list(model.genes[i].annotation, annotations)
if note is None:
model.genes[i].notes.update({"locus tag:": new_locus_tag})
else:
model.genes[i].notes.update({"locus tag:": new_locus_tag, "NCBI note:": note})
model.genes[i].name = name
# Export model
cobra.io.sbml.write_sbml_model(model, outfile)
# Make memote report
result = memote.test_model(model, results=True, skip=["test_find_metabolites_not_produced_with_open_bounds"])
report = memote.snapshot_report(result[1], config=None, html=True)
with open(memote_report, "w") as handle:
handle.write(report)
if __name__ == '__main__':
main(sys.argv)
```
#### File: JosuaCarl/Script_Assisted_Modeling/balance_from_csv.py
```python
import sys
import os
import libsbml
from tqdm import tqdm
import pandas as pd
import helper_functions as hf
'''
Usage: balance_from_csv.py <path_input_sbml-file> <path_output_sbml-file> <path_infile-csv_balancing_changes>
Used, to balance a model through a manually curated list in csv format.
The csv table must be structured as follows: id, change_type, old, new, foundation, db_id, notes, eco
The entry of an eco_term or additional notes is optional.
The id must correspond to a metabolite or reaction id in the model.
The change_type can be one of: [charge, formula] for metabolites and [product, reactant] for reactions
The old field will be transferred into notes as a change note.
The new field is the new value, which will be implemented into the model. It must correspond to a formula/charge for
metabolites and have the format <number> <compound> (e.g. 1 h_c, or 200 fe_rd_e) for reaction changes.
As a foundation, a database can be given with a corresponding id in db_id. An entry is not optional.
'''
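# Illustrative layout of the balancing CSV described in the docstring above. The rows
# are made-up examples (hypothetical ids), added only to clarify the expected format:
#
#   id,change_type,old,new,foundation,db_id,notes,eco
#   atp_c,charge,-3,-4,BiGG,atp,,ECO:0000305
#   R_PFK,reactant,1 h_c,2 h_c,SEED,rxn00148,,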
def main(args):
# console access
    if len(args) < 4:
print(main.__doc__)
sys.exit(1)
infile = args[1]
outfile = args[2]
infile_csv = args[3]
if not os.path.exists(infile):
print("[Error] %s : No such file." % infile)
sys.exit(1)
# create Readers and Writers
reader = libsbml.SBMLReader()
writer = libsbml.SBMLWriter()
# Read SBML File
doc = reader.readSBML(infile)
model = doc.getModel()
# parse through table
table = pd.read_csv(infile_csv)
for i in tqdm(range(1, len(table["id"]))):
meta_id = table["id"][i]
try:
if table["id"][i].startswith("R_"):
if table["foundation"][i] == "SEED":
lnk = f"https://identifiers.org/seed.reaction:{table['db_id'][i]}"
model = hf.add_link_annotation_reaction(model, lnk, libsbml.BQB_IS, meta_id)
elif table["foundation"][i] == "BiGG":
lnk = f"https://identifiers.org/bigg.reaction:{table['db_id'][i]}"
model = hf.add_link_annotation_reaction(model, lnk, libsbml.BQB_IS, meta_id)
if table["change_type"][i] == "product":
new_comp = table["new"][i].split(" ")
comp_nr = float(new_comp[0])
comp = new_comp[1]
if comp_nr == 0:
model.getReaction(meta_id).removeProduct("M_" + comp)
else:
species = model.getSpecies("M_" + comp)
model.getReaction(meta_id).removeProduct("M_" + comp)
model.getReaction(meta_id).addProduct(species, comp_nr)
note_str = f"Changed product from {table['old'][i]} to {table['new'][i]}. Source: {table['foundation'][i]}"
model = hf.add_note_reaction(model, note_str, meta_id)
if table["change_type"][i] == "reactant":
new_comp = table["new"][i].split(" ")
comp_nr = float(new_comp[0])
comp = new_comp[1]
if comp_nr == 0:
model.getReaction(meta_id).removeReactant("M_" + comp)
else:
species = model.getSpecies("M_" + comp)
model.getReaction(meta_id).removeReactant("M_" + comp)
model.getReaction(meta_id).addReactant(species, comp_nr)
note_str = f"Changed reactant from {table['old'][i]} to {table['new'][i]}. Source: {table['foundation'][i]}"
model = hf.add_note_reaction(model, note_str, meta_id)
if type(table["notes"][i]) == str and table["notes"][i] != "":
model = hf.add_note_reaction(model, table["notes"][i], meta_id)
if type(table["eco"][i]) == str and table["eco"][i] != "":
link = f"https://identifiers.org/eco/{table['eco'][i]}"
model = hf.add_link_annotation_species(model, link, libsbml.BQB_IS, meta_id)
else:
meta_id = "M_" + table["id"][i]
if table["change_type"][i] == "charge":
note_str = f"Changed charge from {table['old'][i]} to {table['new'][i]}. Source: {table['foundation'][i]}"
model = hf.add_note_species(model, note_str, meta_id)
model.getSpecies(meta_id).getPlugin('fbc').setCharge(int(table["new"][i]))
elif table["change_type"][i] == "formula":
note_str = f"Changed formula from {table['old'][i]} to {table['new'][i]}. Source: {table['foundation'][i]}"
model = hf.add_note_species(model, note_str, meta_id)
model.getSpecies(meta_id).getPlugin('fbc').setChemicalFormula(table["new"][i])
if table["foundation"][i] == "SEED":
link = f"https://identifiers.org/seed.compound:{table['db_id'][i]}"
model = hf.add_link_annotation_species(model, link, libsbml.BQB_IS, meta_id)
elif table["foundation"][i] == "BiGG":
link = f"https://identifiers.org/bigg.metabolite:{table['db_id'][i]}"
model = hf.add_link_annotation_species(model, link, libsbml.BQB_IS, meta_id)
elif table["foundation"][i] == "MetaCyc":
link = f"https://identifiers.org/metacyc.compound:{table['db_id'][i]}"
model = hf.add_link_annotation_species(model, link, libsbml.BQB_IS, meta_id)
elif table["foundation"][i] == "MetaNetX":
link = f"https://identifiers.org/metanetx.chemical:{table['db_id'][i]}"
model = hf.add_link_annotation_species(model, link, libsbml.BQB_IS, meta_id)
elif table["foundation"][i] == "KEGG":
link = f"https://identifiers.org/kegg.compound:{table['db_id'][i]}"
model = hf.add_link_annotation_species(model, link, libsbml.BQB_IS, meta_id)
if type(table["notes"][i]) == str and table["notes"][i] != "":
model = hf.add_note_species(model, table["notes"][i], meta_id)
if type(table["eco"][i]) == str and table["eco"][i] != "":
link = f"https://identifiers.org/eco/{table['eco'][i]}"
model = hf.add_link_annotation_species(model, link, libsbml.BQB_IS_DESCRIBED_BY, meta_id)
except AttributeError as ae:
print(meta_id)
raise ae
# Saving new model
doc.setModel(model)
writer.writeSBML(doc, outfile)
if __name__ == '__main__':
main(sys.argv)
```
#### File: JosuaCarl/Script_Assisted_Modeling/check+annotate_metabolites.py
```python
import sys
import os
import libsbml
from tqdm import tqdm
import pandas as pd
from itertools import product
from bioservices.kegg import KEGG
from requests.exceptions import HTTPError, RequestException
import helper_functions as hf
'''
Usage: check+annotate_metabolites.py <path_input_sbml-file> <outfile-csv_mismatches> <outfile-csv_formula_search> [-chBal]
    -chBal : tolerate charge/hydrogen balancing, i.e. +1 charge may correspond to +1 H-atom
Takes formulas from the notes field and fbc-plugin, if none are found, BiGG-DB is searched for a formula.
If multiple or no possibilities are given in BiGG, a csv-formatted table with these metabolites is returned.
Only searches info, but does not change the model.
'''
def main(args):
# console access
    if len(args) < 4:
print(main.__doc__)
sys.exit(1)
infile = args[1]
outfile_mismatches = args[2]
outfile_formula_search = args[3]
tolerate_ch_h_bal = "-chBal" in args
if not os.path.exists(infile):
print("[Error] %s : No such file." % infile)
sys.exit(1)
# create Readers and Writers
reader = libsbml.SBMLReader()
# Read SBML File
doc = reader.readSBML(infile)
model = doc.getModel()
# Knowledge base preparation
# bigg_db = pd.read_csv("Databases/BiGG/bigg_models_metabolites.tsv", sep='\t')
mnx_db = pd.read_csv("Databases/MetaNetX/chem_prop.tsv", header=351, sep='\t')
mnx_db.rename(columns={'#ID': 'id'}, inplace=True)
mnx_db.fillna("", inplace=True)
seed_db = pd.read_csv("Databases/SEED/compounds.tsv", header=0, sep="\t")
seed_db.fillna("", inplace=True)
kegg = KEGG()
req = kegg.lookfor_organism('finegoldia magna')[0].split(' ')
entry = req[0] # "T00661"
org_code = req[1] # 'fma'
# -------- formula check against knowledge bases ---------
start = 0
if os.path.exists(outfile_mismatches):
mismatches_old = pd.read_csv(outfile_mismatches, sep="\t", index_col=0)
start = max(start, int(mismatches_old.tail(1)["model_index"][1]) + 1)
mismatches_old = None
if os.path.exists(outfile_formula_search):
formula_searches_old = pd.read_csv(outfile_formula_search, sep="\t", index_col=0)
start = max(start, int(formula_searches_old.tail(1)["model_index"][1]) + 1)
mismatches_old = None
mismatches = pd.DataFrame(columns=["model_index", "name", "spec_id", "ids_biocyc", "ids_metanetx", "ids_seed",
"formula_bigg", "formula_biocyc", "formula_metanetx", "formula_seed",
"formula_model",
"charge_bigg", "charge_biocyc", "charge_metanetx", "charge_seed",
"charge_model", "matching_db"])
formula_search = pd.DataFrame(columns=["model_index", "name", "spec_id", "formula_model",
"ids_biocyc", "ids_mnx", "ids_seed", "ids_kegg"])
form_comp = [False, False, False, False, False]
num_spec = model.getNumSpecies()
for i in tqdm(range(start, num_spec)):
# --------- Knowledge collection ---------
spec_id = str(model.getSpecies(i).getId())
try:
# Check for formula in model
formula_model = []
charge_model = []
name_model = ""
if model.getSpecies(i).getPlugin('fbc').isSetChemicalFormula():
formula_model = str(model.getSpecies(i).getPlugin('fbc').getChemicalFormula())
if model.getSpecies(i).getPlugin('fbc').isSetCharge():
charge_model = model.getSpecies(i).getPlugin('fbc').getCharge()
name_model = model.getSpecies(i).getName()
# BiGG formulas - commented out: extraction from tsv (contains no formula and charge)
# bigg_query = bigg_db.loc[bigg_db['bigg_id'] == pruned_id]
bigg_query = hf.bigg_request(spec_id[2:-2], "metabolites")
formulas_bigg = bigg_query["formulae"]
charges_bigg = bigg_query["charges"]
try:
inchikey = bigg_query["database_links"]["InChi Key"]["id"]
except KeyError:
inchikey = False
# Biocyc
formulas_biocyc = []
charges_biocyc = []
ids_biocyc = []
biocyc_req = hf.biocyc_request("GCF_000010185", "BIGG", spec_id[2:-2])
if not biocyc_req[0]["STATUS"] == 1:
biocyc_req = hf.biocyc_get_from_formula("GCF_000010185", formula_model)
form_comp[1] = True
if biocyc_req[0]["STATUS"] == 1:
for res in biocyc_req[0]["RESULTS"]:
ids_biocyc.append(res["ID"])
try:
biocyc_tree = hf.biocyc_get(id_org="meta", id_biocyc=res["ID"], detail="low")
charges_biocyc.append(
int(biocyc_tree["ptools-xml"]["Compound"]["cml"]["molecule"]["@formalCharge"]))
formulas_biocyc_str = biocyc_tree["ptools-xml"]["Compound"]["cml"]["molecule"]["formula"][
"@concise"]
formulas_biocyc.append(formulas_biocyc_str.replace(" ", ""))
except KeyError:
print(spec_id + ": no simple compound.")
                    except (HTTPError, RequestException):
print(spec_id + " failed in biocyc request.")
# MetaNetX
charges_mnx = []
formulas_mnx = []
ids_mnx = []
if inchikey:
mnx_query = mnx_db.loc[mnx_db['InChiKey'] == inchikey]
elif name_model != "":
mnx_query = mnx_db.loc[mnx_db['name'] == name_model]
elif formula_model != "":
mnx_query = mnx_db.loc[mnx_db['formula'] == formula_model]
form_comp[2] = True
else:
mnx_query = pd.DataFrame({'formula': [], 'charge': []})
for idx, row in mnx_query.iterrows():
ids_mnx.append(row["id"])
if row["formula"] != "" and row["charge"] != "":
formulas_mnx.append(row['formula'])
charges_mnx.append(row['charge'])
# SEED
formulas_seed = []
charges_seed = []
ids_seed = []
search = seed_db['aliases'].str.contains("BiGG: " + spec_id[2:-2])
if search.any():
seed_query = seed_db.loc[search]
elif inchikey:
seed_query = seed_db.loc[seed_db['inchikey'] == inchikey]
elif name_model != "":
seed_query = seed_db.loc[seed_db['name'] == name_model]
elif formula_model != "":
seed_query = seed_db.loc[seed_db['formula'] == formula_model]
form_comp[3] = True
else:
seed_query = pd.DataFrame({'formula': [], 'charge': []})
for idx, row in seed_query.iterrows():
ids_seed.append(row['id'])
formulas_seed.append(row['formula'])
charges_seed.append(int(row['charge']))
# KEGG
ids_kegg = []
if formula_model != "":
kegg_query = kegg.find("compound", formula_model, "formula").split("\n")
form_comp[4] = True
else:
kegg_query = pd.DataFrame({'formula': [], 'charge': []})
for kq in kegg_query:
ids_kegg.append(kq.split("\t")[0])
except Exception as e:
print(spec_id)
raise e
# --------- Knowledge vs. current entry - comparison ---------
matching_dbs = []
formula_matching_ids = []
if not model.getSpecies(i).getPlugin('fbc').isSetChemicalFormula():
mismatches.loc[len(mismatches.index)] = [i, model.getSpecies(i).getName(),
spec_id, ids_biocyc, ids_mnx, ids_seed,
formulas_bigg, formulas_biocyc, formulas_mnx, formulas_seed,
formula_model,
charges_bigg, charges_biocyc, charges_mnx, charges_seed,
charge_model,
matching_dbs]
continue
formulas_all = [formulas_bigg, formulas_biocyc, formulas_mnx, formulas_seed]
charges_all = [charges_bigg, charges_biocyc, charges_mnx, charges_seed]
ids_all = [spec_id[2:-2], ids_biocyc, ids_mnx, ids_seed]
comparisons_bool = []
for j in range(len(formulas_all)):
if form_comp[j]:
formula_matching_ids.append(ids_all[j])
else:
if tolerate_ch_h_bal:
for_cha_product = list(product(formulas_all[j], charges_all[j]))
else:
for_cha_product = formulas_all[j]
for fcp in for_cha_product:
if tolerate_ch_h_bal and charges_all[j] and charge_model:
comparison = hf.compare_formulas([fcp[0], formula_model], [int(fcp[1]), charge_model])
comparisons_bool.append(comparison)
else:
comparison = hf.compare_formulas([fcp[0], formula_model])
comparisons_bool.append(comparison)
if comparison:
matching_dbs.append(j)
# --------- Collection in table ---------
if True not in comparisons_bool:
mismatches.loc[len(mismatches.index)] = [i, model.getSpecies(i).getName(),
spec_id, ids_biocyc, ids_mnx, ids_seed,
formulas_all[0], formulas_all[1], formulas_all[2], formulas_all[3],
formula_model,
charges_all[0], charges_all[1], charges_all[2], charges_all[3],
charge_model,
matching_dbs]
formula_search.loc[len(formula_search.index)] = [i, model.getSpecies(i).getName(), spec_id, formula_model,
ids_biocyc, ids_mnx, ids_seed, ids_kegg]
# in between saves
if i % 50 == 25:
if os.path.exists(outfile_mismatches):
mismatches_old = pd.read_csv(outfile_mismatches, sep="\t", index_col=0)
mismatches = pd.concat([mismatches_old, mismatches])
mismatches.reset_index(drop=True, inplace=True)
mismatches_old = None
mismatches.to_csv(outfile_mismatches, sep="\t")
mismatches = pd.DataFrame(
columns=["model_index", "name", "spec_id", "ids_biocyc", "ids_metanetx", "ids_seed",
"formula_bigg", "formula_biocyc", "formula_metanetx", "formula_seed",
"formula_model",
"charge_bigg", "charge_biocyc", "charge_metanetx", "charge_seed",
"charge_model", "matching_db"])
if os.path.exists(outfile_formula_search):
formula_search_old = pd.read_csv(outfile_formula_search, sep="\t", index_col=0)
formula_search = pd.concat([formula_search_old, formula_search])
formula_search.reset_index(drop=True, inplace=True)
formula_search_old = None
formula_search.to_csv(outfile_formula_search, sep="\t")
formula_search = pd.DataFrame(columns=["model_index", "name", "spec_id", "formula_model",
"ids_biocyc", "ids_mnx", "ids_seed", "ids_kegg"])
# Exporting mismatches and formula search results
mismatches_old = pd.read_csv(outfile_mismatches, sep="\t", index_col=0)
mismatches = pd.concat([mismatches_old, mismatches])
mismatches.reset_index(drop=True, inplace=True)
mismatches_old = None
mismatches.to_csv(outfile_mismatches, sep="\t")
formula_search_old = pd.read_csv(outfile_formula_search, sep="\t", index_col=0)
formula_search = pd.concat([formula_search_old, formula_search])
formula_search.reset_index(drop=True, inplace=True)
formula_search_old = None
formula_search.to_csv(outfile_formula_search, sep="\t")
if __name__ == '__main__':
main(sys.argv)
```
#### File: JosuaCarl/Script_Assisted_Modeling/helper_functions.py
```python
import requests
import sys
import re
import libsbml
import xmltodict
import json
def delete_doubles(arr):
"""
:param arr: list()
:return: Given list, without duplicated entries
"""
arr2 = []
for element in arr:
if not arr2.__contains__(element):
arr2.append(element)
return arr2
def compare_formulas(formulas, charges=[]):
"""
compares formulas
    :param formulas: list of formula strings
    :param charges: charges of the molecules as a list of integers; if given, a difference in the
        number of H atoms is accepted when it is accounted for by the charge difference
    :return: True, if all formulas have the same components with the same amounts
    <formula> must be a string: an upper case character marks a new element (Mg12Ag2 = [Mg12, Ag2] & MG12AG2 = [M, G12, A, G2])
"""
# Separate Components of Formula
formulas_split = []
for formula in formulas:
formula_split = []
# separates the atoms
for char in formula:
if char.isupper():
formula_split.append(char)
else:
formula_split[len(formula_split) - 1] = formula_split[len(formula_split) - 1] + char
# adds "1" to formula, if no number is given at the end
for i in range(len(formula_split)):
if re.search("[0-9]", formula_split[i]) is None:
formula_split[i] = formula_split[i] + "1"
# adds separated formulas to a list
formulas_split.append(formula_split)
# Iterates through all formulas
for j in range(len(formulas_split) - 1):
for component in formulas_split[j]:
# accounts for hydrogen - charge relationship
if charges and not re.search("^H(?![a-z])+([0-9])*", component) is None:
component = int(component.split("H")[1])
component = component + (charges[j + 1] - charges[j])
component = "H" + str(component)
# Check next element for current element
if component not in formulas_split[j + 1]:
return False
# Check whether all components were in formula
if len(formulas_split[j]) != len(formulas_split[j + 1]):
return False
return True
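# --- Illustrative sketch, added for clarity; not part of the original module ---
# compare_formulas treats formulas as equal when every element count matches; when
# charges are supplied, a difference in H atoms that is explained by the charge
# difference (e.g. a deprotonated species) is accepted too. Never called automatically.
def _example_compare_formulas():
    same = compare_formulas(["H2O", "H2O"])                           # -> True
    deprotonated = compare_formulas(["C3H7NO2", "C3H6NO2"], [0, -1])  # -> True (H difference matches charge)
    different = compare_formulas(["CO2", "CO"])                       # -> False
    return same, deprotonated, different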
# ++ Get Metabolite Data from BiGG Database ++
def bigg_request(_id: str, search_type: str = "metabolites"):
"""
Requests an entry from the BIGG Database
:param _id: str e.g. "nh3"
:param search_type: str e.g. "metabolites"
:return: decoded .json into dictionary
"""
custom_request = "http://bigg.ucsd.edu/api/v2/universal/" + search_type + "/" + _id
req = requests.get(custom_request, headers={"Content-Type": "application/json"})
if not req.ok:
req.raise_for_status()
sys.exit()
decoded_req = req.json()
return decoded_req
# ++ Get Metabolite Data from Biocyc Database ++
def biocyc_request(id_org: str, db: str, id_db: str):
"""
Requests an entry from the BioCyc DB
:param db: Database e.g. BIGG, SEED,..
:param id_db: ID from Database e.g. atp, cpd0001
:param id_org: ID of organism e.g. GCF_000010185
:return: decoded .json into dictionary
"""
custom_request = f"https://websvc.biocyc.org/{id_org}/foreignid?ids={db}:{id_db}&fmt=json"
req = requests.get(custom_request, headers={"Content-Type": "application/json"})
if not req.ok:
req.raise_for_status()
sys.exit()
try:
decoded_req = req.json()
except json.decoder.JSONDecodeError:
assert id_org != "meta"
decoded_req = biocyc_request("meta", db, id_db)
return decoded_req
# KEGG Request Function
def kegg_get(org_id: str, kegg_id: str):
request_url = f"http://rest.kegg.jp/get/{org_id}:{kegg_id}"
req = requests.get(request_url).text.split("\n")
return req
# ++ Get Metabolite Data from BioCyc Database ++
def biocyc_get(id_org: str, id_biocyc: str, detail: str = "full"):
"""
Requests an entry from the BioCyc DB
:param detail: either none, low or full, defaults to full
:param id_biocyc: ID of object e.g. ATP
:param id_org: ID of organism e.g. GCF_000010185
:return: decoded .xml into dictionary
"""
custom_request = f"https://websvc.biocyc.org/getxml?id={id_org}:{id_biocyc}&detail={detail}"
req = requests.get(custom_request)
if not req.ok:
req.raise_for_status()
sys.exit()
decoded_req = xmltodict.parse(req.content)
return decoded_req
def biocyc_get_from_formula(id_org: str, formula: str):
"""
    Requests the compounds matching a chemical formula from the BioCyc DB
    :param formula: chemical formula to search for
    :param id_org: ID of organism e.g. GCF_000010185
    :return: decoded .json into dictionary
"""
custom_request = f"https://websvc.biocyc.org/{id_org}/CF?cfs={formula}&fmt=json"
req = requests.get(custom_request)
if not req.ok:
req.raise_for_status()
sys.exit()
try:
decoded_req = req.json()
except json.decoder.JSONDecodeError:
assert id_org != "meta"
decoded_req = biocyc_get_from_formula("meta", formula)
return decoded_req
def make_cv_term(link: str, qual_type=libsbml.BQB_IS):
"""
:param qual_type:
:param link: string that is added to CV-Term
:return: libsbml.CVTerm
This method is not generic, but only creates species and reaction standard CV Terms.
"""
c = libsbml.CVTerm()
c.setQualifierType(libsbml.BIOLOGICAL_QUALIFIER)
c.setBiologicalQualifierType(qual_type)
c.addResource(link)
return c
def add_link_annotation_species(model, lnk, qual_type, s_id):
"""
:param qual_type: libsbml.QUALIFIER
:param model: libsbml.model
:param lnk: string
:param s_id: string
:return: libsbml.model
"""
cv_term = make_cv_term(lnk, qual_type)
# eliminate duplicates
list_cv = []
for i in range(model.getSpecies(s_id).getNumCVTerms()):
list_cv.append(model.getSpecies(s_id).getCVTerm(i))
if cv_term not in list_cv:
model.getSpecies(s_id).addCVTerm(cv_term)
return model
def add_link_annotation_reaction(model, lnk, qual_type, s_id):
"""
:param qual_type: libsbml.QUALIFIER
:param model: libsbml.model
:param lnk: string
:param s_id: string
:return: libsbml.model
"""
cv_term = make_cv_term(lnk, qual_type)
# eliminate duplicates
list_cv = []
for k in range(model.getReaction(s_id).getNumCVTerms()):
list_cv.append(model.getReaction(s_id).getCVTerm(k))
if cv_term not in list_cv:
model.getReaction(s_id).addCVTerm(cv_term)
return model
def add_note_species(model, note: str, fbc_id):
"""
:param fbc_id: str
:param model: libsbml.model
:param note: str
:return: libsbml.model
"""
str_note = f"<body xmlns=\"http://www.w3.org/1999/xhtml\">\n <p>{note}</p>\n </body>"
if not model.getSpecies(fbc_id).isSetNotes():
model.getSpecies(fbc_id).setNotes(str_note)
else:
notes_curent = model.getSpecies(fbc_id).getNotes().toXMLString()
if note not in notes_curent:
model.getSpecies(fbc_id).appendNotes(str_note)
return model
def add_note_gene_product(model, note: str, fbc_id):
"""
:param fbc_id: str
:param model: libsbml.model
:param note: str
:return: libsbml.model
"""
str_note = f"<body xmlns=\"http://www.w3.org/1999/xhtml\">\n <p>{note}</p>\n </body>"
if not model.getPlugin('fbc').getGeneProduct(fbc_id).isSetNotes():
model.getPlugin('fbc').getGeneProduct(fbc_id).setNotes(str_note)
else:
notes_curent = model.getPlugin('fbc').getGeneProduct(fbc_id).getNotes().toXMLString()
if note not in notes_curent:
model.getPlugin('fbc').getGeneProduct(fbc_id).appendNotes(str_note)
return model
def add_note_reaction(model, note: str, fbc_id):
"""
:param model: libsbml.model
:param note: str
:param fbc_id: str
:return:
"""
str_note = f"<body xmlns=\"http://www.w3.org/1999/xhtml\">\n <p>{note}</p>\n </body>"
if not model.getReaction(fbc_id).isSetNotes():
model.getReaction(fbc_id).setNotes(str_note)
else:
notes_curent = model.getReaction(fbc_id).getNotes().toXMLString()
if note not in notes_curent:
model.getReaction(fbc_id).appendNotes(str_note)
return model
def dict_add_overlap_to_list(orig_dict, extend_dict):
for k, v in extend_dict.items():
if k not in orig_dict:
orig_dict[k] = v
else:
if hasattr(orig_dict[k], '__iter__') and not isinstance(orig_dict[k], str):
orig_dict[k] = set(orig_dict[k])
else:
orig_dict[k] = {orig_dict[k]}
if hasattr(v, '__iter__') and not isinstance(v, str):
orig_dict[k] |= set(v)
else:
orig_dict[k] |= {v}
orig_dict[k] = list(orig_dict[k])
return orig_dict
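# --- Illustrative sketch, added for clarity; not part of the original module ---
# dict_add_overlap_to_list merges a second annotation dict into the first; keys present
# in both end up as a list holding the union of the values (order not guaranteed, since
# a set is used internally). The identifiers below are made up; never called automatically.
def _example_dict_add_overlap_to_list():
    orig = {"kegg.genes": "fma:FMG_0001"}
    extra = {"kegg.genes": "fma:FMG_0002", "refseq": "WP_000001.1"}
    merged = dict_add_overlap_to_list(orig, extra)
    # merged["kegg.genes"] -> ["fma:FMG_0001", "fma:FMG_0002"] (in some order)
    # merged["refseq"]     -> "WP_000001.1"
    return merged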
``` |
{
"source": "JosuaKrause/parcell",
"score": 2
} |
#### File: parcell/parcell/connector.py
```python
from __future__ import print_function
from __future__ import division
import os
import sys
import math
import logging
import argparse
import threading
from rpaths import PosixPath
from tej import RemoteQueue, JobNotFound, RemoteCommandFailure, JobAlreadyExists
import loading
def msg(message, *args, **kwargs):
print(message.format(*args, **kwargs), file=sys.stdout)
def set_msg(m):
global msg
msg = m
loading.set_msg(m)
def get_envs():
return loading.get_envs()
def get_servers():
return loading.get_servers()
def get_projects():
return loading.get_projects()
def set_password_reuse(reuse_pw):
loading.set_password_reuse(reuse_pw)
def init_passwords():
loading.init_passwords()
def get_connector(project):
with loading.MAIN_LOCK:
if project not in Connector._ALL_CONNECTORS:
Connector(project) # adds itself to the list
return Connector._ALL_CONNECTORS[project]
class Connector(object):
SCRIPT_FILE = "_start"
_ALL_CONNECTORS = {}
def __init__(self, p):
self._lock = threading.RLock()
self._job_number = 0
self._project = loading.get_project(p)
self._rqs = dict([ (s.name, loading.get_remote(s)) for s in self._project["servers"] ])
Connector._ALL_CONNECTORS[p] = self
def get_path(self):
return self._project.path_local
def get_commands(self):
return self._project.commands
def get_env(self):
return self._project["env"].name
def _get_env(self, rq, chk):
if len(chk) == 4:
name, cmd, regex, line = chk
else:
name, cmd, regex, line, _ = chk
output = rq.check_output(cmd)
oarr = output.split("\n")
if line >= len(oarr):
raise ValueError("line {0} not in:\n{1}".format(line, oarr))
m = regex.search(oarr[line])
if m is None:
raise ValueError("unexpected mismatch {0} not in:\n{1}".format(regex.pattern, oarr[line]))
return name, m.group(1)
def get_vital_value(self, rq, chk):
name, c = self._get_env(rq, chk)
asc = chk[4]
if c:
try:
return name, float(c), asc
            except (TypeError, ValueError):
pass
return name, float('nan'), asc
def get_vitals(self, rq):
return [ self.get_vital_value(rq, b) for b in self._project["env"]["vital"] ]
def get_servers(self):
return [ s.name for s in self._project["servers"] ]
def get_servers_info(self):
return [ {
"server": s,
"vital": self.get_vital_value(self._rqs[s], self._project["env"]["vital"][0])[1],
} for s in self.get_servers() ]
def get_server_stats(self, s):
server = self._project.servers[s]
rq = self._rqs[s]
return {
"name": server["hostname"],
"versions": [ self._get_env(rq, chk) for chk in self._project["env"]["versions"] ],
"vitals": self.get_vitals(self._rqs[s]),
}
def get_all_vitals(self):
return [ (s, self.get_vitals(self._rqs[s])) for s in self.get_servers() ]
def get_best_server(self):
servers = self.get_servers()
if len(servers) < 2:
return servers[0] if servers else None
all_vitals = self.get_all_vitals()
cur_ix = 0
best_s = []
best_num = float('nan')
while len(best_s) < 2 and cur_ix < len(all_vitals[0][1]):
for (s, cur) in all_vitals:
_, num, asc = cur[cur_ix]
if math.isnan(best_num):
best_s = [ s ]
best_num = num
elif num == best_num:
best_s.append(s)
else:
if asc:
if num < best_num:
best_s = [ s ]
best_num = num
else:
if num > best_num:
best_s = [ s ]
best_num = num
cur_ix += 1
return best_s[0] if len(best_s) > 0 else None
_STATUS = dict([
(RemoteQueue.JOB_DONE, "done"),
(RemoteQueue.JOB_RUNNING, "running"),
(RemoteQueue.JOB_INCOMPLETE, "incomplete"),
(RemoteQueue.JOB_CREATED, "created"),
("missing", "missing"),
("error", "error"),
])
def get_all_jobs(self):
        def desc(s, j, info):
            if info["status"] == RemoteQueue.JOB_DONE:
                if "result" not in info:  # FIXME: hack for tej without result in list
                    return self.get_job_status(s, j)[0]
                if int(info["result"]) != 0:
                    return Connector._STATUS["error"]
            return Connector._STATUS.get(info["status"], "?")
return [ (s, j, desc(s, j, i)) for s in self.get_servers() for (j, i) in self.get_job_list(s) ]
def get_job_list(self, s):
prefix = "{0}_".format(self._project.name)
rq = self._rqs[s]
return [ ji for ji in loading.list_jobs(rq) if ji[0].startswith(prefix) ]
def get_job_status(self, s, j):
rq = self._rqs[s]
try:
status, _, result = rq.status(j)
if status == RemoteQueue.JOB_DONE and int(result) != 0:
status = "error"
except JobNotFound:
status = "missing"
result = "?"
except RemoteCommandFailure as rcf:
status = "error"
result = rcf.ret
return Connector._STATUS.get(status, "?"), result
def submit_job(self, s, cmd):
if not cmd.strip():
raise ValueError("cannot execute empty command: {0}".format(cmd))
with self._lock:
rq = self._rqs[s]
path = self._project.path_local
self._project.add_cmd(cmd)
with open(os.path.join(path, Connector.SCRIPT_FILE), 'wb') as f:
print(cmd, file=f)
while True:
try:
job_name = "{0}_{1}".format(self._project.name, self._job_number)
call = "sh -l ./{0}".format(Connector.SCRIPT_FILE)
return rq.submit(job_name, path, call)
except JobAlreadyExists:
pass
finally:
self._job_number += 1
def delete_job(self, s, j):
with self._lock:
rq = self._rqs[s]
loading.kill_job(rq, s, j)
def delete_all_jobs(self):
with self._lock:
for (s, j, _) in self.get_all_jobs():
self.delete_job(s, j)
def get_job_files(self, s, j, rel_path):
rq = self._rqs[s]
status, path, result = rq.status(j)
rel_path = PosixPath(rel_path)
if rel_path.is_absolute:
rel_path = PosixPath(".")
res = rq.check_output("ls -p1t {0}".format(str(path / rel_path))).split("\n")
if rel_path != ".":
res.insert(0, "../")
return res
def get_job_file(self, s, j, req_file):
rq = self._rqs[s]
status, path, result = rq.status(j)
path = PosixPath(loading.DIR_TEMP) / s / j
res = str(path / req_file)
path_str = os.path.dirname(res)
if not os.path.exists(path_str):
os.makedirs(path_str)
if not os.path.exists(res) or status == RemoteQueue.JOB_RUNNING:
try:
rq.download(j, [ req_file ], destination=path_str)
except JobNotFound:
return None
return res
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parcell Connector')
parser.add_argument('--reuse-pw', action='store_true', dest='reuse_pw', help="only ask for one password")
parser.add_argument('-v', '--verbose', action='count', default=1, dest='verbosity', help="augments verbosity level")
parser.add_argument('project', type=str, nargs='?', help="project file")
args = parser.parse_args()
levels = [ logging.CRITICAL, logging.WARNING, logging.INFO, logging.DEBUG ]
logging.basicConfig(level=levels[min(args.verbosity, 3)])
if not args.project:
for p in loading.get_projects():
print(p)
exit(0)
msg("{0}", " ".join(sys.argv))
msg("initializing passwords -- please type as prompted")
set_password_reuse(args.reuse_pw)
init_passwords()
msg("initializing passwords -- done")
conn = Connector(args.project)
for s in conn.get_servers():
for (k, v) in conn.get_server_stats(s).items():
if isinstance(v, (list, tuple)):
print("{0}:".format(k))
for (kk, vv) in v:
print(" {0}: {1}".format(kk, vv))
else:
print("{0}: {1}".format(k, v))
```
#### File: parcell/parcell/loading.py
```python
from __future__ import print_function
from __future__ import division
import os
import re
import sys
import json
import time
import atexit
import base64
import shutil
import getpass
import hashlib
import binascii
import paramiko
import threading
import traceback
from rpaths import PosixPath
from tej import RemoteQueue, parse_ssh_destination, QueueDoesntExist, RemoteCommandFailure, JobNotFound
from tunnel import start_tunnel, check_tunnel, check_permission_denied
def simple_msg(message, *args, **kwargs):
print(message.format(*args, **kwargs), file=sys.stdout)
msg = simple_msg
def set_msg(m):
global msg
msg = m
MAIN_LOCK = threading.RLock()
DEFAULT_BASE = os.path.dirname(__file__)
DIR_ENV_DEFAULT = os.path.join(DEFAULT_BASE, "default_envs")
DIR_ENV = "envs"
DIR_SERVER = "servers"
DIR_PROJECT = "projects"
DIR_TEMP = "temp_files"
EXT = ".json"
DIR_REMOTE_TEJ = "~/.parcell"
LOCALHOST = "127.0.0.1"
DEFAULT_REGEX = "(.*)"
DEFAULT_LINE = 0
UPGRADE_ENV = []
UPGRADE_SERVER = []
UPGRADE_PROJECT = []
def upgrade(array, version):
def wrapper(func):
if len(array) != version:
raise ValueError("upgrade definition in wrong order {0} != {1}".format(len(array), version))
array.append(func)
return func
return wrapper
def _get_config_list(config, default=None, no_default=False):
if not os.path.exists(config):
if no_default:
return []
os.makedirs(config)
res = [ c[:-len(EXT)] for c in os.listdir(config) if c.endswith(EXT) ]
if default is not None and not no_default:
res += [ c[:-len(EXT)] for c in os.listdir(default) if c.endswith(EXT) ]
res = list(set(res))
return res
def get_envs(no_default=False):
return _get_config_list(DIR_ENV, DIR_ENV_DEFAULT, no_default=no_default)
def get_servers(no_default=False):
return _get_config_list(DIR_SERVER, no_default=no_default)
def get_projects(no_default=False):
return _get_config_list(DIR_PROJECT, no_default=no_default)
def _get_path(path, name):
return os.path.join(path, "{0}{1}".format(name, EXT))
def _write_json(path, obj):
with open(path, 'wb') as f:
json.dump(obj, f, indent=2, sort_keys=True)
def _rm_json(config, path):
if os.path.exists(path):
os.remove(path)
if not _get_config_list(config, no_default=True):
os.rmdir(config)
CONFIG_LOG = threading.RLock()
ALL_CONFIG = {}
CONFIG_NUM = 0
def _close_all_config():
with CONFIG_LOG:
for c in list(ALL_CONFIG.values()):
c.close()
atexit.register(_close_all_config)
class Config(object):
def __init__(self, name):
global CONFIG_NUM
if not _check_name(name):
raise ValueError("bad character '{0}' in name '{1}'".format(_get_bad_chars(name)[0], name))
self._name = name
self._chg = False
self._closed = True
self._deleted = False
with CONFIG_LOG:
self._config_num = CONFIG_NUM
CONFIG_NUM += 1
self._reopen()
def is_deleted(self):
return self._deleted
def _reopen(self):
if not self.is_closed():
return
with CONFIG_LOG:
if self.is_deleted():
return
self._obj = self._read()
self._closed = False
ALL_CONFIG[self._config_num] = self
def close(self):
with CONFIG_LOG:
if self._config_num in ALL_CONFIG:
del ALL_CONFIG[self._config_num]
if self._chg and not self.is_closed() and not self.is_deleted():
self._chg = False
self._write(self.write_object(self._obj))
self._closed = True
def is_closed(self):
return self._closed
def _get_config_path(self, config):
return _get_path(config, self._name)
def _read(self):
if self.is_deleted():
raise ValueError("server description does not exist!")
config = self.get_config_dir()
if not os.path.exists(config):
os.makedirs(config)
path = self._get_config_path(config)
is_new = False
default = self.get_default_dir()
if not os.path.exists(path) and default is not None:
path = self._get_config_path(default)
msg("{0}", path)
is_new = True
with open(path, 'rb') as f:
res = json.load(f)
res, chg = self._check_version(res)
if chg or is_new:
if not is_new:
os.rename(path, path + ".old")
self._write(res)
return self.read_object(res)
def _check_version(self, obj):
upgrade = self.get_upgrade_list()
v = int(obj.get("version", 0))
chg = False
while v < len(upgrade):
obj = upgrade[v](obj)
v += 1
obj["version"] = v
chg = True
return obj, chg
def _write(self, obj):
if self.is_deleted():
return
config = self.get_config_dir()
if not os.path.exists(config):
os.makedirs(config)
obj["version"] = len(self.get_upgrade_list())
_write_json(self._get_config_path(config), obj)
def delete_file(self):
with CONFIG_LOG:
self._deleted = True
self.close()
config = self.get_config_dir()
_rm_json(config, self._get_config_path(config))
def get_config_dir(self):
raise NotImplementedError("get_config_dir")
def get_default_dir(self):
return None
def get_upgrade_list(self):
raise NotImplementedError("get_upgrade_list")
def set_change(self, chg):
self._chg = chg
if chg:
self._reopen()
def has_change(self):
return self._chg
def read_object(self, obj):
return obj
def write_object(self, obj):
return obj
def __getitem__(self, key):
self._reopen()
return self._obj[key]
def __setitem__(self, key, value):
self._reopen()
if key not in self._obj or self._obj[key] != value:
self._obj[key] = value
self.set_change(True)
def __contains__(self, key):
self._reopen()
return key in self._obj
def get(self, key, default=None):
if key not in self:
return default
return self[key]
@property
def name(self):
return self._name
def get_obj(self, skip=None):
self._reopen()
return dict(
it for it in self._obj.items() if skip is None or it[0] not in skip
)
class EnvConfig(Config):
def __init__(self, name):
super(EnvConfig, self).__init__(name)
def get_config_dir(self):
return DIR_ENV
def get_default_dir(self):
return DIR_ENV_DEFAULT
def get_upgrade_list(self):
return UPGRADE_ENV
def read_object(self, obj):
def get(field, version):
res = []
if field in obj:
for e in obj[field]:
name = e["name"]
cmd = e["cmd"]
regex = re.compile(e.get("regex", DEFAULT_REGEX))
line = int(e.get("line", DEFAULT_LINE))
if not version:
asc = e.get("asc", True)
res.append((name, cmd, regex, line, asc))
else:
res.append((name, cmd, regex, line))
return res
return {
"versions": get("versions", True),
"vital": get("vital", False),
}
def write_object(self, obj):
def conv(e, version):
if not version:
name, cmd, regex, line, asc = e
res = {
"name": name,
"cmd": cmd,
"asc": asc,
}
else:
name, cmd, regex, line = e
res = {
"name": name,
"cmd": cmd,
}
if regex.pattern != DEFAULT_REGEX:
res["regex"] = regex.pattern
if line != DEFAULT_LINE:
res["line"] = line
return res
return {
"versions": [ conv(e, True) for e in obj["versions"] ],
"vital": [ conv(e, False) for e in obj["vital"] ],
}
@upgrade(UPGRADE_ENV, 0)
def up_e0(obj):
obj["vital"] = obj["cpus"]
del obj["cpus"]
for o in obj["vital"]:
o["asc"] = True
return obj
ALL_ENVS = {}
def get_env(e):
with MAIN_LOCK:
if e not in ALL_ENVS:
ALL_ENVS[e] = EnvConfig(e)
return ALL_ENVS[e]
SERVER_SKIP_KEYS = frozenset([
"needs_pw",
"tunnel",
"tunnel_port",
"needs_tunnel_pw",
"key",
"version",
])
class ServerConfig(Config):
def __init__(self, name):
super(ServerConfig, self).__init__(name)
def get_config_dir(self):
return DIR_SERVER
def get_upgrade_list(self):
return UPGRADE_SERVER
def read_object(self, obj):
if "password" in obj:
raise ValueError("password should not be stored in config! {0}".format(self._name))
return obj
def write_object(self, obj):
return dict((k, v) for (k, v) in obj.items() if k != "password")
def get_destination_obj(self, front):
res = self.get_obj(SERVER_SKIP_KEYS)
if front and "tunnel_port" in self:
res["hostname"] = LOCALHOST
res["port"] = self["tunnel_port"]
return res
def __setitem__(self, key, value):
chg = self.has_change()
super(ServerConfig, self).__setitem__(key, value)
if key == "password":
self.set_change(chg)
def check_key(self, hostname, key_type, key_base64, key_fp):
if hostname != self["hostname"]:
raise ValueError("mismatching hostname '{0}' != '{1}'".format(hostname, self["hostname"]))
kobj = self.get("key", {})
known_base64 = kobj.get("base64", None)
if known_base64 is None:
replay_fp = hashlib.md5(base64.decodestring(key_base64)).hexdigest()
if replay_fp != key_fp:
raise ValueError("Error encoding fingerprint of '{0}'! {1} != {2}\n{3}: {4}".format(hostname, replay_fp, key_fp, key_type, key_base64))
msg("The authenticity of host '{0}' can't be established.", hostname)
pretty_fp = ':'.join(a + b for (a, b) in zip(key_fp[::2], key_fp[1::2]))
msg("{0} key fingerprint is {1}.", key_type, pretty_fp)
if not _ask_yesno("Are you sure you want to continue connecting?"):
sys.exit(1)
self["key"] = {
"type": key_type,
"base64": key_base64,
}
# FIXME: there might be a better way
if key_type != self["key"]["type"]:
raise ValueError("mismatching key type for '{0}'. '{1}' != '{2}'".format(hostname, key_type, self["key"]["type"]))
if key_base64 != self["key"]["base64"]:
raise ValueError("mismatching {0} key for '{1}'. '{2}' != '{3}'".format(key_type, hostname, key_base64, self["key"]["base64"]))
@upgrade(UPGRADE_SERVER, 0)
def up_s0(obj):
obj["key"] = {
"type": None,
"base64": None,
}
return obj
ALL_SERVERS = {}
def get_server(s):
with MAIN_LOCK:
if s not in ALL_SERVERS:
ALL_SERVERS[s] = ServerConfig(s)
return ALL_SERVERS[s]
class ProjectConfig(Config):
def __init__(self, name):
super(ProjectConfig, self).__init__(name)
if not os.path.exists(self.path_local):
os.makedirs(self.path_local)
def get_config_dir(self):
return DIR_PROJECT
def get_upgrade_list(self):
return UPGRADE_PROJECT
def read_object(self, obj):
return {
"local": obj["local"],
"cmds": obj["cmds"],
"env": get_env(obj["env"]),
"servers": [ get_server(s) for s in obj["servers"] ],
}
def write_object(self, obj):
return {
"local": obj["local"],
"cmds": obj["cmds"],
"env": obj["env"].name,
"servers": [ s.name for s in obj["servers"] ],
}
@property
def path_local(self):
return self["local"]
@property
def commands(self):
return self["cmds"]
def remove_server(self, server):
self["servers"] = [ s for s in self["servers"] if s.name != server ]
def add_cmd(self, cmd):
cmd = cmd.strip()
if not cmd:
return
if cmd in self["cmds"] and cmd == self["cmds"][0]:
return
self["cmds"] = [ cmd ] + [ c for c in self["cmds"] if c != cmd ]
@property
def servers(self):
return dict( (s.name, s) for s in self["servers"] )
@upgrade(UPGRADE_PROJECT, 0)
def up_p0(obj):
obj["cmds"] = [ obj["cmd"] ]
del obj["cmd"]
return obj
ALL_PROJECTS = {}
def get_project(p):
with MAIN_LOCK:
if p not in ALL_PROJECTS:
ALL_PROJECTS[p] = ProjectConfig(p)
return ALL_PROJECTS[p]
def _get_tunnel_ports():
sobjs = [ get_server(n) for n in get_servers() ]
return [ int(s["tunnel_port"]) for s in sobjs if "tunnel_port" in s ]
_REUSE_PW = False
def set_password_reuse(reuse_pw):
global _REUSE_PW
_REUSE_PW = reuse_pw
_GLOBAL_PASSWORD = None
_ALL_PWS = {}
_ASK_REUSE = True
_ASK_REUSE_PRIMED = None
def ask_password(user, address):
global _GLOBAL_PASSWORD
global _ASK_REUSE
global _ASK_REUSE_PRIMED
pw_id = (user, address)
if pw_id not in _ALL_PWS:
if _ASK_REUSE_PRIMED is not None and _ask_yesno("Do you want to reuse this password for other servers"):
set_password_reuse(True)
res = _ASK_REUSE_PRIMED
_ASK_REUSE_PRIMED = None
_ASK_REUSE = False
auto = True
elif _REUSE_PW and _GLOBAL_PASSWORD is not None:
res = _GLOBAL_PASSWORD
auto = True
else:
res = _getpass("password for {0}@{1}:".format(user, address))
if _ASK_REUSE_PRIMED is not None:
_ASK_REUSE_PRIMED = None
_ASK_REUSE = False
elif _ASK_REUSE:
_ASK_REUSE_PRIMED = res
auto = False
if _REUSE_PW and _GLOBAL_PASSWORD is None:
_GLOBAL_PASSWORD = res
if auto:
msg("Password for {0}@{1} is known", user, address)
_ALL_PWS[pw_id] = res
return _ALL_PWS[pw_id]
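# Illustrative password-reuse behaviour of ask_password (hypothetical session,
# explanatory comments only):
#   ask_password("alice", "host-a")  # prompts; the answer is primed for reuse
#   ask_password("alice", "host-b")  # asks whether to reuse; on "yes" the primed
#                                    # answer becomes the global password and is
#                                    # returned without prompting again
#   ask_password("alice", "host-a")  # cached per (user, address), no prompt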
def _setup_tunnel(server):
with MAIN_LOCK:
s = server.name
tunnel = parse_ssh_destination(server["tunnel"])
if "password" in tunnel:
raise ValueError("tunnel password should not be stored in config! {0}@{1}:{2}".format(tunnel["username"], tunnel["hostname"], tunnel["port"]))
if server.get("needs_tunnel_pw", False):
tunnel["password"] = ask_password(tunnel["username"], tunnel["hostname"])
start_tunnel(s, tunnel, server.get_destination_obj(False), server["tunnel_port"])
class LocalAddPolicy(paramiko.client.MissingHostKeyPolicy):
def __init__(self, s_obj):
self.s_obj = s_obj
super(LocalAddPolicy, self).__init__()
def missing_host_key(self, client, hostname, key):
server = self.s_obj
if "tunnel_port" in server and hostname == "[{0}]:{1}".format(LOCALHOST, server["tunnel_port"]):
hostname = server["hostname"]
server.check_key(hostname, key.get_name(), key.get_base64(), binascii.hexlify(key.get_fingerprint()))
class TunnelableRemoteQueue(RemoteQueue):
def __init__(self, *args, **kwargs):
# needs to be before actual constructor because
# _ssh_client is called from within
self.s_obj = kwargs.pop("s_obj")
super(TunnelableRemoteQueue, self).__init__(*args, **kwargs)
def _ssh_client(self):
ssh = super(TunnelableRemoteQueue, self)._ssh_client()
ssh.set_missing_host_key_policy(LocalAddPolicy(self.s_obj))
return ssh
ALL_REMOTES = {}
def get_remote(server):
with MAIN_LOCK:
s = server.name
if "tunnel" in server and not check_tunnel(s):
_setup_tunnel(server)
if s not in ALL_REMOTES:
if server.get("needs_pw", False) and "password" not in server:
raise ValueError("no password found in {0}".format(s))
remote_dir = "{0}_{1}".format(DIR_REMOTE_TEJ, s)
dest = server.get_destination_obj(True)
while s not in ALL_REMOTES:
try:
ALL_REMOTES[s] = TunnelableRemoteQueue(dest, remote_dir, s_obj=server)
except paramiko.ssh_exception.NoValidConnectionsError as e:
if e.errno is None:
if "tunnel" in server:
if check_permission_denied(s):
msg("Incorrect password for {0}.", server["tunnel"])
sys.exit(1)
if not check_tunnel(s):
msg("Error starting tunnel! Re-run with -vv for more information.")
sys.exit(1)
time.sleep(1)
else:
raise e
return ALL_REMOTES[s]
def test_connection(server, save):
s = server.name
if server.get("needs_pw", False):
server["password"] = ask_password(server["username"], server["hostname"])
msg("Checking connectivity of {0}", s)
conn = get_remote(server)
conn.check_call("hostname")
if save:
server.set_change(True)
server.close()
def init_passwords():
with MAIN_LOCK:
for s in get_servers():
test_connection(get_server(s), False)
def _check_project(name):
p = get_project(name)
for s in p["servers"]:
test_connection(s, True)
p.set_change(True)
p.close()
def list_jobs(rq):
try:
return [ ji for ji in rq.list() ]
except QueueDoesntExist:
return []
def kill_job(rq, s, j):
try:
rq.kill(j)
except (RemoteCommandFailure, JobNotFound):
pass
try:
rq.delete(j)
except JobNotFound:
pass
path = str(PosixPath(DIR_TEMP) / s / j)
if os.path.exists(path):
shutil.rmtree(path)
def remove_server(s):
with MAIN_LOCK:
msg("removing server '{0}' from projects", s)
for p in get_projects(no_default=True):
get_project(p).remove_server(s)
msg("stopping all jobs on '{0}'", s)
server = get_server(s)
test_connection(server, False)
rq = get_remote(server)
for (j, _) in list_jobs(rq):
kill_job(rq, s, j)
rpath = str(rq.queue)
msg("removing server side files '{0}'", rpath)
rq.check_call("rm -rf -- {0}".format(rpath))
msg("removing server description '{0}'", s)
server.delete_file()
def remove_all():
with MAIN_LOCK:
msg("removing all servers")
for s in get_servers(no_default=True):
remove_server(s)
msg("removing all projects")
for p in get_projects(no_default=True):
msg("removing project '{0}'", p)
get_project(p).delete_file()
msg("removing all environments")
for e in get_envs(no_default=True):
msg("removing environment '{0}'", p)
get_env(e).delete_file()
msg("Successfully removed all local and remote data!")
ALLOW_ASK = True
def allow_ask(allow):
global ALLOW_ASK
ALLOW_ASK = allow
def _getline(line):
if not ALLOW_ASK:
msg("Not allowed to use prompt! Terminating!\n{0}--", line)
sys.exit(1)
return raw_input(line).rstrip("\r\n")
def _getpass(line):
if not ALLOW_ASK:
msg("Not allowed to use prompt! Terminating!\n{0}--", line)
sys.exit(1)
return getpass.getpass(line)
def _ask(line, default=None, must=True):
line = "{0}{1}: ".format(line, '' if default is None else " ({0})".format(default))
while True:
res = _getline(line)
if res != '':
break
if default is not None:
res = default
break
if not must:
break
return res
def _ask_yesno(line):
line = "{0} (yes|no): ".format(line)
while True:
res = _getline(line)
if res == 'yes' or res == 'y':
res = True
break
if res == 'no' or res == 'n':
res = False
break
return res
def confirm_critical(line, expect):
msg("{0}", line)
res = _getline("Type '{0}' to confirm: ".format(expect))
return res == expect
PORT_LOWER = 1
PORT_UPPER = 65535
def _ask_port(line, default=None):
while True:
res = _ask(line, default)
try:
res = int(res)
if res >= PORT_LOWER and res <= PORT_UPPER:
break
except ValueError:
pass
msg("Must be integer in the range of {0}--{1}".format(PORT_LOWER, PORT_UPPER))
return res
def _ask_choice(line, of=[], special={}):
num_of = len(of)
num_special = len(special.keys())
if not num_of and not num_special:
raise ValueError("no choices!")
if num_of + num_special == 1:
return (0, of[0], True) if num_of else tuple(list(special.items()[0]) + [ False ])
while True:
msg("{0}:", line)
for (ix, o) in enumerate(of):
msg(" ({0}): {1}", ix, o)
for (k, v) in special.items():
msg(" ({0}): {1}", k, v)
res = _getline("Please select: ")
if res in special:
res = (res, special[res], False)
break
try:
res = int(res)
if res >= 0 and res < len(of):
res = (res, of[res], True)
break
except ValueError:
pass
return res
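# Illustrative behaviour of _ask_choice (hypothetical session, not original code):
#   _ask_choice("Add server", of=["alpha", "beta"], special={"a": "..add new server"})
#   typing "1" returns (1, "beta", True)                 # index, value, from `of`
#   typing "a" returns ("a", "..add new server", False)  # key, label, special command
# With exactly one choice in total the function returns it without prompting.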
def _ask_server_list():
servers = []
while True:
opt_list = [ s for s in get_servers() if s not in servers ]
opt_cmds = {
"a": "..add new server",
"l": "..list selection",
}
if servers:
opt_cmds["d"] = "..done"
cmd, el, is_name = _ask_choice("Add server", opt_list, opt_cmds)
if is_name:
servers.append(el)
elif cmd == "a":
msg("Adding new server..")
name, okay = add_server()
if okay:
servers.append(name)
else:
msg("Creating server failed..")
elif cmd == "d":
break
elif cmd == "l":
msg("Currently selected servers:")
for s in servers:
msg(" {0}", s)
return servers
VALID_NAME_CHARS = frozenset("ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"abcdefghijklmnopqrstuvwxyz" \
"0123456789_-+=@%:.,")
def _check_name(name):
return all(c in VALID_NAME_CHARS for c in name)
def _get_bad_chars(name):
return [ c for c in name if c not in VALID_NAME_CHARS ]
def add_project(name):
if not _check_name(name):
msg("Invalid character {0} in project name '{1}'", _get_bad_chars(name)[0], name)
return False
if name in get_projects():
msg("Project '{0}' already exists!", name)
return False
msg("Create project '{0}'.", name)
project = {
"cmds": [],
"version": 1,
}
project["local"] = _ask("Project root", default=os.path.join(DIR_PROJECT, name))
_, env, _ = _ask_choice("Environment", of=get_envs())
project["env"] = env
project["servers"] = _ask_server_list()
_write_json(_get_path(DIR_PROJECT, name), project)
msg("Checking project configuration")
_check_project(name)
msg("Successfully created project '{0}'!", name)
return True
def _infer_server_name(hostname):
if not _check_name(hostname):
return None
cur_host = hostname
name = ''
while '.' in cur_host:
dot = cur_host.index('.')
name = "{0}{1}{2}".format(name, '.' if name != '' else '', cur_host[:dot])
if name not in get_servers():
return name
cur_host = cur_host[dot+1:]
return None if hostname in get_servers() else hostname
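# Example of the inference above (illustrative only): for the hostname
# "login.cluster.example.com" the candidate names tried are "login",
# "login.cluster" and "login.cluster.example"; the first one not already taken
# by an existing server is suggested, falling back to the full hostname.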
def add_server():
hostname = _ask("Hostname")
name = _ask("Server name", default=_infer_server_name(hostname))
if not _check_name(name):
msg("Invalid character {0} in server name '{1}'", _get_bad_chars(name)[0], name)
return None, False
if name in get_servers():
msg("Server '{0}' already exists!", name)
return None, False
try:
server = {}
server["hostname"] = hostname
server["username"] = _ask("Username")
server["port"] = _ask_port("Port", default=22)
server["needs_pw"] = _ask_yesno("Is a password required?")
if _ask_yesno("Is a tunnel needed?"):
tunnel_host = _ask("Tunnel hostname")
tunnel_user = _ask("Tunnel username")
tport_final = None
while tport_final is None:
tport = 11111
blocked = set(_get_tunnel_ports())
while tport in blocked:
tport += 1
if tport > PORT_UPPER:
raise ValueError("All ports are blocked?")
tport_final = _ask_port("Unique tunnel port", default=tport)
if tport_final in blocked:
msg("Port {0} is not unique!", tport_final)
tport_final = None
server["tunnel_port"] = tport_final
tunnel_port = _ask_port("Standard tunnel port", default=22)
server["tunnel"] = "{0}@{1}{2}".format(
tunnel_user,
tunnel_host,
":{0}".format(tunnel_port) if tunnel_port != 22 else ""
)
server["needs_tunnel_pw"] = _ask_yesno("Is a tunnel password required?")
_write_json(_get_path(DIR_SERVER, name), server)
msg("Checking server configuration")
test_connection(get_server(name), True)
msg("Successfully created server '{0}'!", name)
except (KeyboardInterrupt, SystemExit):
raise
except:
msg("Error creating server {0}:\n{1}", name, traceback.format_exc())
return None, False
return name, True
```
#### File: parcell/parcell/server.py
```python
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import threading
import webbrowser
from connector import get_envs, get_servers, get_projects, \
get_connector, init_passwords, set_password_reuse, \
set_msg
from loading import allow_ask
from quick_server import create_server, msg, setup_restart, \
has_been_restarted, is_original
from quick_cache import QuickCache
set_msg(msg)
PARCEL_MNT = '/parcell/'
def get_server(addr, port, cache):
server = create_server((addr, port))
server.bind_path(PARCEL_MNT, os.path.join(os.path.dirname(__file__), 'www'))
prefix = '/parcell'
server.directory_listing = False
server.add_default_white_list()
server.link_empty_favicon_fallback()
server.suppress_noise = True
server.report_slow_requests = True
server.link_worker_js(prefix + '/js/worker.js')
server.cache = cache
def optional(key, args, default=None):
return args[key] if key in args else default
def optional_bool(key, args, default):
return bool(args[key]) if key in args else default
@server.json_get(prefix + '/envs')
def json_get_envs(req, args):
return {
"envs": get_envs(),
}
@server.json_get(prefix + '/projects')
def json_get_projects(req, args):
return {
"projects": get_projects(),
}
@server.json_get(prefix + '/servers')
def json_get_servers(req, args):
project = optional("project", args["query"])
return {
"servers": [ {
"server": s,
"vital": float('nan'),
            } for s in get_servers() ] if project is None else get_connector(project).get_servers_info(),
}
@server.json_get(prefix + '/project_info')
def json_get_project_info(req, args):
project = args["query"]["project"]
conn = get_connector(project)
return {
"project": project,
"path": conn.get_path(),
"cmds": conn.get_commands(),
"env": conn.get_env(),
}
@server.json_worker(prefix + '/stats')
def json_status(args):
project = args["project"]
server = args["server"]
conn = get_connector(project)
return {
"project": project,
"server": server,
"stats": conn.get_server_stats(server),
}
@server.json_worker(prefix + '/best_server')
def json_best_server(args):
project = args["project"]
conn = get_connector(project)
return {
"project": project,
"server": conn.get_best_server(),
}
@server.json_worker(prefix + '/start')
def json_start(args):
project = args["project"]
server = args["server"]
cmd = args["cmd"]
conn = get_connector(project)
job = conn.submit_job(server, cmd)
return {
"project": project,
"server": server,
"job": job,
"cmds": conn.get_commands(),
}
@server.json_worker(prefix + '/jobs')
def json_jobs(args):
project = args["project"]
conn = get_connector(project)
return {
"project": project,
"jobs": conn.get_all_jobs(),
}
@server.json_worker(prefix + '/kill_job')
def json_kill(args):
project = args["project"]
server = args["server"]
job = args["job"]
conn = get_connector(project)
conn.delete_job(server, job)
return {
"project": project,
"server": server,
"job": job,
}
@server.json_worker(prefix + '/kill_all')
def json_kill_all(args):
project = args["project"]
conn = get_connector(project)
conn.delete_all_jobs()
return {
"project": project,
}
@server.json_worker(prefix + '/status')
def json_status(args):
project = args["project"]
server = args["server"]
job = args["job"]
conn = get_connector(project)
status, result = conn.get_job_status(server, job)
return {
"project": project,
"server": server,
"job": job,
"status": status,
"result": result,
}
@server.json_worker(prefix + '/ls')
def json_status(args):
project = args["project"]
server = args["server"]
job = args["job"]
path = args["path"]
conn = get_connector(project)
return {
"project": project,
"server": server,
"job": job,
"path": path,
"files": conn.get_job_files(server, job, path),
}
@server.text_get(prefix + '/file')
def text_get(req, args):
args = args["query"]
project = args["project"]
server = args["server"]
job = args["job"]
req_file = args["file"]
conn = get_connector(project)
filename = conn.get_job_file(server, job, req_file)
if filename is None:
return None
with open(filename, 'rb') as f:
return f.read()
def complete_cache_clear(args, text):
if args:
return []
return [ section for section in cache.list_sections() if section.startswith(text) ]
@server.cmd(complete=complete_cache_clear)
def cache_clear(args):
if len(args) > 1:
msg("too many extra arguments! expected one got {0}", ' '.join(args))
return
msg("clear {0}cache{1}{2}", "" if args else "all ", " " if args else "s", args[0] if args else "")
cache.clean_cache(args[0] if args else None)
return server
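# Illustrative requests against the JSON API wired up above, once the server is
# running (host and port are examples; see start_server below):
#   GET http://localhost:8080/parcell/projects          -> {"projects": [...]}
#   GET http://localhost:8080/parcell/project_info?project=demo
# The json_worker endpoints (e.g. /parcell/start, /parcell/status) are meant to
# be driven by the bundled worker.js rather than called directly.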
def is_child():
return not is_original()
def enable_restart():
setup_restart()
def start_server(addr, port, cache_quota, ram_quota, reuse_pw):
cache_temp = "tmp"
if os.path.exists("cache_path.txt"):
with open("cache_path.txt") as cp:
cache_temp = cp.read().strip()
msg("{0}", " ".join(sys.argv))
msg("initializing passwords -- please type as prompted")
set_password_reuse(reuse_pw)
init_passwords()
msg("initializing passwords -- done")
server = get_server(addr, port, QuickCache(quota=cache_quota, ram_quota=ram_quota, temp=cache_temp, warnings=msg))
urlstr = "http://{0}:{1}{2}".format(addr if addr else 'localhost', port, PARCEL_MNT)
def browse():
time.sleep(1)
msg("browsing to {0}", urlstr)
webbrowser.open(urlstr, new=0, autoraise=True)
if not has_been_restarted():
t = threading.Thread(target=browse, name="Browser")
t.daemon = True
t.start()
else:
msg("please browse to {0}", urlstr)
msg("starting web interface..")
allow_ask(False)
server.serve_forever()
msg("shutting down..")
server.server_close()
```
#### File: JosuaKrause/parcell/setup.py
```python
import os
import sys
from codecs import open
from setuptools import setup
os.chdir(os.path.abspath(os.path.dirname(__file__)))
def list_files(d, root):
files = []
for e in os.listdir(os.path.join(root, d)):
if os.path.isdir(os.path.join(root, d, e)):
files.extend(list_files('%s/%s' % (d, e), root))
elif not e.endswith('.pyc'):
files.append('%s/%s' % (d, e))
return files
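# Illustrative result (actual contents depend on the checkout):
#   list_files('www', 'parcell')
# returns forward-slash paths relative to the package root such as
# ['www/index.html', 'www/js/worker.js'], with '.pyc' files skipped.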
# NOTE! steps to distribute:
#$ git submodule update --init --recursive
#$ python setup.py sdist bdist_wheel
#$ twine upload dist/... <- here be the new version!
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
req = [ 'quick_server', 'quick_cache', 'pexpect', 'tej' ]
if sys.version_info < (2, 7):
req.append('argparse')
setup(
name='parcell',
version='0.2.3',
packages=['parcell'],
package_data={
'parcell': list_files('www', 'parcell') + list_files('default_envs', 'parcell'),
},
entry_points={
'console_scripts': [ 'parcell = parcell.main:main' ],
},
install_requires=req,
description='UI for local development of server executed projects.',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/JosuaKrause/parcell/',
long_description=long_description,
license='MIT',
keywords=[
'parcell',
'remote',
'execution',
'project',
'management',
'web',
'UI',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
]
)
``` |
{
"source": "JosuaKrause/quick_cache",
"score": 2
} |
#### File: JosuaKrause/quick_cache/quick_cache.py
```python
from __future__ import division
import os
import sys
import json
import time
import zlib
import atexit
import shutil
import hashlib
import threading
import collections
try:
import cPickle
except ImportError:
import pickle as cPickle
try:
unicode = unicode
except NameError:
# python 3
str = str
unicode = str
bytes = bytes
basestring = (str, bytes)
else:
# python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
if hasattr(time, "monotonic"):
def _get_monotonic_time():
return time.monotonic()
get_time = _get_monotonic_time
else:
def _get_clock_time():
return time.clock()
get_time = _get_clock_time
__version__ = "0.3.1"
def _write_str(id_obj, elapsed, data):
obj = json.dumps([id_obj, elapsed], sort_keys=True, allow_nan=True)
return (str(len(obj)) + ';' + obj + data).encode('utf-8')
def _read_str(txt):
len_ix, rest = txt.decode('utf-8').split(";", 1)
length = int(len_ix)
id_obj, elapsed = json.loads(rest[:length])
return id_obj, elapsed, rest[length:]
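# Illustrative round trip of the "string" codec above (values chosen for the
# example): the payload is framed as "<header length>;<json header><data>".
#   _write_str({"a": 1}, 0.5, "hello")      -> b'15;[{"a": 1}, 0.5]hello'
#   _read_str(b'15;[{"a": 1}, 0.5]hello')   -> ({"a": 1}, 0.5, "hello")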
CHUNK_SIZE = 1024 * 1024 * 1024
methods = {
"pickle": (
lambda id_obj, elapsed, data: cPickle.dumps(
(id_obj, elapsed, data), -1),
lambda txt: cPickle.loads(txt),
),
"json": (
lambda id_obj, elapsed, data: json.dumps(
[id_obj, elapsed, data], sort_keys=True, allow_nan=True),
lambda txt: tuple(json.loads(txt)),
),
"string": (_write_str, _read_str),
}
class QuickCache(object):
def __init__(self, base_file=None, base_string=None, quota=None,
ram_quota=0, temp="tmp", warnings=None, method="pickle"):
"""Creates a new cache. It is recommended to only use one cache object
for all cache operations of a program.
Parameters
----------
base_file : filename (optional; default=None)
The data source file the caching is based on or None for a general
cache. Only one of base_file or base_string can be non-None.
base_string : string (optional; default=None)
The string the caching is based on (if the string changes so does
the cache) or None for a general cache.
Only one of base_file or base_string can be non-None.
quota : size (optional; default=None)
The maximum cache size in MB. Longest untouched files are removed
first. Files larger than quota are not written. Quota is base
specific.
ram_quota : size (optional; default=0)
The maximum RAM cache size in MB. Longest unused caches are written
to disk first. Caches larger than the RAM quota are written to disk
immediately if disk quota allows.
temp : folder (optional; default="tmp")
The folder to store the cache files.
warnings : function (optional; default=None)
Used to print warnings. Arguments are formatting string and then
*args and **kwargs.
method : string (optional; default="pickle")
The method to read/write data to the disk. The stored content must
be convertible without loss. Available methods are:
"pickle",
"json",
"string",
Attributes
----------
verbose : bool (default=False)
Whether to log non warning messages.
"""
self._own = threading.RLock()
self._method = methods.get(method, None)
if self._method is None:
raise ValueError("unknown method: '{0}'".format(method))
self._locks = {}
self._temp = temp
self._quota = None if quota is None else float(quota)
self._ram_quota = None if ram_quota is None else float(ram_quota)
if base_file is not None:
if base_string is not None:
err_msg = "of base_file and base_string only one " + \
"can be non-None: {0} {1}".format(
base_file, base_string)
raise ValueError(err_msg)
with open(base_file, 'rb') as f:
base = hashlib.sha1(f.read()).hexdigest()
self._full_base = os.path.join(self._temp, base)
elif base_string is not None:
base = hashlib.sha1(base_string.encode('utf8')).hexdigest()
self._full_base = os.path.join(self._temp, base)
else:
self._full_base = self._temp
def no_msg(message, *args, **kwargs):
pass
self._warnings = warnings if warnings is not None else no_msg
self.verbose = False
atexit.register(lambda: self.remove_all_locks())
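    # Illustrative construction (a sketch, not part of the original code; the
    # quotas and the logging callable are arbitrary examples):
    #   def log(fmt, *args, **kwargs):
    #       print(fmt.format(*args, **kwargs))
    #   cache = QuickCache(base_string="run-42", quota=500, ram_quota=50,
    #                      temp="tmp", warnings=log)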
def clean_cache(self, section=None):
"""Cleans the cache of this cache object."""
self.remove_all_locks()
if section is not None and "/" in section:
raise ValueError("invalid section '{0}'".format(section))
if section is not None:
path = os.path.join(self._full_base, section)
else:
path = self._full_base
if not os.path.exists(path):
return
shutil.rmtree(path)
def list_sections(self):
"""List all sections."""
if not os.path.exists(self._full_base):
return []
return [
name for name in os.listdir(self._full_base)
if os.path.isdir(os.path.join(self._full_base, name))
]
def get_file(self, cache_id_obj, section=None):
"""Returns the file path for the given cache object."""
section = "default" if section is None else section
if "/" in section:
raise ValueError("invalid section '{0}'".format(section))
cache_id = "{:08x}".format(
zlib.crc32(b"&".join(sorted([
str(k).encode('utf8') + b"=" + str(v).encode('utf8')
for k, v in cache_id_obj.items()
]))) & 0xffffffff)
return os.path.join(self._full_base,
os.path.join(section,
os.path.join(
"{0}".format(cache_id[:2]),
"{0}.tmp".format(cache_id[2:]))))
def _remove_lock(self, k):
try:
self._locks[k].remove()
del self._locks[k]
except KeyError:
pass
def enforce_ram_quota(self):
        locks = list(self._locks.values())
full_size = sum([l.get_size() for l in locks])
ram_quota = self._ram_quota
if full_size > ram_quota:
locks.sort(key=lambda l: l.get_last_access())
for l in locks:
old_size = l.get_size()
l.force_to_disk()
new_size = l.get_size()
full_size -= old_size - new_size
if full_size <= ram_quota:
break
def remove_all_locks(self):
"""Removes all locks and ensures their content is written to disk."""
locks = list(self._locks.items())
locks.sort(key=lambda l: l[1].get_last_access())
for l in locks:
self._remove_lock(l[0])
def get_hnd(self, cache_id_obj, section=None, method=None):
"""Gets a handle for the given cache file with exclusive access.
The handle is meant to be used in a resource block.
Parameters
----------
cache_id_obj : object
An object uniquely identifying the cached resource. Note that some
methods require the cache id object to be json serializable. The
string representation of each element, however, has to reflect its
content in a lossless way.
method : string (optional; default=None)
Defines the method used to encode the cached content. If None the
default method of this cache is used. The method must be consistent
between multiple accesses of the same cache resource.
"""
cache_file = self.get_file(cache_id_obj, section)
if cache_file not in self._locks:
try:
while not self._own.acquire(True):
pass
if cache_file not in self._locks:
if method is None:
m = self._method
else:
m = methods.get(method, None)
if m is None:
raise ValueError(
"unknown method: '{0}'".format(method))
res = _CacheLock(cache_file, cache_id_obj, self._full_base,
self._quota, self._ram_quota,
self._warnings, self.verbose, m)
self._locks[cache_file] = res
else:
res = None
finally:
self._own.release()
else:
res = None
if res is None:
res = self._locks[cache_file]
res.ensure_cache_id(cache_id_obj)
self.enforce_ram_quota()
return res
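    # Illustrative use of the returned handle (a sketch; compute() stands in
    # for the expensive function being cached):
    #   with cache.get_hnd({"task": "fit", "n": 10}, section="models") as hnd:
    #       result = hnd.read() if hnd.has() else hnd.write(compute(10))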
class _CacheLock(object):
def __init__(self, cache_file, cache_id_obj, base, quota,
ram_quota, warnings, verbose, method):
"""Creates a handle for the given cache file."""
self._cache_file = cache_file
self._lock = threading.RLock()
self._base = base
self._quota = quota
self._ram_quota = ram_quota
self._warnings = warnings
self._start_time = None
self._last_access = get_time()
self._write, self._read = method
self._cache_id_obj = self._get_canonical_id(cache_id_obj)
self._out = None
self._done = False
self.verbose = verbose
def _get_canonical_id(self, cache_id_obj):
return self._read(self._write(cache_id_obj, 0, ""))[0]
def ensure_cache_id(self, cache_id_obj):
"""Ensure the integrity of the cache id object."""
cache_id = self._get_canonical_id(cache_id_obj)
if cache_id != self._cache_id_obj:
raise ValueError(
"cache mismatch {0} != {1}".format(
cache_id, self._cache_id_obj))
def name(self):
"""The cache file."""
return self._cache_file
def is_done(self):
"""Conservatively determine whether this cache is ready and can safely
be removed from the lock index.
"""
return self._done or self._out is not None
def has(self):
"""Whether the cache file exists in the file system."""
self._done = os.path.exists(self._cache_file)
return self._done or self._out is not None
def _cache_id_desc(self):
def convert(v):
if isinstance(v, basestring):
return v
if isinstance(v, dict):
return "{{..{0}}}".format(len(v.keys()))
if isinstance(v, collections.Iterable):
return "[..{0}]".format(len(v))
return str(v)
return "[{0}]".format(", ".join([
"{0}={1}".format(k, convert(v))
for (k, v) in self._cache_id_obj.items()
]))
def read(self):
"""Reads the cache file as pickle file."""
def warn(msg, elapsed_time, current_time):
desc = self._cache_id_desc()
self._warnings(
"{0} {1}: {2}s < {3}s", msg, desc, elapsed_time, current_time)
file_time = get_time()
out = self._out
if out is None:
if self.verbose:
self._warnings("reading {0} from disk", self._cache_id_desc())
with open(self._cache_file, 'rb') as f_in:
out = None
while True:
t_out = f_in.read(CHUNK_SIZE)
if not len(t_out):
break
if out is not None:
out += t_out
else:
out = t_out
self._out = out
(cache_id_obj, elapsed_time, res) = self._read(out)
self.ensure_cache_id(cache_id_obj)
real_time = get_time() - file_time
if elapsed_time is not None and real_time > elapsed_time:
warn("reading cache from disk takes longer than computing!",
elapsed_time, real_time)
elif self._start_time is not None and elapsed_time is not None:
current_time = get_time() - self._start_time
if elapsed_time < current_time:
warn("reading cache takes longer than computing!",
elapsed_time, current_time)
self._last_access = get_time()
return res
def write(self, obj):
"""Writes the given object to the cache file as pickle. The cache file with
its path is created if needed.
"""
if self.verbose:
self._warnings("cache miss for {0}", self._cache_id_desc())
if self._start_time is not None:
elapsed = get_time() - self._start_time
else:
elapsed = None
out = self._write(self._cache_id_obj, elapsed, obj)
self._out = out
self.force_to_disk(self.get_size() > self._ram_quota)
self._last_access = get_time()
return self._read(out)[2]
def get_size(self):
out = self._out
return len(out) / 1024.0 / 1024.0 if out is not None else 0.0
def get_last_access(self):
return self._last_access
def force_to_disk(self, removeMem=True):
out = self._out
if out is None:
return
cache_file = self._cache_file
if os.path.exists(cache_file):
self._done = True
if removeMem:
self._out = None
if self.verbose:
self._warnings("free memory of {0}", self._cache_id_desc())
return
own_size = len(out) / 1024.0 / 1024.0
quota = self._quota
if quota is not None and own_size > quota:
self._warnings(
"single file exceeds quota: {0}MB > {1}MB", own_size, quota)
return # file exceeds quota
def get_size(start_path):
total_size = 0
for dirpath, _dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size / 1024.0 / 1024.0
base = self._base
while quota is not None and get_size(base) + own_size > quota:
oldest_fp = None
oldest_time = None
for dirpath, _dirnames, filenames in os.walk(base):
for f in filenames:
fp = os.path.join(dirpath, f)
cur_time = os.path.getatime(fp)
if oldest_time is None or cur_time < oldest_time:
oldest_time = cur_time
oldest_fp = fp
if oldest_fp is None:
self._warnings(
"cannot free enough space for quota ({0}MB > {1}MB)!",
get_size(base) + own_size, quota)
return # cannot free enough space
self._warnings("removing old cache file: {0}", oldest_fp)
os.remove(oldest_fp)
if not os.path.exists(os.path.dirname(cache_file)):
os.makedirs(os.path.dirname(cache_file))
try:
if self.verbose:
self._warnings(
"writing cache to disk: {0}", self._cache_id_desc())
with open(cache_file, 'wb') as f_out:
cur_chunk = 0
while cur_chunk < len(out):
next_chunk = cur_chunk + CHUNK_SIZE
f_out.write(out[cur_chunk:next_chunk])
cur_chunk = next_chunk
except:
# better remove everything written if an exception
# occurs during I/O -- we don't want partial files
if os.path.exists(cache_file):
os.remove(cache_file)
raise
self._done = True
if removeMem:
self._out = None
if self.verbose:
self._warnings("free memory of {0}", self._cache_id_desc())
def remove(self):
self.force_to_disk()
def __enter__(self):
while not self._lock.acquire(True):
pass
self._start_time = get_time()
return self
def __exit__(self, _type, _value, _traceback):
self._lock.release()
self._start_time = None
return False
``` |
{
"source": "josu-arcaya/orfeon",
"score": 3
} |
#### File: orfeon/genpipe/genpipe.py
```python
import argparse
import logging
import random
import yaml
import numpy as np
from numpy.random import choice
total_location = ["IE","ES","US"]
class Model:
def __init__(self,
model_name:str,
cpus: int,
memory: float,
location: str,
privacy_type: int):
self.model = model_name
self.resources = {'cpus':cpus, 'memory':memory}
self.privacy = {'location':location, 'type':privacy_type}
class PADL:
def __init__(self,
version:str):
self.version = version
self.pipeline = []
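# Rough shape of a single generated pipeline entry as emitted by main() below
# (yaml.dump serialises the plain Model.__dict__ dicts; the numbers are examples):
#   - model: m0
#     resources: {cpus: 4, memory: 8}
#     privacy: {location: ES, type: 1}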
def main():
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
text = 'This application generates PADL defined analytic models.'
parser = argparse.ArgumentParser(description=text)
required = parser.add_argument_group('required arguments')
required.add_argument('-m', '--models', type=str,
help='The number of models to be generated.', required=True)
args = parser.parse_args()
number_of_models = int(args.models)
total_cpus = 0
total_memory = 0
p = PADL('1.0')
random.seed(1)
np.random.seed(1)
for i in range(number_of_models):
model_size = choice([0,1,2], 1, p=[0.8,0.15,0.05])[0]
#model_size = random.random()
if model_size == 0:
# small model
cpus = random.randint(1,7)
memory = random.randint(1,16)
elif model_size == 1:
# medium model
cpus = random.randint(7,32)
memory = random.randint(16,64)
elif model_size == 2:
# large model
cpus = random.randint(32,128)
memory = random.randint(64,256)
m = Model(model_name=f"m{i}",
cpus=cpus,
memory=memory,
location=random.choice(total_location),
privacy_type=random.randint(0,2))
p.pipeline.append(m.__dict__)
total_cpus += cpus
total_memory += memory
with open(r'/tmp/pipeline.yaml', 'w') as file:
documents = yaml.dump(p, file)
logging.info(f"{number_of_models} models generated")
logging.info(f"{total_cpus} total cpus")
logging.info(f"{total_memory} total memory")
if __name__ == '__main__':
main()
```
#### File: unit/core/testutils.py
```python
import unittest
import logging
from src.core.utils import Infrastructure, Pipeline
class TestInfrastructure(unittest.TestCase):
def test_load_infrastructure_1(self):
file_location = 'src/test/resources/infrastructure.csv'
infra = Infrastructure(file_location).load()
self.assertTrue( len(infra.index)>0 )
self.assertEqual( infra.hostname[0], 'WKM0092')
self.assertEqual( infra.core_count[5], 7)
class TestPipeline(unittest.TestCase):
def test_load_pipeline(self):
file_location = 'src/test/resources/pipeline.yaml'
with open(file_location, 'r') as input_data_file:
input_data = input_data_file.read()
p = Pipeline(input_data).load()
# assert cpus in device 0
#self.assertEqual(p[0][0], 4)
self.assertEqual(p.cpus[0], 4)
# assert memory in device 2
#self.assertEqual(p[1][2], 8)
self.assertEqual(p.memory[2], 8)
``` |
{
"source": "josu-arcaya/padl",
"score": 3
} |
#### File: padl/lib/index.py
```python
from flask import Flask, request, render_template
from jsonschema import ValidationError
from lint.utils import Padl
app = Flask(__name__)
@app.route('/')
def hello(msg=''):
return render_template('hello.html', msg=msg, input_padl='', txt_colour='white')
@app.route('/validate', methods=['POST'])
def validate():
#return request.form['yaml'] + "valid!"
try:
txt_colour = 'green'
msg = 'Valid!'
Padl(request.form['yaml']).validate()
except ValidationError:
txt_colour = 'red'
msg = 'Not Valid'
return render_template('hello.html', msg=msg, input_padl=request.form['yaml'], txt_colour=txt_colour)
``` |
{
"source": "josuav1/MPContribs",
"score": 2
} |
#### File: mpcontribs/api/__init__.py
```python
import logging, os
from importlib import import_module
from bson.decimal128 import Decimal128
from flask import Flask, redirect, current_app
from flask_marshmallow import Marshmallow
from flask_mongoengine import MongoEngine
from flask_log import Logging
from flasgger import Swagger
from flask_json import FlaskJSON
for mod in ['matplotlib', 'toronado.cssutils', 'selenium.webdriver.remote.remote_connection']:
log = logging.getLogger(mod)
log.setLevel('INFO')
logger = logging.getLogger('app')
def get_collections(db):
conn = db.app.extensions['mongoengine'][db]['conn']
return conn.mpcontribs.list_collection_names()
# http://flask.pocoo.org/snippets/77/
def get_resource_as_string(name, charset='utf-8'):
with current_app.open_resource(name) as f:
return f.read().decode(charset)
def create_app():
app = Flask(__name__)
app.config.from_pyfile('config.py', silent=True)
app.jinja_env.globals['get_resource_as_string'] = get_resource_as_string
if app.config.get('DEBUG'):
from flask_cors import CORS
CORS(app) # enable for development (allow localhost)
json = FlaskJSON(app)
@json.encoder
def custom_encoder(o):
if isinstance(o, Decimal128):
return float(o.to_decimal())
Logging(app)
Marshmallow(app)
db = MongoEngine(app)
swagger = Swagger(app, template=app.config.get('TEMPLATE'))
collections = get_collections(db)
for collection in collections:
module_path = '.'.join(['mpcontribs', 'api', collection, 'views'])
try:
module = import_module(module_path)
except ModuleNotFoundError:
logger.warning('API module {} not found!'.format(module_path))
continue
try:
blueprint = getattr(module, collection)
app.register_blueprint(blueprint, url_prefix='/'+collection)
except AttributeError as ex:
            logger.warning('Failed to register blueprint {} for {}: {}'.format(
module_path, collection, ex
))
return app
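# Minimal sketch of serving the factory above (development use only; how the app
# is exposed to a WSGI server in deployment is an assumption, not shown here):
#   from mpcontribs.api import create_app
#   app = create_app()
#   app.run(port=5000, debug=True)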
```
#### File: api/projects/views.py
```python
from mongoengine.queryset import DoesNotExist
from mongoengine.context_managers import no_dereference
from mongoengine.queryset.visitor import Q
from flask import Blueprint, request, current_app
from bson.decimal128 import Decimal128
from pandas.io.json.normalize import nested_to_record
from mpcontribs.api.core import SwaggerView
from mpcontribs.api.projects.document import Projects
from mpcontribs.api.contributions.document import Contributions
from mpcontribs.api.structures.document import Structures
projects = Blueprint("projects", __name__)
class ProjectsView(SwaggerView):
def get(self):
"""Retrieve (and optionally filter) projects.
---
operationId: get_entries
parameters:
- name: search
in: query
type: string
description: string to search for in `title`, `description`, and \
`authors` using a MongoEngine/MongoDB text index. Provide \
a space-separated list to the search query parameter to \
search for multiple words. For more, see \
https://docs.mongodb.com/manual/text-search/.
- name: mask
in: query
type: array
items:
type: string
default: ["project", "title"]
description: comma-separated list of fields to return (MongoDB syntax)
responses:
200:
description: list of projects
schema:
type: array
items:
$ref: '#/definitions/ProjectsSchema'
"""
mask = request.args.get('mask', 'project,title').split(',')
objects = Projects.objects.only(*mask)
search = request.args.get('search')
entries = objects.search_text(search) if search else objects.all()
return self.marshal(entries)
# TODO: only staff can start new project
def post(self):
"""Create a new project.
Only MP staff can submit a new/non-existing project (or use POST
endpoints in general). The staff member's email address will be set as
the first readWrite entry in the permissions dict.
"""
return NotImplemented()
class ProjectView(SwaggerView):
def get(self, project):
"""Retrieve provenance info for a single project.
---
operationId: get_entry
parameters:
- name: project
in: path
type: string
pattern: '^[a-zA-Z0-9_]{3,30}$'
required: true
description: project name/slug
- name: mask
in: query
type: array
items:
type: string
default: ["title", "authors", "description", "urls"]
description: comma-separated list of fields to return (MongoDB syntax)
responses:
200:
description: single project
schema:
$ref: '#/definitions/ProjectsSchema'
"""
mask_default = ','.join(['title', 'authors', 'description', 'urls'])
mask = request.args.get('mask', mask_default).split(',')
objects = Projects.objects.only(*mask)
return self.marshal(objects.get(project=project))
# TODO: only emails with readWrite permissions can use methods below
def put(self, project):
"""Replace a project's provenance entry"""
# TODO id/project are read-only
return NotImplemented()
def patch(self, project):
"""Partially update a project's provenance entry"""
return NotImplemented()
schema = self.Schema(dump_only=('id', 'project')) # id/project read-only
schema.opts.model_build_obj = False
payload = schema.load(request.json, partial=True)
if payload.errors:
return payload.errors # TODO raise JsonError 400?
# set fields defined in model
if 'urls' in payload.data:
urls = payload.data.pop('urls')
payload.data.update(dict(
('urls__'+key, getattr(urls, key)) for key in urls
))
# set dynamic fields for urls
for key, url in request.json.get('urls', {}).items():
payload.data['urls__'+key] = url
return payload.data
#Projects.objects(project=project).update(**payload.data)
def delete(self, project):
"""Delete a project's provenance entry"""
# TODO should also delete all contributions
return NotImplemented()
class TableView(SwaggerView):
def get(self, project):
"""Retrieve a table of contributions for a project.
---
operationId: get_table
parameters:
- name: project
in: path
type: string
pattern: '^[a-zA-Z0-9_]{3,30}$'
required: true
description: project name/slug
- name: columns
in: query
type: array
items:
type: string
description: comma-separated list of column names to tabulate
- name: page
in: query
type: integer
default: 1
description: page to retrieve (in batches of `per_page`)
- name: per_page
in: query
type: integer
default: 20
minimum: 2
maximum: 20
description: number of results to return per page
- name: q
in: query
type: string
description: substring to search for in first non-id column
- name: order
in: query
type: string
description: sort ascending or descending
enum: [asc, desc]
- name: sort_by
in: query
type: string
description: column name to sort by
responses:
200:
description: Paginated table response in backgrid format (items = rows of table)
schema:
type: object
properties:
total_count:
type: integer
total_pages:
type: integer
page:
type: integer
last_page:
type: integer
per_page:
type: integer
items:
type: array
items:
type: object
"""
# config and parameters
explorer = 'http://localhost:8080/explorer' if current_app.config['DEBUG'] \
else 'https://portal.mpcontribs.org/explorer'
mp_site = 'https://materialsproject.org/materials'
mask = ['content.data', 'content.structures', 'identifier']
search = request.args.get('q')
page = int(request.args.get('page', 1))
PER_PAGE_MAX = current_app.config['PER_PAGE_MAX']
per_page = int(request.args.get('per_page', PER_PAGE_MAX))
per_page = PER_PAGE_MAX if per_page > PER_PAGE_MAX else per_page
order = request.args.get('order')
sort_by = request.args.get('sort_by', 'identifier')
general_columns = ['identifier', 'id']
user_columns = request.args.get('columns', '').split(',')
objects = Contributions.objects(project=project).only(*mask)
# default user_columns
sample = objects.first()['content']['data']
data_keys = sorted(list(
k.rsplit('.', 1)[0] if k.endswith('.display') else k
for k, v in nested_to_record(sample, sep='.').items()
if not k.endswith('.value') and not k.endswith('.unit')
))
if not data_keys:
return {
'total_count': 0, 'total_pages': 0, 'page': 1,
'last_page': 1, 'per_page': per_page, 'items': []
}
formula_key_exists = bool('formula' in data_keys)
if formula_key_exists:
general_columns.append('formula')
else:
# test whether search key exists in all docs and is not a number/object
search_key = data_keys[0].replace('.', '__')
q1 = {f'content__data__{search_key}__exists': False}
q2 = {f'content__data__{search_key}__type': 'object'}
if objects(Q(**q1) | Q(**q2)).count() < 1:
general_columns.append(data_keys[0])
else:
general_columns.append('formula')
if not user_columns[0]:
if formula_key_exists:
data_keys.remove('formula')
user_columns = data_keys if 'formula' in general_columns else data_keys[1:]
# add units to column names
units = [objects.distinct(f'content.data.{col}.unit') for col in user_columns]
columns = general_columns + [
'{} [{}]'.format(col, units[idx][0])
if units[idx] else col
for idx, col in enumerate(user_columns)
]
# search and sort
if search is not None:
kwargs = {
f'content__data__{general_columns[-1]}__exists': True,
f'content__data__{general_columns[-1]}__contains': search
}
objects = objects(Q(identifier__contains=search) | Q(**kwargs))
sort_by_key = sort_by
if ' ' in sort_by and sort_by[-1] == ']':
sort_by = sort_by.split(' ')[0] # remove unit
sort_by_key = f'content.data.{sort_by}.value'
elif sort_by in columns[2:]:
sort_by_key = f'content.data.{sort_by}'
order_sign = '-' if order == 'desc' else '+'
order_by = f"{order_sign}{sort_by_key}"
objects = objects.order_by(order_by)
# generate table page
items = []
for doc in objects.paginate(page=page, per_page=per_page).items:
mp_id = doc['identifier']
contrib = nested_to_record(doc['content']['data'], sep='.')
search_value = contrib.get(general_columns[-1], mp_id).replace(' ', '')
row = [f"{mp_site}/{mp_id}", f"{explorer}/{doc['id']}", search_value]
for idx, col in enumerate(user_columns):
cell = ''
if 'CIF' in col:
structures = doc['content']['structures']
if '.' in col: # grouped columns
sname = '.'.join(col.split('.')[:-1]) # remove CIF string from field name
for d in structures:
if d['name'] == sname:
cell = f"{explorer}/{d['id']}.cif"
break
elif structures:
cell = f"{explorer}/{structures[0]['id']}.cif"
else:
cell = contrib.get(col+'.value', contrib.get(col, ''))
row.append(str(cell))
items.append(dict(zip(columns, row)))
total_count = objects.count()
total_pages = int(total_count/per_page)
        if total_count % per_page:
total_pages += 1
return {
'total_count': total_count, 'total_pages': total_pages, 'page': page,
'last_page': total_pages, 'per_page': per_page, 'items': items
}
class GraphView(SwaggerView):
def get(self, project):
"""Retrieve overview graph for a project.
---
operationId: get_graph
parameters:
- name: project
in: path
type: string
pattern: '^[a-zA-Z0-9_]{3,30}$'
required: true
description: project name/slug
- name: columns
in: query
type: array
items:
type: string
required: true
description: comma-separated list of column names to plot (in MongoDB dot notation)
- name: filters
in: query
type: array
items:
type: string
description: list of `column__operator:value` filters \
with `column` in dot notation and `operator` in mongoengine format \
(http://docs.mongoengine.org/guide/querying.html#query-operators). \
`column` needs to be a valid field in `content.data`.
- name: page
in: query
type: integer
default: 1
description: page to retrieve (in batches of `per_page`)
- name: per_page
in: query
type: integer
default: 200
minimum: 2
maximum: 200
description: number of results to return per page
responses:
200:
description: x-y-data in plotly format
schema:
type: array
items:
type: object
properties:
x:
type: array
items:
type: number
y:
type: array
items:
type: number
"""
mask = ['content.data', 'identifier']
columns = request.args.get('columns').split(',')
filters = request.args.get('filters', '').split(',')
page = int(request.args.get('page', 1))
PER_PAGE_MAX = 200
per_page = int(request.args.get('per_page', PER_PAGE_MAX))
per_page = PER_PAGE_MAX if per_page > PER_PAGE_MAX else per_page
with no_dereference(Contributions) as ContributionsDeref:
objects = ContributionsDeref.objects(project=project).only(*mask)
data = [{'x': [], 'y': [], 'text': []} for col in columns]
# C__gte:0.42,C__lte:2.10,ΔE-QP.direct__lte:11.3 -> content__data__C__value__lte
if filters:
query = {}
for f in filters:
if '__' in f and ':' in f:
k, v = f.split(':')
col, op = k.rsplit('__', 1)
col = col.replace(".", "__")
key = f'content__data__{col}__value__{op}'
query[key] = float(v)
objects = objects(**query)
for obj in objects.paginate(page=page, per_page=per_page).items:
d = nested_to_record(obj['content']['data'], sep='.')
if all(f'{c}.display' in d.keys() for c in columns):
for idx, col in enumerate(columns):
val = d.get(f'{col}.display')
if val:
data[idx]['x'].append(obj.identifier)
data[idx]['y'].append(val.split(' ')[0])
data[idx]['text'].append(str(obj.id))
return data
# url_prefix added in register_blueprint
# also see http://flask.pocoo.org/docs/1.0/views/#method-views-for-apis
multi_view = ProjectsView.as_view(ProjectsView.__name__)
projects.add_url_rule('/', view_func=multi_view, methods=['GET'])#, 'POST'])
single_view = ProjectView.as_view(ProjectView.__name__)
projects.add_url_rule('/<string:project>', view_func=single_view,
methods=['GET'])#, 'PUT', 'PATCH', 'DELETE'])
table_view = TableView.as_view(TableView.__name__)
projects.add_url_rule('/<string:project>/table', view_func=table_view, methods=['GET'])
graph_view = GraphView.as_view(GraphView.__name__)
projects.add_url_rule('/<string:project>/graph', view_func=graph_view, methods=['GET'])
```
#### File: als_beamline/explorer/views.py
```python
import os
from django.shortcuts import render
from django.template import RequestContext
from mpcontribs.users.utils import get_context
project = os.path.dirname(__file__).split(os.sep)[-2]
def index(request):
ctx = RequestContext(request)
try:
#columns = ['formula', 'cid']
#keys = RecursiveDict([
# ('composition', ['Co', 'Cu', 'Ce']),
# #('position', ['x', 'y']),
# ('XAS', ['min', 'max']),
# ('XMCD', ['min', 'max'])
#])
#columns += ['##'.join([k, sk]) for k, subkeys in keys.items() for sk in subkeys]
ctx.update(get_context(project))
except Exception as ex:
ctx['alert'] = str(ex)
return render(request, "explorer_index.html", ctx.flatten())
```
#### File: als_beamline/scripts/pre_submission.py
```python
import os, json, copy
from tqdm import *
from mpcontribs.io.core.utils import nest_dict, normalize_root_level
import mspScan as msp
from ALS_import import treat_xmcd
import xas_process as xas_process
# TODO mechanism to choose correct translate
# TODO: improve concept for the mapping of keys to compositions.
from translate_vicalloy import get_translate
#from translate_PyPt import get_translate
def run(mpfile, nmax=None):
#print json.dumps(mpfile.document, indent=4)
print mpfile.document['_hdata'].keys()
# datasource = mpfile.document['general'].pop('Datasource')
datasource = mpfile.document['_hdata']['general'].pop('input_file')
subdir = os.path.abspath(os.path.join(
datasource['work_dir'], datasource['directory']
))
# TODO Potentially we have to insert a preprocessing step, probably in msp
scandata_f = msp.read_scans(subdir, datacounter="Counter 1")
scan_groups = scandata_f.groupby(datasource['group_by'].split())
process_template = mpfile.document['general'].pop('process_template')
translate = get_translate(datasource['work_dir'])
keys = scan_groups.groups.keys()
keys.sort()
for i,g in enumerate(tqdm(keys, leave=True)):
# TODO: Group information is saved into the output. Rethink?
comp, sx, sy = translate(g)
composition = normalize_root_level(comp)[1]
process_template_copy = copy.deepcopy(process_template)
process_template_copy['position'] = {'x': sx, 'y': sy}
mpfile.document.rec_update(nest_dict(
process_template_copy, [composition, 'process_chain']
))
sg = scan_groups.get_group(g)
for process_chain_name in process_template.keys():
scan_params = mpfile.document[composition]['process_chain'][process_chain_name]
xmcd_frame = treat_xmcd(sg, scan_params, xas_process.process_dict)
mpfile.add_data_table(
composition, xmcd_frame[['Energy', 'XAS', 'XMCD']],
'_'.join(['data', process_chain_name])
)
if nmax is not None and i > nmax:
break
```
#### File: als_beamline/scripts/translate_vicalloy.py
```python
import pandas as pd
import os, warnings
from scipy.interpolate import interp2d
def get_translate(workdir=None):
filename = os.path.join(workdir, "Vicalloy/Fe-Co-V_140922a_META_DATA.csv")
compdata_f = pd.read_csv(filename, sep='\t').dropna()
print compdata_f.head()
x = compdata_f["Xnom (mm)"].values
y = compdata_f["Ynom (mm)"].values
Co_concentration = compdata_f["Co (at%)"].values
Fe_concentration = compdata_f["Fe (at%)"].values
V_concentration = compdata_f["V (at%)"].values
method = 'linear'
# method = 'nearest'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Co_concI = interp2d(x,y,Co_concentration, kind = method)
Fe_concI = interp2d(x,y,Fe_concentration, kind = method)
V_concI = interp2d(x,y,V_concentration , kind = method)
def translate(key):
manip_z, manip_y = key
sample_y = manip_z - 69.5
sample_x = (manip_y +8) *2
Co = Co_concI(sample_x,sample_y)[0]/100.
Fe = Fe_concI(sample_x,sample_y)[0]/100.
V = V_concI(sample_x,sample_y)[0]/100.
return (
"Fe{:.2f}Co{:.2f}V{:.2f}".format(Fe,Co,V),
sample_x, sample_y
)
return translate
```
#### File: users/dilute_solute_diffusion/pre_submission.py
```python
from __future__ import unicode_literals
import os, json, requests
from pandas import read_excel, isnull, ExcelWriter
from mpcontribs.io.core.recdict import RecursiveDict
from mpcontribs.io.core.utils import clean_value, nest_dict
from mpcontribs.users.utils import duplicate_check
@duplicate_check
def run(mpfile, hosts=None, download=False, **kwargs):
#mpfile.unique_mp_cat_ids = False
from pymatgen import MPRester
mpr = MPRester()
fpath = os.path.join(os.environ['HOME'], 'work', 'dilute_solute_diffusion.xlsx')
if download or not os.path.exists(fpath):
figshare_id = mpfile.hdata.general['info']['figshare_id']
url = 'https://api.figshare.com/v2/articles/{}'.format(figshare_id)
print 'get figshare article {}'.format(figshare_id)
r = requests.get(url)
figshare = json.loads(r.content)
mpfile.document['_hdata']['version'] = figshare['version']
print 'read excel from figshare into DataFrame'
df_dct = None
for d in figshare['files']:
if 'xlsx' in d['name']:
# Dict of DataFrames is returned, with keys representing sheets
df_dct = read_excel(d['download_url'], sheet_name=None)
break
if df_dct is None:
print 'no excel sheet found on figshare'
return
print 'save excel to disk'
writer = ExcelWriter(fpath)
for sheet, df in df_dct.items():
df.to_excel(writer, sheet)
writer.save()
else:
df_dct = read_excel(fpath, sheet_name=None)
print len(df_dct), 'sheets loaded.'
print 'looping hosts ...'
host_info = df_dct['Host Information']
host_info.set_index(host_info.columns[0], inplace=True)
host_info.dropna(inplace=True)
for idx, host in enumerate(host_info):
if hosts is not None:
if isinstance(hosts, int) and idx+1 > hosts:
break
elif isinstance(hosts, list) and not host in hosts:
continue
print 'get mp-id for {}'.format(host)
mpid = None
for doc in mpr.query(
criteria={'pretty_formula': host},
properties={'task_id': 1}
):
if doc['sbxd'][0]['decomposes_to'] is None:
mpid = doc['task_id']
break
if mpid is None:
print 'mp-id for {} not found'.format(host)
continue
print 'add host info for {}'.format(mpid)
hdata = host_info[host].to_dict(into=RecursiveDict)
for k in hdata.keys():
v = hdata.pop(k)
ks = k.split()
if ks[0] not in hdata:
hdata[ks[0]] = RecursiveDict()
unit = ks[-1][1:-1] if ks[-1].startswith('[') else ''
subkey = '_'.join(ks[1:-1] if unit else ks[1:]).split(',')[0]
if subkey == "lattice_constant":
unit = u'Å'
try:
hdata[ks[0]][subkey] = clean_value(v, unit.replace('angstrom', u'Å'))
except ValueError:
hdata[ks[0]][subkey] = v
hdata['formula'] = host
df = df_dct['{}-X'.format(host)]
rows = list(isnull(df).any(1).nonzero()[0])
if rows:
cells = df.ix[rows].dropna(how='all').dropna(axis=1)[df.columns[0]]
note = cells.iloc[0].replace('following', cells.iloc[1])[:-1]
hdata['note'] = note
df.drop(rows, inplace=True)
mpfile.add_hierarchical_data(nest_dict(hdata, ['data']), identifier=mpid)
print 'add table for D₀/Q data for {}'.format(mpid)
df.set_index(df['Solute element number'], inplace=True)
df.drop('Solute element number', axis=1, inplace=True)
df.columns = df.ix[0]
df.index.name = 'index'
df.drop('Solute element name', inplace=True)
df = df.T.reset_index()
if str(host) == 'Fe':
df_D0_Q = df[[
'Solute element name', 'Solute D0, paramagnetic [cm^2/s]',
'Solute Q, paramagnetic [eV]'
]]
elif hdata['Host']['crystal_structure'] == 'HCP':
df_D0_Q = df[['Solute element name', 'Solute D0 basal [cm^2/s]', 'Solute Q basal [eV]']]
else:
df_D0_Q = df[['Solute element name', 'Solute D0 [cm^2/s]', 'Solute Q [eV]']]
df_D0_Q.columns = ['El.', 'D₀ [cm²/s]', 'Q [eV]']
mpfile.add_data_table(mpid, df_D0_Q, 'D₀_Q')
if hdata['Host']['crystal_structure'] == 'BCC':
print 'add table for hop activation barriers for {} (BCC)'.format(mpid)
columns_E = [
'Hop activation barrier, E_{} [eV]'.format(i) for i in range(2,5)
] + [
"Hop activation barrier, E'_{} [eV]".format(i) for i in range(3,5)
] + [
"Hop activation barrier, E''_{} [eV]".format(i) for i in range(3,5)
] + [
'Hop activation barrier, E_{} [eV]'.format(i) for i in range(5,7)
]
df_E = df[['Solute element name'] + columns_E]
df_E.columns = ['El.'] + [
'E{} [eV]'.format(i) for i in ['₂', '₃', '₄']
] + [
'E`{} [eV]'.format(i) for i in ['₃', '₄']
] + [
'E``{} [eV]'.format(i) for i in ['₃', '₄']
] + [
'E{} [eV]'.format(i) for i in ['₅', '₆']
]
mpfile.add_data_table(mpid, df_E, 'hop_activation_barriers')
print 'add table for hop attempt frequencies for {} (BCC)'.format(mpid)
columns_v = [
'Hop attempt frequency, v_{} [THz]'.format(i) for i in range(2,5)
] + [
"Hop attempt frequency, v'_{} [THz]".format(i) for i in range(3,5)
] + [
"Hop attempt frequency, v''_{} [THz]".format(i) for i in range(3,5)
] + [
'Hop attempt frequency, v_{} [THz]'.format(i) for i in range(5,7)
]
df_v = df[['Solute element name'] + columns_v]
df_v.columns = ['El.'] + [
'v{} [THz]'.format(i) for i in ['₂', '₃', '₄']
] + [
                'v`{} [THz]'.format(i) for i in ['₃', '₄']
] + [
'v``{} [THz]'.format(i) for i in ['₃', '₄']
] + [
'v{} [THz]'.format(i) for i in ['₅', '₆']
]
mpfile.add_data_table(mpid, df_v, 'hop_attempt_frequencies')
elif hdata['Host']['crystal_structure'] == 'FCC':
print 'add table for hop activation barriers for {} (FCC)'.format(mpid)
columns_E = ['Hop activation barrier, E_{} [eV]'.format(i) for i in range(5)]
df_E = df[['Solute element name'] + columns_E]
df_E.columns = ['El.'] + ['E{} [eV]'.format(i) for i in ['₀', '₁', '₂', '₃', '₄']]
mpfile.add_data_table(mpid, df_E, 'hop_activation_barriers')
print 'add table for hop attempt frequencies for {} (FCC)'.format(mpid)
columns_v = ['Hop attempt frequency, v_{} [THz]'.format(i) for i in range(5)]
df_v = df[['Solute element name'] + columns_v]
df_v.columns = ['El.'] + ['v{} [THz]'.format(i) for i in ['₀', '₁', '₂', '₃', '₄']]
mpfile.add_data_table(mpid, df_v, 'hop_attempt_frequencies')
elif hdata['Host']['crystal_structure'] == 'HCP':
print 'add table for hop activation barriers for {} (HCP)'.format(mpid)
columns_E = [
"Hop activation barrier, E_X [eV]", "Hop activation barrier, E'_X [eV]",
"Hop activation barrier, E_a [eV]", "Hop activation barrier, E'_a [eV]",
"Hop activation barrier, E_b [eV]", "Hop activation barrier, E'_b [eV]",
"Hop activation barrier, E_c [eV]", "Hop activation barrier, E'_c [eV]"
]
df_E = df[['Solute element name'] + columns_E]
df_E.columns = ['El.'] + [
'Eₓ [eV]', 'E`ₓ [eV]', 'Eₐ [eV]', 'E`ₐ [eV]',
'E_b [eV]', 'E`_b [eV]', 'E_c [eV]', 'E`_c [eV]'
]
mpfile.add_data_table(mpid, df_E, 'hop_activation_barriers')
print 'add table for hop attempt frequencies for {} (HCP)'.format(mpid)
columns_v = ['Hop attempt frequency, v_a [THz]'] + ['Hop attempt frequency, v_X [THz]']
df_v = df[['Solute element name'] + columns_v]
df_v.columns = ['El.'] + ['vₐ [THz]'] + ['vₓ [THz]']
mpfile.add_data_table(mpid, df_v, 'hop_attempt_frequencies')
print mpfile
print 'DONE'
```
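For reference, the header-splitting block above (which turns spreadsheet column names into nested, unit-annotated `hdata` entries) can be exercised in isolation. The following minimal sketch uses hypothetical headers and values, not data from the actual spreadsheet:
```python
# Sketch of the header-splitting logic; headers and values are hypothetical.
from mpcontribs.io.core.recdict import RecursiveDict
from mpcontribs.io.core.utils import clean_value

hdata = RecursiveDict([
    ('Host lattice constant, a [angstrom]', 2.87),
    ('Host melting point [K]', 1811),
])
for k in list(hdata.keys()):
    v = hdata.pop(k)
    ks = k.split()
    if ks[0] not in hdata:
        hdata[ks[0]] = RecursiveDict()
    unit = ks[-1][1:-1] if ks[-1].startswith('[') else ''
    subkey = '_'.join(ks[1:-1] if unit else ks[1:]).split(',')[0]
    hdata[ks[0]][subkey] = clean_value(v, unit.replace('angstrom', u'Å'))
# hdata is now nested: {'Host': {'lattice_constant': <value with Å unit>,
#                                'melting_point': <value with K unit>}}
```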
#### File: users/esters/pre_submission.py
```python
import os
#from mpcontribs.users.utils import duplicate_check
from pymongo import MongoClient
client = MongoClient('mongodb+srv://'+os.environ['MPCONTRIBS_MONGO_HOST'])
db = client['mpcontribs']
print(db.contributions.count_documents({'project': 'esters'}))
#@duplicate_check
def run(mpfile, **kwargs):
identifier = 'mp-27902' #mpfile.ids[0]
doc = db.contributions.find_one(
{'identifier': identifier, 'project': 'esters'},
{'_id': 1, 'content.structures': 1}
)
if 'structures' in doc['content']:
print('structures already added for', identifier)
return
print(doc['_id'])
contcar = os.path.join(os.path.dirname(__file__), 'CONTCAR')
input_string = open(contcar, 'r').read()
name = 'BiSe'
mpfile.add_structure(input_string, name=name, identifier=identifier, fmt='poscar')
sdct = mpfile.document[identifier]['structures'][name]
sdct.pop('@module')
sdct.pop('@class')
if sdct['charge'] is None:
sdct.pop('charge')
sdct['identifier'] = identifier
sdct['project'] = 'esters'
sdct['name'] = name
sdct['cid'] = doc['_id']
r = db.structures.insert_one(sdct)
print(r.inserted_id)
r = db.contributions.update_one(
{'_id': doc['_id']},
{'$set': {'content.structures': [r.inserted_id]}}
)
print(r.matched_count, r.modified_count)
from mpcontribs.io.archieml.mpfile import MPFile
mpfile = MPFile()
mpfile.max_contribs = 1
run(mpfile)
print(mpfile)
```
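Both this importer and the JARVIS/NUS importer below strip pymatgen's serialization metadata from the structure dictionary before inserting it into the separate `structures` collection and back-referencing it from the contribution. A minimal stand-alone sketch of that pattern (hypothetical NaCl structure and identifier; the pymongo calls are left commented out):
```python
# Sketch only: identifiers and the collection handle are placeholders.
from pymatgen import Structure, Lattice

struct = Structure(Lattice.cubic(5.64), ['Na', 'Cl'], [[0, 0, 0], [0.5, 0.5, 0.5]])
sdct = struct.as_dict()
sdct.pop('@module', None)        # drop serialization metadata before storing
sdct.pop('@class', None)
if sdct.get('charge') is None:
    sdct.pop('charge', None)     # avoid persisting a null charge field
sdct['identifier'] = 'mp-123'    # hypothetical mp-id of the parent contribution
sdct['project'] = 'example'
sdct['name'] = 'NaCl'
# r = db.structures.insert_one(sdct)
# db.contributions.update_one({'_id': cid}, {'$set': {'content.structures': [r.inserted_id]}})
```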
#### File: users/jarvis_dft/pre_submission.py
```python
import os, tarfile, json, urllib, gzip, sys
from mpcontribs.io.core.recdict import RecursiveDict
from mpcontribs.io.core.utils import nest_dict
from monty.json import MontyDecoder
#from mpcontribs.users.utils import duplicate_check
from mpcontribs.io.core.utils import clean_value, get_composition_from_string
from pymongo import MongoClient
client = MongoClient('mongodb+srv://'+os.environ['MPCONTRIBS_MONGO_HOST'])
db = client['mpcontribs']
print(db.contributions.count_documents({'project': 'jarvis_dft'}))
#@duplicate_check
def run(mpfile, **kwargs):
from pymatgen import Structure
reference_project = None
input_data, input_keys, extra = RecursiveDict(), RecursiveDict(), RecursiveDict()
#input_urls = mpfile.document['_hdata'].pop('input_urls')
input_urls = {
'NUS': {
"file": "http://www.2dmatpedia.org/static/db.json.gz",
"detail": "http://www.2dmatpedia.org/2dmaterials/doc/{}"
},
'JARVIS': {
"file": "https://www.ctcms.nist.gov/~knc6/jdft_{}.json.tgz",
"detail": "https://www.ctcms.nist.gov/~knc6/jsmol/{}.html"
}
}
for project in input_urls:
input_url = input_urls[project]['file']
if '{}' in input_url:
input_url = input_url.format('2d') # TODO 3d for Jarvis
#dbfile = os.path.join(os.environ['HOME'], 'work', input_url.rsplit('/')[-1])
dbfile = input_url.rsplit('/')[-1]
if not os.path.exists(dbfile):
print('downloading', dbfile, '...')
urllib.request.urlretrieve(input_url, dbfile)
ext = os.path.splitext(dbfile)[1]
is_nus = bool(ext == '.gz')
id_key = 'source_id' if is_nus else 'mpid'
if not is_nus:
with tarfile.open(dbfile, "r:gz") as tar:
member = tar.getmembers()[0]
raw_data = json.load(tar.extractfile(member), cls=MontyDecoder)
else:
reference_project = project
raw_data = []
with gzip.open(dbfile, 'rb') as f:
for line in f:
raw_data.append(json.loads(line, cls=MontyDecoder))
input_data[project] = RecursiveDict((d[id_key], d) for d in raw_data)
input_keys[project] = [
'material_id', 'exfoliation_energy_per_atom', 'structure'
] if is_nus else ['jid', 'exfoliation_en', 'final_str']
extra[project] = [
('fin_en', ('E', 'meV/atom')),
('op_gap', ('ΔE|optB88vdW', 'meV/atom')),
('mbj_gap', ('ΔE|mbj', 'meV/atom')),
#('kv', ('Kᵥ', 'GPa')),
#('gv', ('Gᵥ', 'GPa'))
] if not is_nus else []
print(len(input_data[project]), 'materials loaded for', project)
projects = input_data.keys()
identifiers = []
for d in input_data.values():
identifiers += list(d.keys())
for identifier in set(identifiers):
print(identifier)
data, structures = RecursiveDict(), RecursiveDict()
for project in projects:
if project not in data:
data[project] = RecursiveDict()
if identifier in input_data[project]:
d = input_data[project][identifier]
structures[project] = d[input_keys[project][-1]]
if data.get('formula') is None:
data['formula'] = get_composition_from_string(
structures[project].composition.reduced_formula
)
data[project]['id'] = input_urls[project]['detail'].format(d[input_keys[project][0]])
if input_keys[project][1] in d:
Ex = d[input_keys[project][1]]
if project == reference_project:
Ex *= 1000.
data[project]['Eₓ'] = clean_value(Ex, 'eV')
for k, (sym, unit) in extra[project]:
if d[k] != 'na':
data[project][sym] = clean_value(d[k], unit)
mpfile.add_hierarchical_data(nest_dict(data, ['data']), identifier=identifier)
#r = db.contributions.update_one(
# {'identifier': identifier, 'project': 'jarvis_dft'},
# {'$set': {'content.data': mpfile.document[identifier]['data']}},
# upsert=True
#)
#print(r.matched_count, r.modified_count, r.upserted_id)
doc = db.contributions.find_one(
{'identifier': identifier, 'project': 'jarvis_dft'},
{'_id': 1, 'content.structures': 1}
)
if 'structures' in doc['content']:
print('structures already added for', identifier)
continue
print(doc['_id'])
inserted_ids = []
for project, structure in structures.items():
try:
mpfile.add_structure(structure, name=project, identifier=identifier)
sdct = mpfile.document[identifier]['structures'][project]
sdct.pop('@module')
sdct.pop('@class')
if sdct['charge'] is None:
sdct.pop('charge')
sdct['identifier'] = identifier
sdct['project'] = 'jarvis_dft'
sdct['name'] = project
sdct['cid'] = doc['_id']
r = db.structures.insert_one(sdct)
inserted_ids.append(r.inserted_id)
except Exception as ex:
print(str(ex))
print(inserted_ids)
r = db.contributions.update_one(
{'_id': doc['_id']},
{'$set': {'content.structures': inserted_ids}}
)
print(r.matched_count, r.modified_count)
from mpcontribs.io.archieml.mpfile import MPFile
mpfile = MPFile()
mpfile.max_contribs = 3200
run(mpfile)
```
#### File: redox_thermo_csp/rest/energy_analysis.py
```python
from __future__ import unicode_literals
import json
import os
import collections
import pandas as pd
from pymatgen.core.composition import Composition
from pymatgen.core.units import FloatWithUnit
from scipy.constants import R
from scipy.integrate import quad
from mpcontribs.users.redox_thermo_csp.rest.utils import remove_comp_one, add_comp_one, rootfind, s_th_o
from mpcontribs.users.redox_thermo_csp.rest.utils import dh_ds, funciso, funciso_theo, d_h_num_dev_calc
from mpcontribs.users.redox_thermo_csp.rest.utils import get_mpids_comps_perov_brownm, split_comp
from mpcontribs.users.redox_thermo_csp.rest.utils import redenth_act, find_active, get_debye_temp
class WaterSplitting:
@staticmethod
def dg_zero_water_splitting(temp):
"""
Uses linear fits of data in Barin, Thermochemical Data of Pure Substances
Only valid for steam!
:return: dg_zero
"""
dg_zero = ((-0.052489 * temp) + 245.039) * 1000
return dg_zero
@staticmethod
def k_water_splitting(temp):
"""
Get the equilibrium constant of water
:param temp: temperature in K
:return: equilibrium constant
"""
dg_zero = WaterSplitting().dg_zero_water_splitting(temp)
k_eq = pd.np.exp(dg_zero / (-R * temp))
return k_eq
@staticmethod
def get_h2_h2o_ratio(temp, po2):
"""
Converts an oxygen partial pressure into a ratio of H2 to H2O for water splitting
:param temp: temperature in K
:param po2: oxygen partial pressure
:return: ratio of H2 to H2O
"""
h2_h2o = WaterSplitting().k_water_splitting(temp) / pd.np.sqrt(po2)
return h2_h2o
@staticmethod
def get_po2(temp, h2_h2o):
"""
Converts a ratio of H2 to H2O for water splitting into an oxygen partial pressure
:param temp: temperature in K
:param h2_h2o: ratio of H2 to H2O
:return: oxygen partial pressure
"""
po2 = (WaterSplitting().k_water_splitting(temp) / h2_h2o) ** 2
return po2
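# Illustrative usage (not part of the original module): the two conversions above are
# inverses of each other, since h2_h2o = K(T)/sqrt(p_O2) implies p_O2 = (K(T)/h2_h2o)**2.
#   ws = WaterSplitting()
#   ratio = ws.get_h2_h2o_ratio(temp=1073.15, po2=1e-10)
#   ws.get_po2(temp=1073.15, h2_h2o=ratio)  # recovers ~1e-10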
class CO2Splitting:
@staticmethod
def dg_zero_co2_splitting(temp):
"""
Uses linear fits of data in Barin, Thermochemical Data of Pure Substances
:return: dg_zero
"""
dg_zero_co2 = (temp ** 2) * 9.44E-7 - (0.0032113 * temp) - 393.523
dg_zero_co = -0.0876385 * temp - 111.908
dg_zero = (-dg_zero_co2 + dg_zero_co) * 1000
return dg_zero
@staticmethod
def k_co2_splitting(temp):
"""
Get the equilibrium constant of the CO2 splitting reaction
:param temp: temperature in K
:return: equilibrium constant
"""
dg_zero = CO2Splitting().dg_zero_co2_splitting(temp)
k_eq = pd.np.exp(dg_zero / (-R * temp))
return k_eq
@staticmethod
def get_co_co2_ratio(temp, po2):
"""
Converts an oxygen partial pressure into a ratio of CO to CO2 for CO2 splitting
:param temp: temperature in K
:param po2: oxygen partial pressure
:return: ratio of CO to CO2
"""
h2_h2o = CO2Splitting().k_co2_splitting(temp) / pd.np.sqrt(po2)
return h2_h2o
@staticmethod
def get_po2(temp, co_co2):
"""
Converts a ratio of CO to CO2 for CO2 splitting into an oxygen partial pressure
:param temp: temperature in K
:param co_co2: ratio of CO to CO2
:return: oxygen partial pressure
"""
po2 = (CO2Splitting().k_co2_splitting(temp) / co_co2) ** 2
return po2
class EnergyAnalysis:
"""
Analyze the energy input for different redox cycles
"""
def __init__(self, process="Air Separation"):
self.process = process
@staticmethod
def c_p_water_liquid(temp):
"""
Calculates the heat capacity of liquid water.
:return: cp_water
"""
# constants: Chase, NIST-JANAF Thermochemistry tables, Fourth Edition, 1998
shomdat = [-203.6060, 1523.290, -3196.413, 2474.455, 3.855326]
temp_frac = temp / 1000
c_p_water = shomdat[0] + (shomdat[1] * temp_frac) + (shomdat[2] * (temp_frac ** 2)) + (
shomdat[3] * (temp_frac ** 3)) + (shomdat[4] / (temp_frac ** 2))
return c_p_water
@staticmethod
def c_p_steam(temp):
"""
Calculates the heat capacity of steam
:return: cp_steam
"""
if temp < 1700:
shomdat = [30.09200, 6.832514, 6.793435, -2.534480, 0.082139]
else:
shomdat = [41.96426, 8.622053, -1.499780, 0.098119, -11.15764]
temp_frac = temp / 1000
c_p_steam = shomdat[0] + (shomdat[1] * temp_frac) + (shomdat[2] * (temp_frac ** 2)) + (
shomdat[3] * (temp_frac ** 3)) + (shomdat[4] / (temp_frac ** 2))
return c_p_steam
@staticmethod
def get_heat_capacity(temp, td):
# credits to Dr. <NAME>, LBNL
t_ratio = temp / td
def integrand(x):
return (x ** 4 * pd.np.exp(x)) / (pd.np.exp(x) - 1) ** 2
if isinstance(t_ratio, int) or isinstance(t_ratio, float):
cv_p = 9 * R * (t_ratio ** 3) * quad(integrand, 0, t_ratio ** -1)[0]
else:
cv_p = []
for i in range(len(t_ratio)):
cv_i = 9 * R * (t_ratio[i] ** 3) * quad(integrand, 0, t_ratio[i] ** -1)[0]
cv_p = pd.np.append(cv_p, cv_i)
return cv_p * 5
@staticmethod
def get_heat_capacity_mixed(temp, delta, td_p=None, td_b=None):
enal = EnergyAnalysis()
cv_p = enal.get_heat_capacity(temp, td_p) * 5
cv_b = enal.get_heat_capacity(temp, td_b) * 4.5
ratio_p = (0.5 - delta) / 0.5
ratio_b = delta / 0.5
cv_m = pd.np.multiply(ratio_p, cv_p) + pd.np.multiply(ratio_b, cv_b)
return temp, cv_m
@staticmethod
def heat_input_linear(temp_1, temp_2, delta_1, delta_2, t_d_perov, t_d_brownm, num=40):
"""
Uses an approximation to calculate the integral c(T, delta) dT by splitting the interval into a number of
slices with constant c
Uses a linear approximation for delta between delta_1 and delta_2
This method is a lot faster than the actual integration and the errors of the approximation are negligible
(at default settings: < 1E-5, typically approx. 1E-6)
:param temp_1: initial temperature(s)
:param temp_2: final temperature(s)
:param delta_1: initial non-stoichiometry value(s)
:param delta_2: final non-stoichiometry value(s)
:param num: number of steps for the approximation of the integral
:return: heat input to heat perovskite from temp_1 to temp_2 considering the change in delta (in J)
positive for heating, negative for cooling
"""
try:
# treatment of arrays for output of multiple data points
dqs = []
if not (isinstance(temp_1, float) or (isinstance(temp_1, int))):
for i in range(len(temp_1)):
tempval = pd.np.linspace(temp_1[i], temp_2[i], num=num)
deltaval = pd.np.linspace(delta_1[i], delta_2[i], num=num)
# calculate average values within intervals
delta_x0_x1 = pd.np.empty(len(deltaval) - 1)
for i in range(len(deltaval) - 1):
delta_x0_x1[i] = (deltaval[i] + deltaval[i + 1]) / 2
temp_x0_x1 = pd.np.empty(len(tempval) - 1)
for i in range(len(tempval) - 1):
temp_x0_x1[i] = (tempval[i] + tempval[i + 1]) / 2
# length of a temperature step
del_temp = (temp_2 - temp_1) / len(temp_x0_x1)
# calculate the area under the step for each step
dq = 0
for i in range(len(delta_x0_x1)):
cv_step = EnergyAnalysis().get_heat_capacity_mixed(temp_x0_x1[i], delta_x0_x1[i], td_p=t_d_perov,
td_b=t_d_brownm)[1]
q_step = cv_step * del_temp
dq += q_step
dqs = pd.np.append(dqs, dq)
dq = dqs
else:
tempval = pd.np.linspace(temp_1, temp_2, num=num)
deltaval = pd.np.linspace(delta_1, delta_2, num=num)
# calculate average values within intervals
delta_x0_x1 = pd.np.empty(len(deltaval) - 1)
for i in range(len(deltaval) - 1):
delta_x0_x1[i] = (deltaval[i] + deltaval[i + 1]) / 2
temp_x0_x1 = pd.np.empty(len(tempval) - 1)
for i in range(len(tempval) - 1):
temp_x0_x1[i] = (tempval[i] + tempval[i + 1]) / 2
# length of a temperature step
del_temp = (temp_2 - temp_1) / len(temp_x0_x1)
# calculate the area under the step for each step
dq = 0
for i in range(len(delta_x0_x1)):
cv_step = EnergyAnalysis().get_heat_capacity_mixed(temp_x0_x1[i], delta_x0_x1[i], td_p=t_d_perov,
td_b=t_d_brownm)[1]
q_step = cv_step * del_temp
dq += q_step
except TypeError:
dq = None
raise ValueError("Elastic tensors or crystal structures not available for this set of materials.")
return dq
@staticmethod
def energy_steam_generation(temp_1, temp_2, h_2_h2o, celsius=True, h_rec=0.0):
"""
Calculates the energy required to heat water, evaporate it and to generate steam at temperature temp_2
Assuming water at ambient pressure, boiling point 100 °C
:param temp_1: initial temperature of water/steam
:param temp_2: steam temperature
:param h_2_h2o: partial pressure ratio h2/h2o
:param celsius: if True, temperature values are assumed to be in degrees celsius
:param h_rec: heat recovery efficiency, can be between 0 and 1
:return: energy required to generate steam per mol of H2 in the product stream in kJ/mol
"""
if celsius:
temp_1 = temp_1 + 273.15
temp_2 = temp_2 + 273.15
enal = EnergyAnalysis()
# liquid water (at ambient pressure)
# this code only considers water at ambient pressure!
if temp_1 < 373.15:
if temp_2 > 373.15:
energy_1 = quad(enal.c_p_water_liquid, temp_1, 373.15)[0]
else:
energy_1 = quad(enal.c_p_water_liquid, temp_1, temp_2)[0]
else:
energy_1 = 0
if temp_2 > 373.15:
if temp_1 < 373.15:
energy_2 = quad(enal.c_p_steam, 373.15, temp_2)[0]
else:
energy_2 = quad(enal.c_p_steam, temp_1, temp_2)[0]
else:
energy_2 = 0
# from the literature
heat_vaporization = 40790
if temp_1 < 373.15 < temp_2:
total_energy = energy_1 + energy_2 + heat_vaporization
else:
total_energy = energy_1 + energy_2
# per mol of H2
total_energy = total_energy / h_2_h2o
# considering heat recovery
total_energy = total_energy * (1 - h_rec)
return total_energy / 1000
@staticmethod
def energy_integral_theo(enth_steps, celsius, temp_1, temp_2, compstr, dh_min, dh_max, t_d_perov, t_d_brownm,
p_o_2_1, p_o_2_2):
"""
Determines the chemical energy change using theoretical data. All variables explained in
EnergyAnalysis.calc
"""
# To get a good approximation of the integral over the enthalpy values, the area under the curve is calculated
# stepwise. The actual integral calculation would take too long, as each enthalpy value is calculated
# numerically
# We are only considering the case of linear change of both pressure and temperature between reduction and oxidation here
if celsius:
tempval = pd.np.linspace(temp_1 + 273.15, temp_2 + 273.15, num=enth_steps)
else:
tempval = pd.np.linspace(temp_1, temp_2, num=enth_steps)
p_val = pd.np.logspace(pd.np.log10(p_o_2_1), pd.np.log10(p_o_2_2), num=enth_steps)
sample_spl = split_comp(compstr)
act = find_active(mat_comp=sample_spl)[1]
delta_vals = []
for i in range(len(tempval)):
args_theo = (pd.np.log(p_val[i]), tempval[i], None, t_d_perov, t_d_brownm, dh_min, dh_max, act)
delta_val_i = rootfind(1e-10, 0.5-1e-10, args_theo, funciso_theo)
delta_vals = pd.np.append(delta_vals, delta_val_i)
dh_vals = []
for i in range(len(tempval)):
dh_i = d_h_num_dev_calc(delta=delta_vals[i], dh_1=dh_min, dh_2=dh_max, temp=tempval[i], act=act)
dh_vals = pd.np.append(dh_vals, dh_i)
# calculate energy stepwise
energy_red = []
for i in range(len(delta_vals) - 1):
# deltastep * average dh
h_x0_x1_i = (dh_vals[i] + dh_vals[i + 1]) / 2
energy_i = (delta_vals[i + 1] - delta_vals[i]) * h_x0_x1_i
energy_red = pd.np.append(energy_red, energy_i)
energy_integral_dh = sum(energy_red) / 1000
return energy_integral_dh
@staticmethod
def mechanical_envelope(p_red):
"""
Uses the "mechanical envelope" function from <NAME> al.
dx.doi.org/10.1016/j.solener.2016.11.023
Estimates the energy required to pump one mol of oxygen at this pressure using mechanical pumps.
:param p_red: oxygen partial pressure at reduction conditions
:return: pump_ener_envelope: mechanical energy required to pump one mol of O
"""
if (p_red < 1E-6) or (p_red > 0.7):
q_pump = float('inf') # mechanical envelope not applicable in this range
else:
eff_sol = 0.4
temp = 473 # this is the operating temperature of the pump
a0 = 0.30557
a1 = -0.17808
a2 = -0.15514
a3 = -0.03173
a4 = -0.00203
p0 = 1e5
p = p_red * p0
eff = a0 + a1*pd.np.log10(p/p0) + a2*(pd.np.log10(p/p0))**2 + a3*(pd.np.log10(p/p0))**3 + a4*(pd.np.log10(p/p0))**4
q_iso=R*temp*pd.np.log(p0/p)
q_pump=(q_iso/eff) / eff_sol
q_pump = q_pump / 2000
return q_pump
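# Illustrative usage (not part of the original module): the envelope is only defined
# for reduction pressures between 1e-6 bar and 0.7 bar; outside this range the method
# returns float('inf') so that such conditions are sorted out downstream.
#   EnergyAnalysis().mechanical_envelope(p_red=1e-3)  # kJ per mol O
#   EnergyAnalysis().mechanical_envelope(p_red=1e-8)  # inf, outside the envelope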
@staticmethod
def dhf_h2o(t_ox):
"""
Gets the heat of formation of water at a given temperature
Based on the Shomate equation and the NIST-JANAF thermochemical tables
H° − H°₂₉₈.₁₅ = A*t + B*t²/2 + C*t³/3 + D*t⁴/4 − E/t + F − H
H° = A*t + B*t²/2 + C*t³/3 + D*t⁴/4 − E/t + F
https://webbook.nist.gov/cgi/cbook.cgi?ID=C7732185&Units=SI&Mask=1#Thermo-Gas
"""
if t_ox <= 1700:
a = 30.09200
b = 6.832514
c = 6.793435
d = -2.534480
e = 0.082139
f = -250.8810
else:
a = 41.96426
b = 8.622053
c = -1.499780
d = 0.098119
e = -11.15764
f = -272.1797
t_1000 = t_ox / 1000
hform = a*t_1000
hform += 0.5*b*(t_1000**2)
hform += (1/3)*c*(t_1000**3)
hform += (1/4)*d*(t_1000**4)
hform += -e/t_1000
hform += f
return hform
@staticmethod
def dh_co_co2(t_ox):
"""
Gets the heat of formation of CO2 and of CO and returns the difference to get the heat of reaction
Based on the Shomate equation and the NIST-JANAF thermochemical tables
H° − H°₂₉₈.₁₅ = A*t + B*t²/2 + C*t³/3 + D*t⁴/4 − E/t + F − H
H° = A*t + B*t²/2 + C*t³/3 + D*t⁴/4 − E/t + F
CO2: https://webbook.nist.gov/cgi/cbook.cgi?ID=C124389&Units=SI&Mask=1#Thermo-Gas
CO: https://webbook.nist.gov/cgi/cbook.cgi?ID=C630080&Units=SI&Mask=1#Thermo-Gas
"""
t_1000 = t_ox / 1000
# CO2
if t_ox <= 1200:
a = 24.99735
b = 55.18696
c = -33.69137
d = 7.948387
e = -0.136638
f = -403.6075
else:
a = 58.16639
b = 2.720074
c = -0.492289
d = 0.038844
e = -6.447293
f = -425.9186
hco2 = a*t_1000
hco2 += 0.5*b*(t_1000**2)
hco2 += (1/3)*c*(t_1000**3)
hco2 += (1/4)*d*(t_1000**4)
hco2 += -e/t_1000
hco2 += f
# CO
if t_ox <= 1300:
a = 25.56759
b = 6.096130
c = 4.054656
d = -2.671301
e = 0.131021
f = -118.0089
else:
a = 35.15070
b = 1.300095
c = -0.205921
d = 0.013550
e = -3.282780
f = -127.8375
hco = a*t_1000
hco += 0.5*b*(t_1000**2)
hco += (1/3)*c*(t_1000**3)
hco += (1/4)*d*(t_1000**4)
hco += -e/t_1000
hco += f
return hco2-hco
def calc(self, p_ox, p_red, t_ox, t_red, data_origin="Exp", data_use="combined",
enth_steps=30, sample_ident=-1, celsius=True, from_file=True,
heat_cap=True,
heat_cap_approx=True
):
"""
Performs an energy analysis using experimental or theoretical data.
:param p_ox: Oxidation partial pressure of oxygen (in bar) or ratio p(H2)/p(H2O) / p(CO)/p(CO2)
:param p_red: Oxygen partial pressure for reduction (in bar)
:param t_ox: Oxidation temperature
:param t_red: Reduction temperature
:param data_origin: "Exp": experimental data
"Theo": theoretical data
***only relevant if 'data_origin' = "Theo"
:param data_use:
"endmembers": uses redox members of solid solution endmembers to estimate redox
enthalpies of solid solutions
"combined": corrects above-mentioned data by the actual redox enthalpies for the solid
solutions calcualted via DFT
:param enth_steps: number of enthalpy values which are calculated for each material in order to
reach a good approximation of the integral over dH vs. delta
:param sample_ident: Sample number(s) (experimental data) or composition (theoretical data),
default value '-1'-> analyze all samples
:param pump_ener: allows to consider the pumping energy required to pump from p_o_2_1 to p_o_2_2
input in kJ per kg of redox material in the oxidized state + the losses
This depends on many factors, such as the type of pumps used, the volume of the
reaction chamber, the reactor type etc., so the user needs to calculate this
value beforehand depending on the individual process conditions
In case some of the pumping energy can be recovered, this share needs to be
subtracted beforehand, as it is not considered herein.
:param celsius: if True, assumes all input temperatures are in °C instead of K
:param from_file: if True, takes the enthalpies, Debye temperatures, and materials lists from
the file "theo_redenth_debye.json". Much faster than using the MPRester
Only works if sample_ident = -1
:param heat_cap: if True, sensible energy to heat the samples is considered
:param heat_cap_approx: if True, uses values for SrFeOx in case of missing heat capacity data
:return: dict_result: dictionary with results for different materials
"""
si_first = sample_ident
# correct temperature values for Kelvin/Celsius
if celsius:
temp_1_corr = t_ox + 273.15
temp_2_corr = t_red + 273.15
else:
temp_1_corr = t_ox
temp_2_corr = t_red
if data_origin == "Exp": # currently not in use for updates of existing data
# load experimental sample data from file
path = os.path.abspath("")
filepath = os.path.join(path, "exp_data.json")
with open(filepath) as handle:
expdata = json.loads(handle.read())
# use equivalent partial pressures for Water Splitting and CO2 splitting
if self.process == "Water Splitting":
p_ox = WaterSplitting().get_po2(temp=temp_1_corr, h2_h2o=p_ox)
elif self.process == "CO2 Splitting":
p_ox = CO2Splitting().get_po2(temp=temp_1_corr, co_co2=p_ox)
# iterate over samples
if isinstance(sample_ident, collections.Sized) and not isinstance(sample_ident, str):
no_range = range(len(sample_ident))
sample = None
else:
no_range = range(1)
if data_origin == "Exp":
sample = int(sample_ident)
else:
sample = str(sample_ident)
# iterate over all available samples
if sample_ident == -1:
sample = None
if data_origin == "Exp":
no_range = range(0, 150)
sample_ident = no_range
else:
if not from_file:
filename = os.path.join(os.path.abspath('..'), "datafiles", "perovskite_theo_list.csv")
if not os.path.exists(filename):
raise ImportError("File 'perovskite_theo_list.csv' not found.")
fo = open(filename, "rb")
sample_ident = pd.np.genfromtxt(fo, dtype='str', delimiter=",", skip_header=1)
fo.close()
else:
sampledata = views.get_theo_data()
sample_ident = sampledata["compstr"]
no_range = range(len(sample_ident))
sample_l, chemical_energy_l, sensible_energy_l, mol_mass_ox_l, prodstr_alt_l = [], [], [], [], []
mol_prod_mol_red_l, t_ox_l, t_red_l, p_ox_l, p_red_l, compstr_l = [], [], [], [], [], []
delta_1_l, delta_2_l, mass_redox_l, prodstr_l, l_prod_kg_red_l, g_prod_kg_red_l = [], [], [], [], [], []
for i in no_range:
if not sample:
sample = sample_ident[i]
# this only works if the sample number/data exists
try:
if data_origin == "Exp":
exp_index = -1
for k in range(len(expdata)):
if int(expdata["Sample number"][k]) == sample:
exp_index = k
if exp_index == -1:
raise ValueError("Experimental data for this sample not found.")
compstr = expdata["theo_compstr"][exp_index]
compstr_x = compstr.split("Ox")[0]
# this formats the parameters the same way we have them in views.py
fit_param_enth = {"a": float(expdata["dH_max"][exp_index]),
"b": float(expdata["dH_min"][exp_index]),
"c": float(expdata["t"][exp_index]),
"d": float(expdata["s"][exp_index])}
fit_type_entr = str(expdata["fit type entropy"][exp_index])
if fit_type_entr == "Dilute_Species":
fit_par_ent = {"a": float(expdata["entr_dil_s_v"][exp_index]),
"b": float(expdata["entr_dil_a"][exp_index]),
"c": float(expdata["delta_0"][exp_index])}
else:
fit_par_ent = {"a": float(expdata["entr_solid_sol_s"][exp_index]),
"b": float(expdata["entr_solid_sol_shift"][exp_index]),
"c": float(expdata["delta_0"][exp_index])}
theo_compstr = compstr
splitcomp = split_comp(compstr)
delta_0 = float(expdata["delta_0"][exp_index])
actf = find_active(mat_comp=splitcomp)[1]
act_mat = {"Material": float(actf)}
fit_param_fe = {"a": 231.062,
"b": -24.3338,
"c": 0.839785,
"d": 0.219157}
pars = { "fit_par_ent": fit_par_ent,
"fit_param_enth": fit_param_enth,
"fit_type_entr": fit_type_entr,
"delta_0": delta_0,
"fit_param_fe": fit_param_fe,
"act_mat": act_mat
}
args_1 = (pd.np.log(p_ox), temp_1_corr, pars, s_th_o(temp_1_corr))
args_2 = (pd.np.log(p_red), temp_2_corr, pars, s_th_o(temp_2_corr))
delta_1 = rootfind(1e-10, 0.5-1e-10, args_1, funciso)
delta_2 = rootfind(1e-10, 0.5-1e-10, args_2, funciso)
# use theoretical elastic tensors
sampledata = views.get_theo_data()
for z in range(len(sampledata["compstr"])):
if (sampledata["compstr"][z]).split("O3")[0] == compstr.split("Ox")[0]:
index_debye = z
t_d_perov = float(sampledata["Debye temp perovskite"][index_debye])
t_d_brownm = float(sampledata["Debye temp brownmillerite"][index_debye])
else:
# if composition does not contain ones as stoichiometries, add them
sample = add_comp_one(compstr=sample)
if not from_file or si_first != -1:
try:
red_active = redenth_act(sample)
except TypeError:
raise ValueError("Enthalpy data not available for this material.")
h_min = red_active[1]
h_max = red_active[2]
act = red_active[3]
else:
h_min = float(sampledata["dH_min"][i])
h_max = float(sampledata["dH_max"][i])
act = float(sampledata["act"][i])
compstr = sample
compstr_x = compstr.split("O")[0]
if not from_file or si_first != -1:
try: # get Debye temperatures for vibrational entropy
mp_ids = get_mpids_comps_perov_brownm(compstr=compstr)
t_d_perov = get_debye_temp(mp_ids[0])
t_d_brownm = get_debye_temp(mp_ids[1])
except Exception as e: # if no elastic tensors or no data for this material is available
mp_ids = ("mp-510624", "mp-561589") # using data for SrFeOx if no data is available (close approximation)
t_d_perov = get_debye_temp(mp_ids[0])
t_d_brownm = get_debye_temp(mp_ids[1])
else:
t_d_perov = float(sampledata["Debye temp perovskite"][i])
t_d_brownm = float(sampledata["Debye temp brownmillerite"][i])
args_theo_1 = (pd.np.log(p_ox), temp_1_corr, None, t_d_perov, t_d_brownm, h_min, h_max, act)
delta_1 = rootfind(1e-10, 0.5-1e-10, args_theo_1, funciso_theo)
args_theo_2 = (pd.np.log(p_red), temp_2_corr, None, t_d_perov, t_d_brownm, h_min, h_max, act)
delta_2 = rootfind(1e-10, 0.5-1e-10, args_theo_2, funciso_theo)
# calculate the mass change in %
comp_ox = compstr_x + "O" + str(float(3 - delta_1))
comp_red = compstr_x + "O" + str(float(3 - delta_2))
mol_mass_ox = float(Composition(comp_ox).weight)
mol_mass_red = float(Composition(comp_red).weight)
mass_redox_i = ((mol_mass_ox - mol_mass_red) / mol_mass_ox) * 100
# define reaction products
if self.process == "Air Separation":
prodstr = "O2"
prodstr_alt = "O"
elif self.process == "Water Splitting":
prodstr = "H2"
prodstr_alt = prodstr
elif self.process == "CO2 Splitting":
prodstr = "CO"
prodstr_alt = prodstr
else:
raise ValueError("Process must be either Air Separation, Water Splitting, or CO2 Splitting!")
# only continue if the user-designated reduction step actually leads to reduction
# if not, set result to infinite
if delta_2 < delta_1:
ener_i = pd.np.ones(5) * float('inf')
per_kg_redox = pd.np.ones(5) * float('inf')
per_kg_wh_redox = pd.np.ones(5) * float('inf')
kj_mol_prod = pd.np.ones(5) * float('inf')
energy_l = pd.np.ones(5) * float('inf')
energy_l_wh = pd.np.ones(5) * float('inf')
efficiency = float('inf')
mol_prod_mol_red = float('inf')
l_prod_kg_red = float('inf')
g_prod_kg_red = float('inf')
else:
# mol product per mol of redox material
mol_prod_mol_red = delta_2 - delta_1
# L product per kg of redox material (SATP)
l_prod_kg_red = mol_prod_mol_red * (24.465 / (0.001 * mol_mass_ox))
# convert mol O to mol O2
if self.process == "Air Separation":
l_prod_kg_red = l_prod_kg_red * 0.5
# g product per kg redox material
g_prod_kg_red = float(Composition(prodstr).weight) * (l_prod_kg_red / 24.465)
if data_origin == "Exp":
d_delta = delta_0
else:
d_delta = 0.0
# correct for d_delta
d_delta_1 = delta_1 - d_delta
d_delta_2 = delta_2 - d_delta
# chemical energy
if data_origin == "Exp":
s_th_mean = (s_th_o(temp_1_corr) + s_th_o(temp_2_corr)) / 2
def dh_func_exp(d_delta_func):
return dh_ds(d_delta_func, s_th_mean, pars)[0]
energy_integral_dh = quad(dh_func_exp, d_delta_1, d_delta_2)[0]
if energy_integral_dh < 0:
raise ValueError("negative chemical energy due to insuffiencent experimental data...skipping this sample")
else:
energy_integral_dh = EnergyAnalysis(process=self.process).energy_integral_theo(
celsius=celsius, compstr=compstr, dh_max=h_max,
dh_min=h_min, enth_steps=enth_steps, p_o_2_1=p_ox, p_o_2_2=p_red, temp_1=t_ox, temp_2=t_red,
t_d_perov=t_d_perov, t_d_brownm = t_d_brownm)
# sensible energy
energy_sensible = 0
if heat_cap:
energy_sensible = EnergyAnalysis().heat_input_linear(temp_1=temp_1_corr, temp_2=temp_2_corr, delta_1=delta_1,
delta_2=delta_2, t_d_perov=t_d_perov, t_d_brownm=t_d_brownm, num=40) / 1000
chemical_energy_l.append(energy_integral_dh)
sensible_energy_l.append(energy_sensible)
mol_mass_ox_l.append(mol_mass_ox)
mol_prod_mol_red_l.append(mol_prod_mol_red)
t_ox_l.append(temp_1_corr)
t_red_l.append(temp_2_corr)
p_ox_l.append(p_ox)
p_red_l.append(p_red)
compstr_l.append(compstr)
delta_1_l.append(delta_1)
delta_2_l.append(delta_2)
mass_redox_l.append(mass_redox_i)
prodstr_l.append(prodstr)
prodstr_alt_l.append(prodstr_alt)
l_prod_kg_red_l.append(l_prod_kg_red)
g_prod_kg_red_l.append(g_prod_kg_red)
# skip this sample if the sample number does not exist
except Exception as e:
pass
#print("No data for sample " + str(sample) + " found!" + str(e))
sample = None
resdict = { "Chemical Energy": chemical_energy_l,
"Sensible Energy": sensible_energy_l,
"mol_mass_ox": mol_mass_ox_l,
"mol_prod_mol_red": mol_prod_mol_red_l,
"T_ox": t_ox_l,
"T_red": t_red_l,
"p_ox": p_ox_l,
"p_red": p_red_l,
"compstr": compstr_l,
"delta_1": delta_1_l,
"delta_2": delta_2_l,
"mass_redox": mass_redox_l,
"prodstr": prodstr_l,
"prodstr_alt": prodstr_alt_l,
"l_prod_kg_red": l_prod_kg_red_l,
"g_prod_kg_red": g_prod_kg_red_l}
return resdict
def on_the_fly(self, resdict, pump_ener, w_feed, h_rec, h_rec_steam, celsius=True, h_val="high", p_ox_wscs=0, rem_unstable=True):
"""
Allows the energy input for different conditions to be calculated rather quickly, without having to
re-calculate the time-intensive chemical and sensible energy every time
:param resdict: dictionary with results (mainly for chemical and sensible energy, as calculated by
EnergyAnalysis().calc())
:param pump_ener: allows to consider the pumping energy required to pump from p_o_2_1 to p_o_2_2
input in kJ per kg of redox material in the oxidized state + the losses
This depends on many factors, such as the type of pumps used, the volume of the
reaction chamber, the reactor type etc., so the user needs to calculate this
value beforehand depending on the individual process conditions
In case some of the pumping energy can be recovered, this share needs to be
subtracted beforehand, as it is not considered herein.
:param h_rec: heat recovery efficiency factor (0...1) for chemical and sensible energy
***these values are only relevant for water splitting***
:param h_rec_steam: heat recovery efficiency factor (0...1) for recovery of heat stored in the steam
:param w_feed: water inlet temperature (in °C or K as defined by 'celsius')
:param h_val: heating value of hydrogen: 'low' -> lower heating value,
'high' -> higher heating value
:param p_ox_wscs: ratio H2/H2O / ratio CO/CO2
:param rem_unstable: if True, phases which are potentially unstable for chemical reasons are removed
this is based on the phases in "unstable_phases.json"
currently, phases are excluded for the following reasons:
- tolerance factor below 0.9 (e.g. EuCuO3, which cannot be synthesized as opposed to EuFeO3)
- phases with expected high covalency (V5+ cations, for instance, NaVO3 is stable but not a perovskite)
- phases with expected low melting point (Mo5+ cations, see this article for NaMoO3
http://www.journal.csj.jp/doi/pdf/10.1246/bcsj.64.161)
By default, this is always True and there is no way in the user front-end to change this.
However, this could be changed manually by the developers, if neccessary.
"""
if self.process == "Air Separation":
p_ox_wscs = 1
# initialize result variables
result_val_ener_i = pd.np.empty(6)
result_val_per_kg_redox = pd.np.empty(6)
result_val_per_kg_wh_redox = pd.np.empty(6)
result_val_per_kj_mol_prod = pd.np.empty(6)
result_val_per_energy_l = pd.np.empty(6)
result_val_per_energy_l_wh = pd.np.empty(6)
result_val_efficiency = pd.np.empty(2)
result_val_mol_prod_mol_red = pd.np.empty(2)
result_val_l_prod_kg_red = pd.np.empty(2)
result_val_g_prod_kg_red = pd.np.empty(2)
result_val_delta_redox = pd.np.empty(2)
result_val_mass_change = pd.np.empty(2)
for rd in resdict:
chemical_energy = rd['Chemical Energy']
energy_sensible = rd['Sensible Energy']
t_ox = rd['T_ox']
t_red = rd['T_red']
t_mean = (t_ox + t_red) / 2
delta_1 = rd['delta_1']
delta_2 = rd['delta_2']
g_prod_kg_red = rd['g_prod_kg_red']
l_prod_kg_red = rd['l_prod_kg_red']
mass_redox_i = rd['mass_redox']
mol_mass_ox = rd['mol_mass_ox']
mol_prod_mol_red = rd['mol_prod_mol_red']
p_ox = rd['p_ox']
p_red = rd['p_red']
compstr = rd['compstr']
prodstr = rd['prodstr']
prodstr_alt = rd['prodstr_alt']
unstable = rd['unstable']
# chemical energy stored in products
if self.process == "Water Splitting":
dh_wscs = EnergyAnalysis().dhf_h2o(t_mean) * mol_prod_mol_red
elif self.process == "CO2 Splitting":
dh_wscs = EnergyAnalysis().dh_co_co2(t_mean) * mol_prod_mol_red
else:
dh_wscs = 0
energy_integral_dh = chemical_energy - ( (chemical_energy + dh_wscs) * h_rec )
if len(resdict) < 50: # for experimental data: convert J/mol to kJ/mol
energy_integral_dh = energy_integral_dh / 1000
# wscs does not matter, as no water splitting / co2 splitting is considered for exp data
# pumping energy
if pump_ener != -1:
energy_pumping = (float(pump_ener) * mol_mass_ox) / 1000
else: # using mechanical envelope
# per mol O
energy_pumping = EnergyAnalysis().mechanical_envelope(p_red=p_red)
# per mol material
energy_pumping = energy_pumping * mol_prod_mol_red
# steam generation
if self.process == "Water Splitting" and h_rec_steam != 1:
energy_steam = mol_prod_mol_red * EnergyAnalysis().energy_steam_generation(temp_1=w_feed,
temp_2=((t_ox+t_red)*0.5)-273.15,
h_2_h2o=p_ox_wscs,
celsius=celsius,
h_rec=h_rec_steam)
else:
energy_steam = 0
# total energy
energy_total = energy_integral_dh + energy_sensible * (1 - h_rec) + energy_pumping + energy_steam
ener_i = pd.np.array([energy_total, energy_integral_dh, energy_sensible * (1 - h_rec),
energy_pumping,
energy_steam])
# kJ/kg of redox material
per_kg_redox = (ener_i / mol_mass_ox) * 1000
# Wh/kg of redox material
per_kg_wh_redox = per_kg_redox / 3.6
# kJ/mol of product (O, H2, or CO)
kj_mol_prod = ener_i / (delta_2 - delta_1)
# kJ/L of product (ideal gas at SATP)
energy_l = kj_mol_prod / 24.465
# convert from O to O2
if self.process == "Air Separation":
energy_l = 2 * energy_l
# Wh/L of product (ideal gas at SATP)
energy_l_wh = energy_l / 3.6
# calculate efficiency for water splitting
if self.process == "Water Splitting":
# source for heating values
# https://h2tools.org/node/3131
if h_val == "low":
h_v = 119.96
elif h_val == "high":
h_v = 141.88
else:
raise ValueError("heating_value must be either 'high' or 'low'")
# convert kJ/mol H2 to MJ/kg H2 -> divide by 2.016
efficiency = (h_v / (kj_mol_prod[0] / 2.016)) * 100
else:
efficiency = None
delta_redox_i = [float(delta_2 - delta_1)]
mass_change_i = [float(mass_redox_i)]
compdisp = remove_comp_one(compstr=compstr)
invalid_val = False # remove data of unstable compounds
if rem_unstable and unstable:
invalid_val = True
# append new values to result and add compositions
if (ener_i[0] < 0) or invalid_val: # sort out negative values, heat input is always positive
ener_i[0] = float('inf')
res_i = pd.np.append(ener_i, compdisp)
result_val_ener_i = pd.np.vstack((result_val_ener_i, res_i))
if per_kg_redox[0] < 0 or invalid_val:
per_kg_redox[0] = float('inf')
res_i = pd.np.append(per_kg_redox, compdisp)
result_val_per_kg_redox = pd.np.vstack((result_val_per_kg_redox, res_i))
if per_kg_wh_redox[0] < 0 or invalid_val:
per_kg_wh_redox[0] = float('inf')
res_i = pd.np.append(per_kg_wh_redox, compdisp)
result_val_per_kg_wh_redox = pd.np.vstack((result_val_per_kg_wh_redox, res_i))
if kj_mol_prod[0] < 0 or invalid_val:
kj_mol_prod[0] = float('inf')
res_i = pd.np.append(kj_mol_prod, compdisp)
result_val_per_kj_mol_prod = pd.np.vstack((result_val_per_kj_mol_prod, res_i))
if energy_l[0] < 0 or invalid_val:
energy_l[0] = float('inf')
res_i = pd.np.append(energy_l, compdisp)
result_val_per_energy_l = pd.np.vstack((result_val_per_energy_l, res_i))
if energy_l_wh[0] < 0 or invalid_val:
energy_l_wh[0] = float('inf')
res_i = pd.np.append(energy_l_wh, compdisp)
result_val_per_energy_l_wh = pd.np.vstack((result_val_per_energy_l_wh, res_i))
if efficiency < 0 or invalid_val:
efficiency = float('-inf')
res_i = pd.np.append(efficiency, compdisp)
result_val_efficiency = pd.np.vstack((result_val_efficiency, res_i))
if mol_prod_mol_red < 0 or invalid_val:
mol_prod_mol_red = float('-inf')
res_i = pd.np.append(mol_prod_mol_red, compdisp)
result_val_mol_prod_mol_red = pd.np.vstack((result_val_mol_prod_mol_red, res_i))
if l_prod_kg_red < 0 or invalid_val:
l_prod_kg_red = float('-inf')
res_i = pd.np.append(l_prod_kg_red, compdisp)
result_val_l_prod_kg_red = pd.np.vstack((result_val_l_prod_kg_red, res_i))
if g_prod_kg_red < 0 or invalid_val:
g_prod_kg_red = float('-inf')
res_i = pd.np.append(g_prod_kg_red, compdisp)
result_val_g_prod_kg_red = pd.np.vstack((result_val_g_prod_kg_red, res_i))
if delta_redox_i < 0 or invalid_val:
delta_redox_i = float('-inf')
res_i = pd.np.append(delta_redox_i, compdisp)
result_val_delta_redox = pd.np.vstack((result_val_delta_redox, res_i))
if mass_change_i < 0 or invalid_val:
mass_change_i = float('-inf')
res_i = pd.np.append(mass_change_i, compdisp)
result_val_mass_change = pd.np.vstack((result_val_mass_change, res_i))
# sort results
result_val_ener_i = sorted(result_val_ener_i[1:], key=lambda x: float(x[0]))
result_val_per_kg_redox = sorted(result_val_per_kg_redox[1:], key=lambda x: float(x[0]))
result_val_per_kg_wh_redox = sorted(result_val_per_kg_wh_redox[1:], key=lambda x: float(x[0]))
result_val_per_kj_mol_prod = sorted(result_val_per_kj_mol_prod[1:], key=lambda x: float(x[0]))
result_val_per_energy_l = sorted(result_val_per_energy_l[1:], key=lambda x: float(x[0]))
result_val_per_energy_l_wh = sorted(result_val_per_energy_l_wh[1:], key=lambda x: float(x[0]))
if self.process == "Water Splitting":
result_val_efficiency = sorted(result_val_efficiency[1:], key=lambda x: float(x[0]), reverse=True)
else:
result_val_efficiency = result_val_efficiency[1:]
result_val_mol_prod_mol_red = sorted(result_val_mol_prod_mol_red[1:], key=lambda x: float(x[0]), reverse=True)
result_val_l_prod_kg_red = sorted(result_val_l_prod_kg_red[1:], key=lambda x: float(x[0]), reverse=True)
result_val_g_prod_kg_red = sorted(result_val_g_prod_kg_red[1:], key=lambda x: float(x[0]), reverse=True)
result_val_delta_redox = sorted(result_val_delta_redox[1:], key=lambda x: float(x[0]), reverse=True)
result_val_mass_change = sorted(result_val_mass_change[1:], key=lambda x: float(x[0]), reverse=True)
# create dictionary with results
dict_result = {"kJ/mol redox material": result_val_ener_i,
"kJ/kg redox material": result_val_per_kg_redox,
"Wh/kg redox material": result_val_per_kg_wh_redox,
str("kJ/mol of " + prodstr_alt): result_val_per_kj_mol_prod,
str("kJ/L of " + prodstr): result_val_per_energy_l,
str("Wh/L of " + prodstr): result_val_per_energy_l_wh,
"Heat to fuel efficiency in % (only valid for Water Splitting)": result_val_efficiency,
str("mol " + prodstr_alt + " per mol redox material"): result_val_mol_prod_mol_red,
str("L " + prodstr + " per mol redox material"): result_val_l_prod_kg_red,
str("g " + prodstr + " per mol redox material"): result_val_g_prod_kg_red,
"Change in non-stoichiometry between T_ox and T_red": result_val_delta_redox,
"Mass change between T_ox and T_red": result_val_mass_change
}
return dict_result
```
#### File: users/redox_thermo_csp/utils.py
```python
from itertools import groupby
import pandas as pd
from pymatgen import MPRester, Structure
from pymatgen.core.composition import Composition
from pymatgen.analysis.reaction_calculator import ComputedReaction
from pymatgen.core.units import FloatWithUnit
from pymatgen.analysis.elasticity import ElasticTensor
import pymatgen.core.periodic_table as ptable
mpr = MPRester()
def redenth_act(compstr):
"""
Finds redox enthalpies for a perovskite solid solution, both for the solid solution and for the endmembers
dh_min and dh_max are based on the redox enthalpy of the endmembers. Ideally, the theoretical redox enthalpy of
the solid solution corresponds to the weighted average of dh_min and dh_max. If not, and "combined" is selected
in the data use variable, dh_min and dh_max are corrected using the actual theoretical redox enthalpy of the
solid solution.
:return:
theo_solid_solution: theoretical redox enthalpy for the solid solution, if available on the Materials Project
dh_min: minimum redox enthalpy of the solid solution, based on the endmember redox enthalpy
dh_max: maximum redox enthalpy of the solid solution, based on the endmember redox enthalpy
"""
dh_min = None
dh_max = None
# calculate redox enthalpies of endmembers
try:
dhs = calc_dh_endm(compstr)
# only if both are found the values shall be used
if (not dhs[0]) or (not dhs[1]):
raise TypeError()
dh_min = dhs[1]
dh_max = dhs[0]
# this happens if either the brownmillerite or the perovskite data is not on the Materials Project
except TypeError:
pass
except IndexError:
pass
theo_solid_solution = None
# calculate redox enthalpies for complete perovskite -> brownmillerite reduction
try:
theo_solid_solution = find_theo_redenth(compstr)
# this happens if either the brownmillerite or the perovskite data is not on the Materials Project
except IndexError:
pass
splitcomp = split_comp(compstr)
# use a step function first to calculate the total redox enthalpy from perovskite to
# brownmillerite as expected according to the endmember redox enthalpies
conc_act = find_active(mat_comp=splitcomp)[1]
red_enth_mean_endm = (conc_act * dh_min) + ((1 - conc_act) * dh_max)
if theo_solid_solution:
if not red_enth_mean_endm:
difference = float('inf')
else:
difference = theo_solid_solution - red_enth_mean_endm
if abs(difference) > 30000 or not splitcomp[-1]:
dh_min = theo_solid_solution
dh_max = theo_solid_solution
else:
dh_min = dh_min + difference
dh_max = dh_max + difference
return theo_solid_solution, dh_min, dh_max, conc_act
def calc_dh_endm(compstr):
"""
Calculates the maximum and minimum redox enthalpy of a solid solution based on the redox enthalpies of its
endmembers
Uses the average redox enthalpy of A_1 B_1 O3 and A_2 B_1 O3, depending on the concentration of the two
A species
Calculates the same for A_1 B_2 O3 and A_2 B_2 O3
Whichever is higher is the upper limit for the redox enthalpy of the solid solution dh_max
The other one is the lower limit dh_min
:return: dh_max, dh_min
"""
endm = find_endmembers(compstr)
dh_1 = find_theo_redenth(endm[0]) * endm[4] + find_theo_redenth(endm[1]) * \
endm[5]
dh_2 = find_theo_redenth(endm[2]) * endm[4] + find_theo_redenth(endm[3]) * \
endm[5]
if dh_1 > dh_2:
dh_max = dh_1
dh_min = dh_2
else:
dh_max = dh_2
dh_min = dh_1
return dh_max, dh_min
def find_theo_redenth(compstr):
"""
Finds theoretical redox enthalpies from the Materials Project from perovskite to brownmillerite
based partially on https://github.com/materialsproject/pymatgen/blob/b3e972e293885c5b3c69fb3e9aa55287869d4d84/
examples/Calculating%20Reaction%20Energies%20with%20the%20Materials%20API.ipynb
:param compstr: composition as a string
:return:
red_enth: redox enthalpy in kJ/mol O
"""
compstr_perovskite = compstr.split("O")[0] + "O3"
comp_spl = split_comp(compstr)
chem_sys = ""
for i in range(len(comp_spl)):
if comp_spl[i] is not None:
chem_sys = chem_sys + comp_spl[i][0] + "-"
chem_sys = chem_sys + "O"
chem_sys = chem_sys.split("-")
all_entries = mpr.get_entries_in_chemsys(chem_sys)
# This method simply gets the lowest energy entry for all entries with the same composition.
def get_most_stable_entry(formula):
relevant_entries = [entry for entry in all_entries if
entry.composition.reduced_formula == Composition(formula).reduced_formula]
relevant_entries = sorted(relevant_entries, key=lambda e: e.energy_per_atom)
return relevant_entries[0]
formula_spl = [''.join(g) for _, g in groupby(str(compstr), str.isalpha)]
perov_formula = []
for k in range(len(formula_spl)):
try:
perov_formula += str(int(float(formula_spl[k]) * 8))
except ValueError:
perov_formula += str(formula_spl[k])
perov_formula = "".join(perov_formula)
perov_formula = str(perov_formula).split("O")[0] + "O24"
perovskite = get_most_stable_entry(perov_formula)
brownm_formula = []
for k in range(len(formula_spl)):
try:
brownm_formula += str(int(float(formula_spl[k]) * 32))
except ValueError:
brownm_formula += str(formula_spl[k])
brownm_formula = "".join(brownm_formula)
brownm_formula = str(brownm_formula).split("O")[0] + "O80"
brownmillerite = get_most_stable_entry(brownm_formula)
# for oxygen: do not use the most stable phase O8 but the most stable O2 phase
def get_oxygen():
relevant_entries = [entry for entry in all_entries if
entry.composition == Composition("O2")]
relevant_entries = sorted(relevant_entries, key=lambda e: e.energy_per_atom)
return relevant_entries[0]
oxygen = get_oxygen()
reaction = ComputedReaction([perovskite], [brownmillerite, oxygen])
energy = FloatWithUnit(reaction.calculated_reaction_energy, "eV atom^-1")
# figure out the stoichiometry of O2 in the reaction equation in order to normalize the energies per mol of O
try:
o_stoich = float(str(str(reaction.as_dict).split(" O2")[0]).split()[-1])
except ValueError:
o_stoich = 1
# energy in J/mol per mol of O2
ener = (float(energy.to("kJ mol^-1")) * 1000) / o_stoich
# per mol of O
ener = ener / 2
return ener
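# Worked example of the supercell scaling above (hypothetical composition "Sr1Fe1Ox"):
# the stoichiometries are multiplied by 8 to query the perovskite cell "Sr8Fe8O24"
# and by 32 to query the brownmillerite cell "Sr32Fe32O80"; the reaction energy is
# then normalized per mol of O via the O2 stoichiometry parsed from the reaction.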
def split_comp(compstr):
"""
Splits a string containing the composition of a perovskite solid solution into its components
Chemical composition: (am_1, am_2)(tm_1, tm_2)Ox
:param compstr: composition as a string
:return: am_1, am_2, tm_1, tm_2;
each of these output variables contains the species and the stoichiometries
i.e. ("Fe", 0.6)
"""
am_1, am_2, tm_1, tm_2 = None, None, None, None
compstr_spl = [''.join(g) for _, g in groupby(str(compstr), str.isalpha)]
for l in range(len(compstr_spl)):
try:
if ptable.Element(compstr_spl[l]).is_alkaline or ptable.Element(
compstr_spl[l]).is_alkali or ptable.Element(compstr_spl[l]).is_rare_earth_metal:
if am_1 is None:
am_1 = [compstr_spl[l], float(compstr_spl[l + 1])]
elif am_2 is None:
am_2 = [compstr_spl[l], float(compstr_spl[l + 1])]
if ptable.Element(compstr_spl[l]).is_transition_metal and not (
ptable.Element(compstr_spl[l]).is_rare_earth_metal):
if tm_1 is None:
tm_1 = [compstr_spl[l], float(compstr_spl[l + 1])]
elif tm_2 is None:
tm_2 = [compstr_spl[l], float(compstr_spl[l + 1])]
# stoichiometries raise ValueErrors in pymatgen .is_alkaline etc., ignore these errors and skip that entry
except ValueError:
pass
return am_1, am_2, tm_1, tm_2
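# Example: split_comp("Sr1Fe1Ox") returns (['Sr', 1.0], None, ['Fe', 1.0], None);
# the numeric tokens and the trailing "Ox" raise ValueError in pymatgen's Element
# lookup and are skipped, leaving one A-site and one B-site species.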
def find_active(mat_comp):
"""
Finds the more redox-active species in a perovskite solid solution
Args:
mat_comp:
The material's composition data, as generated by split_comp()
Returns:
act_spec:
more redox active species
act:
stoichiometry of the more redox active species
"""
# calculate charge of the A site metals
charge_sum = 0
for i in range(2):
if mat_comp[i]:
if ptable.Element(mat_comp[i][0]).is_alkali:
charge_sum += mat_comp[i][1]
elif ptable.Element(mat_comp[i][0]).is_alkaline:
charge_sum += 2 * mat_comp[i][1]
elif (ptable.Element(mat_comp[i][0]).is_lanthanoid or (
mat_comp[i][0] == "Bi")) and mat_comp[i][0] != "Ce":
charge_sum += 3 * mat_comp[i][1]
elif mat_comp[i][0] == "Ce":
charge_sum += 4 * mat_comp[i][1]
else:
raise ValueError("Charge of A site species unknown.")
red_order = None
# charge on B sites 4+
# experimentally well-established order of A2+B4+O3 perovskite reducibility: Ti - Mn - Fe - Co - Cu
if round((6 - charge_sum), 2) == 4:
red_order = ["Ti", "Mn", "Fe", "Co", "Cu"]
# charge on B sites 3+
# order of binary oxide reducibility according to Materials Project (A2O3 -> AO + O2)
if round((6 - charge_sum), 2) == 3:
red_order = ["Sc", "Ti", "V", "Cr", "Fe", "Mn", "Cu", "Co", "Ni", "Ag"] # changed Ni<->Ag order according to DFT results
# charge on B sites 5+
# order of binary oxide reducibility according to Materials Project (A2O3 -> AO + O2)
if round((6 - charge_sum), 2) == 5:
red_order = ["Ta", "Nb", "W", "Mo", "V", "Cr"]
act_a = None
if red_order:
for i in range(len(red_order)):
if mat_comp[2][0] == red_order[i]:
more_reducible = red_order[i + 1:-1]
if mat_comp[3] is not None and (mat_comp[3][0] in more_reducible):
act_a = mat_comp[3]
else:
act_a = mat_comp[2]
if act_a is None:
raise ValueError("B species reducibility unknown, preferred reduction of species not predicted")
# correct bug for the most reducible species
if act_a[0] == red_order[-2] and (red_order[-1] in str(mat_comp)):
act_a[0] = red_order[-1]
act_a[1] = 1-act_a[1]
return act_a[0], act_a[1]
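# Example: find_active(split_comp("Sr1Fe1Ox")) returns ('Fe', 1.0) -- Sr2+ on the A site
# leaves a nominal 4+ charge on the B site, and Fe is the only B-site species present,
# so it is taken as the redox-active species with stoichiometry 1.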
def find_endmembers(compstr):
"""
Finds the endmembers of a solid solution (A_1 A_2)(B_1 B_2) O3 of four perovskite species:
A_1 B_1 O3
A_2 B_1 O3
A_1 B_2 O3
A_2 B_2 O3
:return:
endmember_1a, endmember_1b: two endmembers A_1 B_1 O3 and A_2 B_1 O3 with the same transition metal but
different A species
endmember_2a, endmember_2b: two endmembers A_1 B_2 O3 and A_2 B_2 O3 with the same transition metal but
different A species
a_conc: concentration of the A species A_1
b_conc: concentration of the A species A_2
"""
am_1 = split_comp(compstr)[0]
if split_comp(compstr)[1]:
am_2 = split_comp(compstr)[1]
else:
am_2 = None
tm_1 = split_comp(compstr)[2]
if split_comp(compstr)[3]:
tm_2 = split_comp(compstr)[3]
else:
tm_2 = None
endmember_1a = am_1[0] + "1" + tm_1[0] + "1" + "O"
if am_2:
endmember_1b = am_2[0] + "1" + tm_1[0] + "1" + "O"
else:
endmember_1b = endmember_1a
if tm_2:
endmember_2a = am_1[0] + "1" + tm_2[0] + "1" + "O"
else:
endmember_2a = endmember_1a
if tm_2 and am_2:
endmember_2b = am_2[0] + "1" + tm_2[0] + "1" + "O"
elif tm_2:
endmember_2b = endmember_2a
else:
endmember_2b = endmember_1a
a_conc = am_1[1]
if am_2:
b_conc = am_2[1]
else:
b_conc = 0
return endmember_1a, endmember_1b, endmember_2a, endmember_2b, a_conc, b_conc
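# Example (hypothetical composition): find_endmembers("La0.6Sr0.4Co0.2Fe0.8Ox") returns
# ("La1Co1O", "Sr1Co1O", "La1Fe1O", "Sr1Fe1O", 0.6, 0.4), i.e. the four ABO3 endmembers
# plus the concentrations of the two A-site species.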
def get_debye_temp(mpid):
"""
Calculates the Debye temperature from elastic tensors on the Materials Project
Credits: <NAME>
"""
pd.np.seterr(over="ignore") # ignore overflow in double scalars
data = mpr.get_data(mpid)[0]
struct = Structure.from_str(data['cif'], fmt='cif')
c_ij = ElasticTensor.from_voigt(data['elasticity']['elastic_tensor'])
td = c_ij.debye_temperature(struct)
return td
```
#### File: users/swf/pre_submission.py
```python
from mpcontribs.config import mp_level01_titles
from mpcontribs.io.core.recdict import RecursiveDict
from mpcontribs.io.core.utils import clean_value, get_composition_from_string
from mpcontribs.users.utils import duplicate_check
def round_to_100_percent(number_set, digit_after_decimal=1):
unround_numbers = [
x / float(sum(number_set)) * 100 * 10**digit_after_decimal
for x in number_set
]
decimal_part_with_index = sorted([
(index, unround_numbers[index] % 1)
for index in range(len(unround_numbers))
], key=lambda y: y[1], reverse=True)
remainder = 100 * 10**digit_after_decimal - sum(map(int, unround_numbers))
index = 0
while remainder > 0:
unround_numbers[decimal_part_with_index[index][0]] += 1
remainder -= 1
index = (index + 1) % len(number_set)
return [int(x)/float(10**digit_after_decimal) for x in unround_numbers]
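# Example (illustrative): round_to_100_percent([1, 1, 1]) -> [33.4, 33.3, 33.3]; the
# entries with the largest decimal remainders absorb the leftover so the rounded
# shares always sum to exactly 100.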
@duplicate_check
def run(mpfile, **kwargs):
import pymatgen
import pandas as pd
from mpcontribs.users.swf.rest.rester import SwfRester
# load data from google sheet
google_sheet = mpfile.document[mp_level01_titles[0]].pop('google_sheet')
google_sheet += '/export?format=xlsx'
df_dct = pd.read_excel(google_sheet, sheet_name=None)
# rename sheet columns
elements = ['Fe', 'V', 'Co']
df_dct['IP Energy Product'].columns = ['IP_Energy_product'] + elements
df_dct['total'].columns = elements
df_dct['MOKE'].columns = elements + ['thickness', 'MOKE_IP_Hc']
df_dct['VSM'].columns = elements + ['thickness', 'VSM_IP_Hc']
df_dct['formula'].columns = elements
df_dct['Kondorsky'].columns = ['angle', 'Kondorsky_Model', 'Experiment']
# round all compositions to 100%
for sheet, df in df_dct.items():
if sheet != 'Kondorsky':
for idx, row in df.iterrows():
df.loc[idx:idx, elements] = round_to_100_percent(row[elements])
row5 = df_dct['formula'].iloc[0]
formula5 = get_composition_from_string(
pymatgen.Composition(10*row5).formula.replace(' ', '')
)
dct = dict((k, clean_value(v, '%')) for k,v in row5.to_dict().items())
mpfile.add_hierarchical_data({'data': dct}, identifier=formula5)
mpfile.add_data_table(
formula5, df_dct['Kondorsky'], name='Angular Dependence of Switching Field'
)
for sheet, df in df_dct.items():
if sheet == 'formula' or sheet == 'Kondorsky' or sheet == 'total':
continue
for idx, row in df.iterrows():
composition = pymatgen.Composition(row[elements]*10)
formula = get_composition_from_string(composition.formula.replace(' ', ''))
dct = dict((k, clean_value(v, '%')) for k,v in row[elements].to_dict().items())
mpfile.add_hierarchical_data({'data': dct}, identifier=formula)
columns = [x for x in row.index if x not in elements]
if columns:
data = row[columns].round(decimals=1)
dct = dict((k, clean_value(v)) for k,v in data.to_dict().items())
mpfile.add_hierarchical_data({'data': dct}, identifier=formula)
```
#### File: mpcontribs/users/utils.py
```python
import inspect, os
def get_user_explorer_name(path, view='index'):
return '_'.join(
os.path.dirname(os.path.normpath(path)).split(os.sep)[-4:] + [view]
)
def duplicate_check(f):
existing_identifiers = {}
def wrapper(*args, **kwargs):
module = inspect.getmodule(f)
module_split = module.__name__.split('.')[:-1]
mod_path = os.sep.join(module_split)
from mpcontribs.users_modules import get_user_rester
Rester = get_user_rester(mod_path)
test_site = kwargs.get('test_site', True)
with Rester(test_site=test_site) as mpr:
for doc in mpr.query_contributions(criteria=mpr.query):
existing_identifiers[doc['identifier']] = doc['_id']
try:
f(*args, **kwargs)
except StopIteration:
print('not adding more contributions')
mpfile = args[0]
update = 0
for identifier in mpfile.ids:
if identifier in existing_identifiers:
cid = existing_identifiers[identifier]
mpfile.insert_top(identifier, 'cid', cid)
update += 1
print(len(mpfile.ids), 'contributions to submit.')
if update > 0:
print(update, 'contributions to update.')
wrapper.existing_identifiers = existing_identifiers
return wrapper
from mpcontribs.client import load_client
from mpcontribs.io.core.recdict import RecursiveDict
from mpcontribs.io.core.components.tdata import Table
def get_context(project, columns=None):
ctx = {'project': project}
client = load_client()
prov = client.projects.get_entry(project=project).response().result
for k in ['id', 'project', 'other']:
prov.pop(k)
ctx['title'] = prov.pop('title')
ctx['provenance'] = RecursiveDict(prov).render()
data = client.projects.get_table(
project=project, columns=columns, per_page=3
).response().result
if data['items']:
columns = list(data['items'][0].keys())
table = Table(data['items'], columns=columns)
ctx['table'] = table.render(project=project)
return ctx
```
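For orientation, `get_user_explorer_name` above simply joins the last four directory components of the module path with the view name; a hypothetical path illustrates the result.
```python
# Illustration with a made-up POSIX path (not taken from the repo).
path = "/repos/mpcontribs/users/swf/explorer/views.py"
print(get_user_explorer_name(path))             # mpcontribs_users_swf_explorer_index
print(get_user_explorer_name(path, view="t"))   # mpcontribs_users_swf_explorer_t
```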
#### File: io/archieml/test_mpfile.py
```python
import os
from mpcontribs.io.archieml.mpfile import MPFile
def test_get_string():
test_file = os.path.join(os.path.dirname(__file__), 'test_archieml.txt')
mpfile = MPFile.from_file(test_file)
mpfile_test = MPFile.from_string(mpfile.get_string())
assert mpfile.document == mpfile_test.document
```
#### File: core/components/sdata.py
```python
from mpcontribs.config import mp_level01_titles
from mpcontribs.io.core.recdict import RecursiveDict
from IPython.display import display_html
class Structures(RecursiveDict):
"""class to hold and display list of pymatgen structures for single mp-id"""
def __init__(self, content):
from pymatgen import Structure
super(Structures, self).__init__(
(key, Structure.from_dict(struc))
for key, struc in content.get(mp_level01_titles[3], {}).items()
)
def _ipython_display_(self):
for name, structure in self.items():
if structure:
display_html('<h4>{}</h4>'.format(name), raw=True)
display_html('<p>{}</p>'.format(
                    structure.__repr__().replace('\n', '<br>').replace(' ', '&nbsp;')
), raw=True)
class StructuralData(RecursiveDict):
"""class to hold and display all pymatgen structures in MPFile"""
def __init__(self, document):
super(StructuralData, self).__init__(
(identifier, Structures(content))
for identifier, content in document.items()
)
def _ipython_display_(self):
for identifier, sdata in self.items():
if identifier != mp_level01_titles[0] and sdata:
display_html('<h2>Structural Data for {}</h2>'.format(identifier), raw=True)
display_html(sdata)
```
#### File: core/components/tdata.py
```python
import uuid, json
import pandas as pd
from IPython.display import display, display_html, HTML
from mpcontribs.config import mp_level01_titles, mp_id_pattern
from mpcontribs.io.core.utils import nest_dict, clean_value
from mpcontribs.io.core.recdict import RecursiveDict
from urllib.parse import urlparse
class Table(pd.DataFrame):
def __init__(self, data, columns=None, index=None, cid=None, name=None):
super(Table, self).__init__(data=data, index=index, columns=columns)
self.cid = cid
self.name = name
def to_dict(self):
from pandas import MultiIndex
for col in self.columns:
self[col] = self[col].apply(lambda x: clean_value(x, max_dgts=6))
rdct = super(Table, self).to_dict(orient='split', into=RecursiveDict)
if not isinstance(self.index, MultiIndex):
rdct.pop('index')
rdct["@module"] = self.__class__.__module__
rdct["@class"] = self.__class__.__name__
return rdct
@classmethod
def from_dict(cls, d):
index = None
if 'index' in d:
from pandas import MultiIndex
index = MultiIndex.from_tuples(d['index'])
obj = cls(
d['data'], columns=d['columns'], index=index,
cid=d['cid'], name=d['name']
) if 'cid' in d and 'name' in d else cls(
d['data'], columns=d['columns'], index=index
)
return obj
@classmethod
def from_items(cls, rdct, **kwargs):
return super(Table, cls).from_dict(RecursiveDict(rdct), **kwargs)
def to_backgrid_dict(self):
"""Backgrid-conform dict from DataFrame"""
        # import heavy dependencies locally to shorten global import times
import numpy as np
from mpcontribs.io.core.utils import get_composition_from_string
from pandas import MultiIndex
import pymatgen.util as pmg_util
from pymatgen.core.composition import CompositionError
table = dict()
nrows_max = 260
nrows = self.shape[0]
df = Table(self.head(n=nrows_max)) if nrows > nrows_max else self
if isinstance(df.index, MultiIndex):
df.reset_index(inplace=True)
table['columns'] = []
table['rows'] = super(Table, df).to_dict(orient='records')
for col_index, col in enumerate(list(df.columns)):
cell_type = 'number'
# avoid looping rows to minimize use of `df.iat` (time-consuming in 3d)
if not col.startswith('level_') and col[-1] != ']':
is_url_column = True
for row_index in range(df.shape[0]):
cell = str(df.iat[row_index, col_index])
is_url_column = bool(is_url_column and (not cell or mp_id_pattern.match(cell)))
if is_url_column:
if cell:
value = 'https://materialsproject.org/materials/{}'.format(cell)
table['rows'][row_index][col] = value
elif cell:
try:
composition = get_composition_from_string(cell)
composition = pmg_util.string.unicodeify(composition)
table['rows'][row_index][col] = composition
except (CompositionError, ValueError, OverflowError):
try:
# https://stackoverflow.com/a/38020041
result = urlparse(cell)
if not all([result.scheme, result.netloc, result.path]):
break
is_url_column = True
except:
break
cell_type = 'uri' if is_url_column else 'string'
col_split = col.split('.')
nesting = [col_split[0]] if len(col_split) > 1 else []
table['columns'].append({
'name': col, 'cell': cell_type, 'nesting': nesting, 'editable': 0
})
if len(col_split) > 1:
table['columns'][-1].update({'label': '.'.join(col_split[1:])})
if len(table['columns']) > 12:
table['columns'][-1]['renderable'] = 0
return table
def render(self, project=None, total_records=None):
"""use BackGrid JS library to render Pandas DataFrame"""
# if project given, this will result in an overview table of contributions
# TODO check for index column in df other than the default numbering
jtable = json.dumps(self.to_backgrid_dict())
if total_records is None:
total_records = self.shape[0]
config = {"total_records": total_records}
config['uuids'] = [str(uuid.uuid4()) for i in range(3)]
if project is None:
config['name'] = self.name
config['cid'] = self.cid
else:
config['project'] = project
jconfig = json.dumps(config)
html = '<div id="{}"></div>'.format(config['uuids'][0])
html += '<div id="{}" style="width:100%;"></div>'.format(config['uuids'][1])
html += '<div id="{}"></div>'.format(config['uuids'][2])
html += f'<script>render_table({{table: {jtable}, config: {jconfig}}})</script>'
return html
def _ipython_display_(self):
display(HTML(self.render()))
class Tables(RecursiveDict):
"""class to hold and display multiple data tables"""
def __init__(self, content=RecursiveDict()):
super(Tables, self).__init__(
(key, value) for key, value in content.items()
if isinstance(value, Table)
)
def __str__(self):
return 'tables: {}'.format(' '.join(self.keys()))
def _ipython_display_(self):
for name, table in self.items():
display_html('<h3>{}</h3>'.format(name), raw=True)
display_html(table)
class TabularData(RecursiveDict):
"""class to hold and display all tabular data of a MPFile"""
def __init__(self, document):
super(TabularData, self).__init__()
from pymatgen import Structure
scope = []
for key, value in document.iterate():
if isinstance(value, Table):
self[scope[0]].rec_update({'.'.join(scope[1:]): value})
elif not isinstance(value, Structure):
level, key = key
level_reduction = bool(level < len(scope))
if level_reduction:
del scope[level:]
if value is None:
scope.append(key)
if scope[0] not in self:
self[scope[0]] = Tables()
def __str__(self):
return 'mp-ids: {}'.format(' '.join(self.keys()))
def _ipython_display_(self):
for identifier, tables in self.items():
if isinstance(tables, dict) and tables:
display_html('<h2>Tabular Data for {}</h2>'.format(identifier), raw=True)
display_html(tables)
``` |
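As a standalone illustration of how `Table.to_backgrid_dict` above turns dotted column names into Backgrid nesting/label metadata, here is a simplified re-implementation of just that piece (not the library code itself):
```python
def column_meta(col, cell_type='number'):
    """Mimic the nesting/label handling of Table.to_backgrid_dict (simplified)."""
    parts = col.split('.')
    meta = {'name': col, 'cell': cell_type, 'editable': 0,
            'nesting': [parts[0]] if len(parts) > 1 else []}
    if len(parts) > 1:
        meta['label'] = '.'.join(parts[1:])
    return meta

print(column_meta('data.formula'))  # nesting ['data'], label 'formula'
print(column_meta('identifier'))    # no nesting, no label
```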
{
"source": "josubg/CorefGraph",
"score": 3
} |
#### File: multisieve/features/animacyannotator.py
```python
import re
from corefgraph.multisieve.features.constants import ANIMACY, INANIMATE, ANIMATE, UNKNOWN
from corefgraph.constants import POS, NER
from corefgraph.multisieve.features.baseannotator import FeatureAnnotator
from corefgraph.resources.dictionaries import pronouns
from corefgraph.resources.files.animate import animate_words, inanimate_words
from corefgraph.resources.rules import rules
from corefgraph.resources.tagset import pos_tags, ner_tags
__author__ = '<NAME> <<EMAIL>>'
__date__ = '3/19/14'
class AnimacyAnnotator(FeatureAnnotator):
""" Marks the animacy of the mentions using their NER, POS and form.
"""
name = "animacy"
features = [ANIMACY]
use_bergsma_number_lists = True
def extract_and_mark(self, mention):
""" Extract and mark the animacy of the mention.
The animacy is marked as ANIMATE, INANIMATE or UNKNOWN constant in the
ANIMACY attribute of the mention.
:param mention: The mention to mark.
:return: Nothing
"""
mention[ANIMACY] = self._get_animacy(mention=mention)
def _get_animacy(self, mention):
"""Determines the gender of the word.
:param mention: The mention which animacy is wanted.
:return: ANIMATE, INANIMATE or UNKNOWN constant
"""
head_word = self.graph_builder.get_head_word(mention)
word_form = rules.get_head_word_form(self.graph_builder, mention)
word_ner = mention.get(NER)
word_pos = head_word.get(POS)
# Normalize parameters
normalized_ner = word_ner
normalized_form = word_form.lower()
        normalized_form = re.sub(r"\d", "0", normalized_form)
normalized_pos = word_pos.replace("$", "")
# Pronouns
if pos_tags.pronoun(normalized_pos) or pronouns.all(normalized_form):
if pronouns.inanimate(normalized_form):
return INANIMATE
elif pronouns.animate(normalized_form):
return ANIMATE
else:
return UNKNOWN
# NER
if ner_tags.animate(normalized_ner):
return ANIMATE
if ner_tags.inanimate(normalized_ner):
return INANIMATE
# Use the mention POS to determine the feature
if pos_tags.inanimate(word_pos):
return INANIMATE
if pos_tags.animate(word_pos):
return ANIMATE
# Bergsma Lists
if self.use_bergsma_number_lists:
if word_form in animate_words:
return ANIMATE
if word_form in inanimate_words:
return INANIMATE
return UNKNOWN
```
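A self-contained toy version of the precedence applied above (pronoun form first, then NER, then POS, then the word lists); the real annotator delegates each test to corefgraph's resource modules, and the tag values below are invented for the example.
```python
ANIMATE, INANIMATE, UNKNOWN = "animate", "inanimate", "unknown"

def toy_animacy(form, pos, ner):
    animate_pronouns = {"he", "she", "him", "her", "who"}
    inanimate_pronouns = {"it", "its", "which"}
    # 1) pronouns decide first
    if pos.startswith("PRP") or form in animate_pronouns | inanimate_pronouns:
        if form in inanimate_pronouns:
            return INANIMATE
        return ANIMATE if form in animate_pronouns else UNKNOWN
    # 2) then NER (hypothetical tag set)
    if ner == "PERSON":
        return ANIMATE
    if ner in {"LOCATION", "ORGANIZATION", "DATE", "MONEY"}:
        return INANIMATE
    # 3) the real class falls back to POS and the Bergsma word lists here
    return UNKNOWN

print(toy_animacy("she", "PRP", "O"))            # animate
print(toy_animacy("Berlin", "NNP", "LOCATION"))  # inanimate
```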
#### File: multisieve/features/genderannotator.py
```python
from corefgraph.multisieve.features.constants import GENDER, MALE, FEMALE, NEUTRAL, \
UNKNOWN, NUMBER, PLURAL, MENTION, PRONOUN_MENTION
from corefgraph.constants import FORM, NER, POS, ID
from corefgraph.multisieve.features.baseannotator import FeatureAnnotator
from corefgraph.resources.dictionaries import pronouns
from corefgraph.resources.files.gender import bergma_counter, female_names, \
female_words, male_names, male_words, neutral_words
from corefgraph.resources.rules import rules
from corefgraph.resources.tagset import ner_tags, pos_tags
__author__ = '<NAME> <<EMAIL>>'
__date__ = '3/19/14'
class GenderAnnotator(FeatureAnnotator):
""" Annotate mention gender.
"""
name = "gender"
features = [GENDER]
use_bergsma_gender_lists = True
use_names_list = True
use_probabilistic_gender_classification = True
prominence_boost = 0.5
threshold = 2
def extract_and_mark(self, mention):
""" Extract the gender and annotate it into the mention.
:param mention: The mention to annotate.
:return: Nothing.
"""
gender = self._get_gender(mention)
self.logger.debug("Gender: Result -%s- #%s", gender, mention[FORM])
mention[GENDER] = gender
def _get_gender(self, mention):
""" Pass trough a list of selector to get mention gender.
:param mention: The mention to get gender
:return: MALE, FEMALE, NEUTRAL or UNKNOWN constant.
"""
head_word = self.graph_builder.get_head_word(mention)
headword_pos = head_word[POS]
headstring = rules.get_head_word_form(self.graph_builder, mention).lower()
        # Collect words up to and including the head word
mention_string = []
for word in self.graph_builder.get_words(mention):
mention_string.append(word[FORM])
if word[ID] == head_word[ID]:
break
mention_string = " ".join(mention_string).lower()
try:
mention_type = mention[MENTION]
except KeyError:
self.logger.warning("warning: Gender without MENTION TYPE")
mention_type = UNKNOWN
try:
mention_number = mention[NUMBER]
except KeyError:
self.logger.warning("warning: Gender without MENTION NUMBER")
mention_number = UNKNOWN
gender = self._pronoun_gender(mention_string, mention_type)
if gender is not None:
self.logger.debug("Gender: Pronoun")
return gender
if self.use_probabilistic_gender_classification and mention_number != PLURAL:
gender_statistic = self._get_statistic_gender(headstring)
if gender_statistic is not None:
self.logger.debug("Gender: Statistical")
return gender_statistic
gender = self._person_gender(mention)
if gender is not None:
self.logger.debug("Gender: Person")
return gender
gender = self._pos_gender(headword_pos)
if gender:
self.logger.debug("Gender: Part-of-speech")
return gender
if self.use_names_list:
gender = self._name_gender(mention_string)
if gender:
self.logger.debug("Gender: Name list -%s-", headstring)
return gender
if self.use_bergsma_gender_lists:
gender = self._list_gender(mention_string)
if gender:
self.logger.debug("Gender: List -%s-", headstring)
return gender
return UNKNOWN
@staticmethod
def _pos_gender(word_pos):
""" Use the mention POS to determine the feature.
:param word_pos: The POS of the mention as String.
:return: MALE, FEMALE, NEUTRAL constant or None.
"""
if pos_tags.male(word_pos):
return MALE
if pos_tags.female(word_pos):
return FEMALE
if pos_tags.neutral(word_pos):
return NEUTRAL
return None
@staticmethod
def _pronoun_gender(word_form, mention_type):
""" Check if is a pronoun and determine gender
:param word_form: The lower case word form
:return: if pronoun MALE, FEMALE, NEUTRAL or UNKNOWN constant,
else None.
"""
if mention_type == PRONOUN_MENTION:
if pronouns.male(word_form):
return MALE
if pronouns.female(word_form):
return FEMALE
if pronouns.neutral(word_form):
return NEUTRAL
return UNKNOWN
return None
@staticmethod
def _list_gender(word_form):
""" Try to annotate the gender with a constant of names.
:param word_form: The original-cased word form.
:return: MALE, FEMALE, NEUTRAL constants or none.
"""
if word_form.lower() in male_words:
return MALE
if word_form.lower() in female_words:
return FEMALE
if word_form.lower() in neutral_words:
return NEUTRAL
return None
@staticmethod
def _name_gender(word_form):
""" Try to annotate gender with name by gender lists.
:param word_form: The original-cased word form.
:return: MALE, FEMALE constants or None.
"""
if word_form.lower() in female_names:
return FEMALE
elif word_form.lower() in male_names:
return MALE
return None
def _person_gender(self, mention):
""" Check if the mention is a person and use different approach to
get word relevant to gender detection.
:param mention: The mention to annotate.
:return: if person MALE, FEMALE, NEUTRAL or UNKNOWN constant,
else None.
"""
if ner_tags.person(mention.get(NER)):
for token in self.graph_builder.get_words(mention):
word_form = token[FORM].lower()
if word_form in male_words or word_form in male_names:
return MALE
if word_form in female_words or word_form in female_names:
return FEMALE
return UNKNOWN
return None
def _get_statistic_gender(self, mention_string):
""" Use the Bergsma-Lin algorithm to set the gender of the mention.
        :param mention_string: The lower-case form of the mention.
:return: MALE, FEMALE, NEUTRAL constants or None.
"""
if mention_string not in bergma_counter:
return None
male, female, neutral, plural = \
bergma_counter.get(mention_string)
if (male * self.prominence_boost > female + neutral) and \
(male > self.threshold):
return MALE
elif (female * self.prominence_boost > male + neutral) and \
(female > self.threshold):
return FEMALE
elif (neutral * self.prominence_boost > male + female) and \
(neutral > self.threshold):
return NEUTRAL
return None
```
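The statistical branch above boils down to a simple dominance test on the Bergsma counts; here is a standalone sketch of that rule using the class defaults (`prominence_boost=0.5`, `threshold=2`).
```python
def statistic_gender(male, female, neutral, boost=0.5, threshold=2):
    """Standalone re-statement of _get_statistic_gender's decision rule."""
    if male * boost > female + neutral and male > threshold:
        return "male"
    if female * boost > male + neutral and female > threshold:
        return "female"
    if neutral * boost > male + female and neutral > threshold:
        return "neutral"
    return None  # fall back to the other gender heuristics

print(statistic_gender(120, 10, 5))  # 'male'  (60 > 15)
print(statistic_gender(4, 3, 3))     # None    (no count dominates)
```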
#### File: multisieve/features/genericsannotator.py
```python
from corefgraph.multisieve.features.constants import GENERIC
from corefgraph.constants import POS, FORM, SPAN, LEMMA, ID
from corefgraph.multisieve.features.baseannotator import FeatureAnnotator
from corefgraph.resources.dictionaries import pronouns, verbs
from corefgraph.resources.tagset import pos_tags
__author__ = '<NAME> <<EMAIL>>'
__date__ = '3/19/14'
class GenericsAnnotator(FeatureAnnotator):
"""Annotator of the generic mentions.
"""
name = "generic"
features = [GENERIC]
def extract_and_mark(self, mention):
"""Check and set generic feature for generic mentions.
:param mention: The mention to check.
:return Nothing.
"""
mention[GENERIC] = False
head_word = self.graph_builder.get_head_word(mention)
# Bare plural
if pos_tags.plural_common_noun(head_word[POS]) and \
(mention[SPAN][1] - mention[SPAN][0] == 0):
if pronouns.all(mention[FORM]):
#return False
pass
mention[GENERIC] = True
# Generic you as in "you know"
elif mention[self.graph_builder.doc_type] != self.graph_builder.doc_article and \
pronouns.second_person(mention[FORM].lower()):
you = head_word
sentence = self.graph_builder.get_root(you)
words = [word
for word
in self.graph_builder.get_sentence_words(sentence)]
you_index = words.index(you)
if (you_index + 1 < len(words)) and \
verbs.generics_you_verbs(
words[you_index + 1][FORM].lower()):
# words[you_index + 1][LEMMA].lower()):
mention[GENERIC] = True
```
#### File: multisieve/filters/basefilter.py
```python
from logging import getLogger
__author__ = "<NAME> <<EMAIL>>"
class BaseFilter(object):
""" Base class for filters. To create a new filter import and inherit from
this class.
"""
short_name = "base"
def __init__(self, graph_builder, extractor):
self.logger = getLogger("{0}.{1}".format(__name__, self.short_name))
self.graph_builder = graph_builder
self.extractor = extractor
def filter(self, mention, prev_mentions):
""" Overload this in each filter.
:param mention: The mention to test
:return: True or False.
"""
self.logger.warning("filter_mention not override!")
return False
```
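New filters follow the same pattern as the shipped ones below: subclass `BaseFilter`, set `short_name`, and override `filter()` to return `True` when the mention should be discarded. A hypothetical example (`ShortMentionFilter` is not part of the package):
```python
from corefgraph.multisieve.filters.basefilter import BaseFilter
from corefgraph.constants import FORM

class ShortMentionFilter(BaseFilter):
    """Toy filter that discards one-character mentions (illustration only)."""
    short_name = "ShortMentionFilter"

    def filter(self, mention, prev_mentions):
        if len(mention[FORM].strip()) < 2:
            self.logger.debug("Mention too short: %s", mention[FORM])
            return True   # True means: filter this mention out
        return False
```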
#### File: multisieve/filters/nonwordFilter.py
```python
from .basefilter import BaseFilter
from corefgraph.resources.dictionaries import stopwords
from corefgraph.constants import FORM, ID
__author__ = "<NAME> <<EMAIL>>"
class NonWordFilter(BaseFilter):
"""Class that filter mentions that have non word heads."""
short_name = "NonWordFilter"
def filter(self, mention, prev_mentions):
""" check if the mention head is a non-word.
The check is case insensitive.
:param mention: The mention to test
:return: True or False
"""
head_word = self.graph_builder.get_head_word(mention)
head_form = head_word[FORM].lower()
if stopwords.non_words(head_form):
self.logger.debug(
"Mention is non word: %s(%s)", mention[FORM], mention[ID])
return True
return False
```
#### File: multisieve/filters/pleonasticFilter.py
```python
from corefgraph.resources.rules import rules
from corefgraph.multisieve.filters.basefilter import BaseFilter
from corefgraph.constants import FORM, ID
from corefgraph.resources.dictionaries import pronouns
__author__ = "<NAME> <<EMAIL>>"
class PleonasticFilter(BaseFilter):
""" Class to remove mentions thar are pleonastic pronouns."""
short_name = "PleonasticFilter"
def filter(self, mention, prev_mentions):
""" check if the mention is pleonastic.
:param mention: The mention to test.
:return: True or False.
"""
if not pronouns.pleonastic(mention[FORM].lower()):
return False
if rules.is_pleonastic(constituent=mention, graph_builder=self.graph_builder):
self.logger.debug(
"Mention is pleonastic it: %s(%s)",
mention[ID], self.graph_builder.get_root(mention)[FORM])
return True
self.logger.debug(
"Mention is not pleonastic %s(%s)",
mention[ID], self.graph_builder.get_root(mention)[FORM])
return False
```
#### File: multisieve/filters/quantityFilter.py
```python
from corefgraph.multisieve.filters.basefilter import BaseFilter
from corefgraph.constants import ID, FORM, SPAN, NER
__author__ = "<NAME> <<EMAIL>>"
class QuantityFilter(BaseFilter):
""" Class to remove mentions thar are quantities."""
short_name = "QuantityFilter"
def filter(self, mention, prev_mentions):
""" check if the mention is a non-word
:param mention: The mention to test.
:return: True or False.
"""
head_word = self.graph_builder.get_head_word(mention)
if self._inside_money(head_word[SPAN]):
self.logger.debug(
"Mention is money or perceptual: %s(%s)", mention[FORM], mention[ID])
return True
return False
def _inside_money(self, mention_span):
""" Check if a span is inside any Named entity Mention span and is not
the mention.
:param mention_span: The span of the mention.
"""
for entity in self.extractor.named_entities:
if entity[NER] == "MONEY" or entity[NER] == "PERCENT":
if self.graph_builder.is_inside(mention_span, entity[SPAN]):
return True
return False
```
#### File: multisieve/filters/relativesFilter.py
```python
from corefgraph.multisieve.filters.basefilter import BaseFilter
from corefgraph.constants import FORM, ID, POS
from corefgraph.resources.tagset import pos_tags
from corefgraph.resources.dictionaries import pronouns
__author__ = "<NAME> <<EMAIL>>"
class RelativesFilter(BaseFilter):
""" Class to remove mentions thar are pleonastic pronouns."""
short_name = "RelativesFilter"
def filter(self, mention, prev_mentions):
""" check if the mention is pleonastic.
:param mention: The mention to test.
:return: True or False.
"""
if pos_tags.relative_pronoun(mention.get(POS, "")):
words = self.graph_builder.get_words(self.graph_builder.get_root(mention))
mention_words = self.graph_builder.get_words(mention)
first_word_index = words.index(mention_words[0])
last_word_index = words.index(mention_words[-1])
if first_word_index > 0:
if pos_tags.determinant(words[first_word_index-1][POS]):
return True
            if last_word_index + 1 < len(words):  # guard: the mention may end the sentence
                next_word = words[last_word_index+1]
                if pos_tags.pronoun(next_word[POS]) or pronouns.all(next_word[FORM]):
                    if mention[FORM].lower() == "que":
                        self.logger.debug(
                            "Mention is relative %s(%s)",
                            mention[ID], self.graph_builder.get_root(mention)[FORM])
                        return True
return False
```
#### File: multisieve/purges/singeltonPurge.py
```python
from .basepurge import BasePurge
from corefgraph.constants import FORM, ID
__author__ = "<NAME> <<EMAIL>>"
class SingletonPurge(BasePurge):
""" Purge any singleton in the system
"""
short_name = "Singleton"
def purge_mention(self, mention):
""" Nothing to do here.
:param mention: The mention to test
:return: True or False.
"""
return False
def purge_entity(self, entity):
""" Purge any .
:param entity: The entity to test
:return: True or False.
"""
if len(entity) < 2:
mention = entity[0]
self.logger.debug("Purged singleton: %s(%s)", mention[FORM], mention[ID])
return True
return False
```
#### File: multisieve/sieves/base.py
```python
from collections import Counter
from logging import getLogger
from corefgraph.constants import SPAN, ID, FORM, UTTERANCE, POS, NER, SPEAKER, CONSTITUENT, TAG, INVALID, GOLD_ENTITY
from corefgraph.resources.dictionaries import pronouns, stopwords
from corefgraph.resources.rules import rules
from corefgraph.resources.tagset import ner_tags, constituent_tags
from corefgraph.resources.tagset import pos_tags
from corefgraph.multisieve.features.constants import UNKNOWN, PERSON, FIRST_PERSON, SECOND_PERSON, GENERIC, \
STARTED_BY_INDEFINITE_PRONOUN, APPOSITIVE, PREDICATIVE_NOMINATIVE, MENTION, PROPER_MENTION, NOMINAL_MENTION, \
PRONOUN_MENTION, STARTED_BY_INDEFINITE_ARTICLE, NUMBER, ANIMACY, GENDER, ENUMERATION_MENTION
__author__ = '<NAME> <<EMAIL>>'
class Sieve(object):
""" The base of all the sieves of the system. It contains all the check,
resolve and merge basic mechanics and also the methods to extract
information from entities and candidates.
"""
short_name = "base"
auto_load = True
# Filter options
DISCOURSE_SALIENCE = True
ONLY_FIRST_MENTION = True
USE_INCOMPATIBLES = True
NO_PRONOUN_MENTION = True
NO_ENUMERATION_MENTION = False
NO_APPOSITIVE_MENTION = False
NO_PRONOUN_CANDIDATE = False
NO_ENUMERATION_CANDIDATE = False
NO_APPOSITIVE_CANDIDATE = False
NO_STOP_WORDS = False
INCOMPATIBLE_DISCOURSE = False
SENTENCE_DISTANCE_LIMIT = False
I_WITHIN_I = False
NO_SUBJECT_OBJECT = False
IS_INSIDE = True
gold_check = True
UNKNOWN_VALUES = {UNKNOWN, None, }
INCOMPATIBLES = "incompatible"
UNRELIABLE = 3
def __init__(self, meta_info):
self.meta = Counter()
self.logger = getLogger(__name__ + "." + self.short_name)
self.meta_info = meta_info
self.correct_link = []
self.wrong_link = []
self.lost_link = []
self.no_link = []
self.graph_builder = None
def get_meta(self):
return {
"OK": self.correct_link,
"WRONG": self.wrong_link,
"LOST": self.lost_link,
"NO": self.no_link,
}
def resolve(self, graph_builder, mentions_order, candidates_order):
"""Runs each sentence compare each mention and its candidates.
:param graph_builder: The manager to ask or manipulate the graph.
:param candidates_order: A list sentences that are a list of mentions in BFS.
:param mentions_order: A list sentences that are a list of mentions in textual order.
"""
self.graph_builder = graph_builder
output_clusters = dict()
self.logger.info(
"SIEVE: =========== %s Start ===========", self.short_name)
# for each sentence for each mention in tree traversal order
for index_sentence, sentence in enumerate(mentions_order):
for index_mention, mention in enumerate(sentence):
self.logger.debug("RESOLVE: ---------- New mention ----------")
self.log_mention(mention)
# Skip the mention?
mention_entity_idx, mention_entity = mention.get("entity")
if not self.validate(mention=mention, entity=mention_entity):
self.logger.debug("RESOLVE: Invalid mention")
else:
candidates = self.get_candidates(
mentions_order, candidates_order, mention, index_sentence)
for candidate in candidates:
self.logger.debug("RESOLVE: +++++ New Candidate +++++")
self.log_candidate(candidate)
candidate_entity_idx, candidate_entity = \
candidate.get("entity")
if self.are_coreferent(
entity=mention_entity, mention=mention,
candidate_entity=candidate_entity, candidate=candidate):
if self.meta_info:
if self.check_gold(mention, candidate):
self.logger.info(
"CORRECT LINK (%s):%s ", self.short_name,
self.context(
mention_entity, mention,
candidate_entity, candidate))
self.correct_link.append(
(mention[ID], candidate[ID]))
else:
self.logger.debug(
"WRONG LINK (%s):%s ", self.short_name,
self.context(
mention_entity, mention,
candidate_entity, candidate))
self.wrong_link.append(
(mention[ID], candidate[ID]))
try:
del output_clusters[mention_entity_idx]
except KeyError:
pass
# If passed the sieve link candidate and stop search
# for that entity
try:
del output_clusters[candidate_entity_idx]
except KeyError:
pass
self.logger.debug("RESOLVE: End candidate (LINKED).")
mention_entity_idx, mention_entity = self._merge(
mention_entity, candidate_entity)
break
else:
if self.meta_info:
if self.check_gold(mention, candidate):
if not self.check_in_entity(candidate, mention_entity):
self.logger.debug(
"LOST LINK(%s):%s ", self.short_name,
self.context(
mention_entity, mention,
candidate_entity, candidate))
self.lost_link.append(
(mention[ID], candidate[ID]))
else:
self.no_link.append((mention[ID], candidate[ID],))
self.logger.debug("RESOLVE: End candidate(Not linked).")
self.logger.debug("RESOLVE: End mention.")
output_clusters[mention_entity_idx] = mention_entity
return output_clusters
def are_coreferent(self, entity, mention, candidate_entity, candidate):
""" Determine if the candidate is a valid entity coreferent.
:param candidate: The candidate to be part of the entity.
:param mention: The selected mention to represent the entity.
:param candidate_entity: The entity of the candidate mention.
:param entity: The entity that is going to be evaluated.
"""
self.meta["asked"] += 1
if mention.get(INVALID) or candidate.get(INVALID):
return False
if self.USE_INCOMPATIBLES:
for c_mention in candidate_entity:
if c_mention[ID] in mention.get(self.INCOMPATIBLES, ()):
self.meta["filter_incompatible"] += 1
self.logger.debug(
"LINK FILTERED incompatible mentions inside entities.")
return False
if self.SENTENCE_DISTANCE_LIMIT:
sentence_distance = self.graph_builder.sentence_distance(
mention, candidate)
if sentence_distance > self.SENTENCE_DISTANCE_LIMIT \
and not (mention.get(PERSON) in (FIRST_PERSON, SECOND_PERSON)):
self.meta["filter_to_far"] += 1
self.logger.debug(
"LINK FILTERED Candidate to far and not I or You.")
return False
if self.UNRELIABLE and (stopwords.unreliable(mention[FORM].lower())) and \
(self.graph_builder.sentence_distance(
element_a=mention, element_b=candidate) > self.UNRELIABLE):
self.meta["filter_to_far_this"] += 1
self.logger.debug("LINK FILTERED too far this. Candidate")
return False
if self.check_in_entity(mention=candidate, entity=entity):
self.meta["filter_already_linked"] += 1
self.logger.debug("LINK FILTERED already linked. Candidate")
return False
if candidate.get(GENERIC, False) and candidate.get(PERSON) == SECOND_PERSON:
self.meta["filter_generic_candidate"] += 1
self.logger.debug("LINK FILTERED Generic Candidate")
return False
if self.IS_INSIDE and (self.graph_builder.is_inside(mention[SPAN], candidate[SPAN]) or
self.graph_builder.is_inside(candidate[SPAN], mention[SPAN])):
self.meta["filtered_inside"] += 1
self.logger.debug("LINK FILTERED Inside. Candidate")
return False
if self.INCOMPATIBLE_DISCOURSE and \
self.incompatible_discourse(
entity_a=candidate_entity, entity_b=entity):
self.meta["filtered_discourse"] += 1
self.logger.debug("LINK FILTERED incompatible discourse")
return False
representative_mention = self.entity_representative_mention(entity)
if self.NO_SUBJECT_OBJECT and \
self.subject_object(candidate_entity, entity):
self.meta["filtered_subject_object"] += 1
self.logger.debug("LINK FILTERED Subject-object")
self.invalid(
entity_a=entity, mention_a=mention,
entity_b=candidate_entity, mention_b=candidate)
return False
if self.I_WITHIN_I and \
self.i_within_i(
mention_a=representative_mention, mention_b=candidate):
self.meta["filtered_i_within_i"] += 1
self.logger.debug(
"LINK FILTERED I within I construction: %s", candidate[FORM])
self.invalid(
entity_a=entity, mention_a=mention,
entity_b=candidate_entity, mention_b=candidate)
return False
if self.NO_PRONOUN_CANDIDATE and self.is_pronoun(candidate):
self.logger.debug("FILTERED LINK mention pronoun")
self.meta["Filtered_mention_pronoun"] += 1
return False
self.meta["First pass"] += 1
if self.NO_ENUMERATION_CANDIDATE and candidate[MENTION] == ENUMERATION_MENTION:
self.logger.debug("FILTERED LINK candidate enumeration")
self.meta["Filtered_enumeration"] += 1
return False
if self.NO_APPOSITIVE_CANDIDATE and candidate.get(APPOSITIVE, False):
self.logger.debug("FILTERED LINK candidate appositive")
self.meta["mention_filtered_enumeration"] += 1
return False
return True
def validate(self, mention, entity):
""" Determine if the mention is valid for this sieve.
:param mention: The mention to check.
:param entity: The entity of the mention.
"""
if self.DISCOURSE_SALIENCE and \
not self.discourse_salience(mention=mention):
return False
# Filter all no first mentions
if self.ONLY_FIRST_MENTION and \
not self.first_mention(mention=mention, entity=entity):
self.meta["mention_filtered_no_first"] += 1
self.logger.debug(
"MENTION FILTERED: Not first one: %s", mention[FORM])
return False
# Filter Narrative you
if self.narrative_you(mention=mention):
self.meta["mention_filtered_narrative_you"] += 1
self.logger.debug(
"MENTION FILTERED: is a narrative you: %s", mention[FORM])
return False
# filter generics
if mention.get(GENERIC, False):
self.meta["mention_filtered_generic"] += 1
self.logger.debug(
"MENTION FILTERED: is generic: %s", mention[FORM])
return False
# Filter stopWords
if self.NO_STOP_WORDS and stopwords.stop_words(mention[FORM].lower()):
self.meta["mention_filtered_stop_word"] += 1
self.logger.debug(
"MENTION FILTERED: is a stop word: %s", mention[FORM])
return False
# Filter all pronouns
if self.NO_PRONOUN_MENTION and self.is_pronoun(mention):
self.meta["mention_filtered_pronoun"] += 1
self.logger.debug(
"MENTION FILTERED: Is a pronoun: %s", mention[FORM])
return False
if self.NO_ENUMERATION_MENTION and mention[MENTION] == ENUMERATION_MENTION:
self.logger.debug("MENTION FILTERED enumeration form")
self.meta["mention_filtered_enumeration"] += 1
return False
if self.NO_APPOSITIVE_MENTION and mention.get(APPOSITIVE, False):
self.logger.debug("MENTION FILTERED APPOSITIVE form")
self.meta["mention_filtered_enumeration"] += 1
return False
return True
def discourse_salience(self, mention):
""" Determine if a mention is relevant by its discourse salience.
:param mention: The mention to check discourse salience
:return: True if is relevant mention
"""
        # If it starts with, or is, an indefinite pronoun, filter it.
if mention[STARTED_BY_INDEFINITE_PRONOUN]:
self.logger.debug(
"MENTION FILTERED: is undefined: %s", mention[FORM])
self.meta["mention_filtered_is_undefined"] += 1
return False
        # If it starts with an indefinite article and isn't part of an appositive
        # or predicative-nominative construction, filter it.
if not mention.get(APPOSITIVE, False) and not mention.get(PREDICATIVE_NOMINATIVE, False) and \
self.is_undefined(mention=mention):
self.meta["mention_filtered_starts_undefined"] += 1
self.logger.debug(
"MENTION FILTERED: starts with undefined: %s", mention[FORM])
return False
return True
def first_mention(self, mention, entity):
""" Check if the mention is the first no pronoun mention with discourse
salience of the cluster.
:param mention: The mention to check.
:param entity: The entity of the mention.
:return: True or False
"""
for m in entity:
if self.is_pronoun(m):
continue
if not self.discourse_salience(m):
continue
if m[ID] == mention[ID]:
return True
return False
return entity[0][ID] == mention[ID]
def get_candidates(self, text_order, candidate_order, mention, index_sent):
""" Gets the candidates ordered for the sieve check. This function is
made for the need of reorder candidates in the sieve X. Also, another
sieves may benefit form this in the future.
:param text_order: The list of sentences that contain the list of mentions that form the text.
        :param candidate_order: The list of sentences that contain the list of mentions that form the text in BFS order.
        :param mention: The mention whose candidates we need.
:param index_sent: The index of the current sentence.
@rtype : list
:return: A list of ordered candidates.
"""
index_mention = [c[ID] for c in candidate_order[index_sent]].index(mention["id"])
return candidate_order[index_sent][:index_mention] + [m for s in reversed(text_order[:index_sent]) for m in s]
def invalid(self, entity_a, mention_a, entity_b, mention_b):
""" Set the two mentions invalid for each other.
:param entity_a: Entity of the mention.
:param mention_a: One of the mentions.
:param entity_b: Entity of the other mention.
:param mention_b: The other mention.
:return:
"""
if self.gold_check:
if self.check_gold(mention_a, mention_b):
self.logger.debug(
"WRONG BLACKLISTED: %s",
self.context(entity_a, mention_a, entity_b, mention_b))
else:
self.logger.debug(
"CORRECT BLACKLISTED: %s",
self.context(entity_a, mention_a, entity_b, mention_b))
else:
self.logger.debug("BLACKLISTED")
try:
mention_a[self.INCOMPATIBLES].add(mention_b[ID])
except KeyError:
mention_a[self.INCOMPATIBLES] = {mention_b[ID]}
try:
mention_b[self.INCOMPATIBLES].add(mention_a[ID])
except KeyError:
mention_b[self.INCOMPATIBLES] = {mention_a[ID]}
def _merge(self, entity_a, entity_b):
""" Merge two entities into new one.
:param entity_a: a entity to merge
:param entity_b: a entity to merge
"""
# Add the new mentions to first cluster
entity = list(sorted(
entity_a + entity_b, key=lambda x: x[SPAN],))
incompatibles = set()
for mention in entity:
incompatibles.update(mention.get(self.INCOMPATIBLES, set()))
idx = entity[0][SPAN]
for mention in entity:
mention["entity"] = (idx, entity)
mention[self.INCOMPATIBLES] = incompatibles
return idx, entity
@staticmethod
def entity_representative_mention(entity):
""" Get the most representative mention of the entity.
:param entity: The entity of which representative mention is fetched.
"""
for mention in entity:
if mention.get(MENTION) == PROPER_MENTION:
return mention
for mention in entity:
if mention.get(MENTION) == NOMINAL_MENTION:
return mention
for mention in entity:
if mention.get(MENTION) == PRONOUN_MENTION:
return mention
return entity[0]
def entity_property(self, entity, property_name):
""" Get a combined property of the values of all mentions of the entity
:param property_name: The name of the property to fetch.
:param entity: The entity of which property is fetched.
"""
combined_property = set(
(mention.get(property_name, UNKNOWN) for mention in entity))
if len(combined_property) > 1:
combined_property = combined_property.difference(
self.UNKNOWN_VALUES)
if len(combined_property) == 0:
combined_property.add(UNKNOWN)
return combined_property
@staticmethod
def entity_ne(entity):
""" Get a combined NE of the values of all mentions of the entity.
Other and no NER tags are cleared. If no NE tag is found None is
returned.
:param entity: The entity of which NE is fetched.
"""
combined_property = set(
(mention.get(NER, None) for mention in entity))
combined_property = list(filter(
lambda x: ner_tags.mention_ner(x), combined_property))
if len(combined_property) == 0:
return set()
return set(combined_property)
def narrative_you(self, mention):
"""The mention is second person(YOU) or the narrator(PER0) in an article.
:param mention: The mention to check.
"""
return \
mention[self.graph_builder.doc_type] == \
self.graph_builder.doc_article and\
mention.get(SPEAKER, False) == "PER0" and \
mention.get(PERSON) == SECOND_PERSON
@staticmethod
def is_pronoun(mention):
""" The mentions is a pronoun mention?
:param mention: The mention to check.
"""
return (mention.get(MENTION) == PRONOUN_MENTION) or pronouns.all(mention[FORM])
@staticmethod
def is_undefined(mention):
""" The mentions is an undefined mention?
:param mention: The mention to check.
"""
return mention[STARTED_BY_INDEFINITE_PRONOUN] or mention[STARTED_BY_INDEFINITE_ARTICLE]
@staticmethod
def is_location(mention):
""" The mentions is a location?
:param mention: The mention to check.
"""
return ner_tags.location(mention.get(NER))
def agree_attributes(self, entity, candidate_entity):
""" All attributes are compatible. Its mean the attributes of each are
a subset one of the another.
:param entity: Entity of the mention
:param candidate_entity: Entity of the candidate
:return: True or False
"""
candidate_gender = self.entity_property(candidate_entity, GENDER)
entity_gender = self.entity_property(entity, GENDER)
if not (self.UNKNOWN_VALUES.intersection(entity_gender) or
self.UNKNOWN_VALUES.intersection(candidate_gender)):
if candidate_gender.difference(entity_gender) \
and entity_gender.difference(candidate_gender):
self.logger.debug(
"Gender disagree %s %s",
entity_gender, candidate_gender)
return False
candidate_number = self.entity_property(candidate_entity, NUMBER)
entity_number = self.entity_property(entity, NUMBER)
if not(self.UNKNOWN_VALUES.intersection(entity_number) or
self.UNKNOWN_VALUES.intersection(candidate_number)):
if candidate_number.difference(entity_number) \
and entity_number.difference(candidate_number):
self.logger.debug(
"Number disagree %s %s",
entity_number, candidate_number)
return False
candidate_animacy = self.entity_property(candidate_entity, ANIMACY)
entity_animacy = self.entity_property(entity, ANIMACY)
if not(self.UNKNOWN_VALUES.intersection(entity_animacy) or
self.UNKNOWN_VALUES.intersection(candidate_animacy)):
if candidate_animacy.difference(entity_animacy) \
and entity_animacy.difference(candidate_animacy):
self.logger.debug(
"Animacy disagree %s %s",
entity_animacy, candidate_animacy)
return False
candidate_ner = self.entity_ne(candidate_entity)
entity_ner = self.entity_ne(entity)
if not(entity_ner is None or candidate_ner is None):
if candidate_ner.difference(entity_ner) and \
entity_ner.difference(candidate_ner):
self.logger.debug(
"NER disagree %s %s",
entity_ner, candidate_ner)
return False
return True
def subject_object(self, entity_a, entity_b):
""" Check if entities are linked by any subject-object relation.
:param entity_a: An entity to check
:param entity_b: An entity to check
:return: True or False
"""
if entity_a[0]["doc_type"] != "article":
return False
for mention_a in entity_a:
for mention_b in entity_b:
if self.graph_builder.sentence_distance(
mention_a, mention_b) > 0:
continue
if mention_a.get("subject", False) and \
mention_b.get("object", False) and \
mention_a["subject"] == mention_b["object"]:
return True
if mention_b.get("subject", False) and \
mention_a.get("object", False) and \
mention_b["subject"] == mention_a["object"]:
return True
pass
return False
def i_within_i(self, mention_a, mention_b):
""" Check if the mention and candidate are in a i-within-i
construction.
:param mention_a: a mention
:param mention_b: another mention
"""
if not self.graph_builder.same_sentence(mention_a, mention_b):
return False
# Aren't appositive
if mention_a.get(APPOSITIVE, False) and mention_b.get(APPOSITIVE, False):
return False
# Aren't Relative pronouns
if rules.is_relative_pronoun(self.graph_builder, mention_b, mention_a) or \
rules.is_relative_pronoun(self.graph_builder, mention_a, mention_b):
return False
# One is included in the other
if self.graph_builder.is_inside(mention_a[SPAN], mention_b[SPAN]) \
or self.graph_builder.is_inside(
mention_b[SPAN], mention_a[SPAN]):
return True
return False
def relaxed_form_word(self, mention):
""" Return the words of the mention without the words after the head
word.
:param mention: The mention where the words are extracted.
:return: a list of words.
"""
mention_words = self.graph_builder.get_words(mention)
mention_head = self.graph_builder.get_head_word(mention)
head = False
for index, word in enumerate(mention_words):
word_pos = word[POS]
if word[ID] == mention_head[ID]:
head = True
if head and pos_tags.relative_pronoun(word_pos):
return [word for word in mention_words[:index]]
# TODO CHANGE TO CLAUSE CONNECTORS
if head and word[FORM] == ",":
return [word for word in mention_words[:index]]
return [word for word in mention_words]
def relaxed_form(self, mention):
""" Return the form of the mention without the words after the head
word. The form is lowered and all words are space separated.
:param mention: The mention where the words are extracted.
:return: a string of word forms separated by spaces.
"""
return " ".join(word[FORM] for word in self.relaxed_form_word(mention=mention)).lower()
def same_speaker(self, mention_a, mention_b):
""" Check if mention refer to the same speaker.
:param mention_a: a mention
:param mention_b: another mention
:return type: Bool
"""
speaker_a = mention_a.get(SPEAKER, False)
speaker_b = mention_b.get(SPEAKER, False)
if not(speaker_a and speaker_b):
return False
if speaker_a == speaker_b:
return True
# Two speakers are the same string
if type(speaker_a) == str and\
type(speaker_b) == str and \
speaker_a == speaker_b:
return True
# Speaker A is B head word
if self._check_speaker(speaker_a, mention_b):
return True
# Speaker B is A head word
if self._check_speaker(speaker_b, mention_a):
return True
return False
def _check_speaker(self, speaker, mention):
""" Is the mention a form of the speaker.
:param speaker:
:param mention:
:return:
"""
# the speaker may be a string or another mention
if not (type(speaker) is str):
speaker = speaker[FORM]
mention_head_form = self.graph_builder.get_head_word(mention)[FORM]
if mention_head_form == speaker:
return True
for speaker_token in speaker.split():
if speaker_token == mention_head_form:
return True
return False
def are_speaker_speech(self, speaker, speech):
""" Tho mention are in a speaker speech relation?
:param speaker: The mention that is a speaker
:param speech: The mention that is inside a speech.
:return: True or False
"""
speech_speaker = speech.get(SPEAKER, False)
# TODO check this Only heads??
if type(speech_speaker) is dict:
speaker_words_ids = [
word[ID]
for word in self.graph_builder.get_words(speaker)]
return speech_speaker[ID] in speaker_words_ids
else:
speaker_head_word = rules.get_head_word_form(self.graph_builder, speaker)\
.lower()
for word in speech_speaker.split(" "):
if word.lower() == speaker_head_word:
return True
return False
def incompatible_discourse(self, entity_a, entity_b):
""" Check if two entities have any incompatible mentions between them.
        :param entity_a: An entity
        :param entity_b: Another entity
:return: Return True if the entities are incompatible.
"""
for mention_a in entity_a:
doc_type = entity_b[0][self.graph_builder.doc_type]
mention_a_person = mention_a.get(PERSON)
for mention_b in entity_b:
mention_b_person = mention_b.get(PERSON)
if (self.are_speaker_speech(
speaker=mention_a, speech=mention_b) or
self.are_speaker_speech(
speaker=mention_b, speech=mention_a)
) and not (
mention_a_person == FIRST_PERSON and
mention_b_person == FIRST_PERSON):
return True
if doc_type == self.graph_builder.doc_article:
continue
distance = abs(mention_a[UTTERANCE] - mention_b[UTTERANCE])
if distance == 1 and \
not self.same_speaker(mention_a, mention_b):
if mention_a_person != mention_b_person:
if mention_b_person == FIRST_PERSON:
return True
if mention_b_person == SECOND_PERSON:
return True
return False
def check_gold(self, mention, candidate):
""" Check if the link is in the gold Standard.
:param mention: The mention which link want to check.
:param candidate: The candidate of the link.
:return: True or False depends of the veracity
"""
clusters_m = set(m['gold_entity'] for m in self.graph_builder.get_gold_mention_by_span(mention[SPAN]))
clusters_c = set(c['gold_entity'] for c in self.graph_builder.get_gold_mention_by_span(candidate[SPAN]))
return bool(clusters_c and clusters_m and clusters_c.intersection(clusters_m))
def log_mention(self, mention):
""" The function that log the mention and all useful info for this sieve
coreference resolution
:param mention: The mention to show
"""
self.logger.debug("MENTION -%s- %s", mention[FORM], mention[SPAN])
def log_candidate(self, candidate):
""" The function that show the candidate of a link and all the relevant
info for the linking process.
:param candidate:
"""
self.logger.debug("CANDIDATE -%s- %s", candidate[FORM], candidate[SPAN])
def context(self, mention_entity, mention, candidate_entity, candidate):
""" Return a Human readable and sieve specific info string of the
mention, the candidate and the link for logging proposes.
:param mention_entity: The entity of the linked mention.
:param mention: The mention.
:param candidate_entity: The candidate entity
:param candidate: The candidate of the link
:return A ready to read string.
"""
return "{0} -{1}- | {2} -{3}-".format(
mention[FORM], self.graph_builder.get_root(mention)[FORM],
candidate[FORM], self.graph_builder.get_root(candidate)[FORM])
@staticmethod
def check_in_entity(mention, entity):
""" Check if the mention is part of the entity.
:param entity: entity where check.
:param mention: The mention to find.
:return True or False.
"""
return mention[ID] in [m[ID] for m in entity]
class PronounSieve(Sieve):
def pronoun_order(self, sentence_candidates, mention):
""" Reorder the candidates that are in the same sentence of the mention
for pronoun sieve coreference resolution.
:param sentence_candidates: The candidates for coreference that appears in the same sentence
of the main mention.
:param mention: The main mention whose coreference is been checking.
:return: The sentence candidates ordered for coreference pronoun resolution.
"""
reordered = []
reordered_ids = []
current = mention.get(CONSTITUENT, mention)
root_id = self.graph_builder.get_root(current)[ID]
while current[ID] != root_id:
current = self.graph_builder.get_syntactic_parent(current)
if constituent_tags.clause(current.get(TAG)):
for mention_a in sentence_candidates:
if mention_a[ID] not in reordered_ids and \
self.graph_builder.is_inside(mention_a[SPAN], current[SPAN], ) and \
mention_a[SPAN][0] < mention[SPAN][1]:
reordered_ids.append(mention_a[ID])
reordered.append(mention_a)
return reordered
def get_candidates(self, text_order, candidate_order, mention, index_sent):
""" Gets the candidates ordered in a for the sieve check.
:param text_order: The list of sentences that contain the list of mentions that form the text.
:param candidate_order: The list of sentences that contain the list of mentions that form the text in bts order.
:param mention: The mention whose candidates whe need.
:param index_sent: The index of the current sentence.
@rtype : list
:return: A list of ordered candidates.
"""
mention_index = [c[ID] for c in candidate_order[index_sent]].index(mention["id"])
if len(candidate_order[index_sent][mention_index]["entity"][1]) == 1 and self.is_pronoun(mention):
self.logger.debug("ORDERING: pronoun order")
sentence_candidates = self.pronoun_order(candidate_order[index_sent][:mention_index], mention)
other_candidates = [m for s in reversed(text_order[:index_sent]) for m in s]
if pronouns.relative(mention[FORM].lower()):
self.logger.debug("ORDERING: Relative pronoun order")
sentence_candidates.reverse()
return sentence_candidates + other_candidates
else:
return super(PronounSieve, self).get_candidates(text_order, candidate_order, mention, index_sent)
pass
```
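The class-level flags in `Sieve` (e.g. `NO_PRONOUN_MENTION`, `USE_INCOMPATIBLES`) drive the generic `validate()`/`are_coreferent()` filtering, so a new pass only needs to add its own linking condition on top. A hypothetical minimal sieve in that style (`ExactHeadMatch` is not part of the package):
```python
from corefgraph.multisieve.sieves.base import Sieve
from corefgraph.constants import FORM

class ExactHeadMatch(Sieve):
    """Toy sieve: link mentions whose head-word forms match exactly (illustration)."""
    short_name = "XHM"
    NO_PRONOUN_MENTION = True   # skip pronoun mentions, as most string-match sieves do

    def are_coreferent(self, entity, mention, candidate_entity, candidate):
        # Keep the generic compatibility checks from the base class.
        if not Sieve.are_coreferent(self, entity, mention, candidate_entity, candidate):
            return False
        head_m = self.graph_builder.get_head_word(mention)[FORM].lower()
        head_c = self.graph_builder.get_head_word(candidate)[FORM].lower()
        if head_m == head_c:
            self.meta["linked_" + self.short_name] += 1
            return True
        self.meta["ignored"] += 1
        return False
```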
#### File: multisieve/sieves/preciseConstruct.py
```python
from corefgraph.constants import SPAN, NER, ID, TAG, FORM
from corefgraph.multisieve.features.constants import APPOSITIVE, PROPER_MENTION, MENTION, \
PREDICATIVE_NOMINATIVE, GENDER, NEUTRAL, \
ANIMACY, INANIMATE, RELATIVE_PRONOUN, DEMONYM, LOCATION
from corefgraph.multisieve.sieves.base import Sieve
from corefgraph.resources.dictionaries import verbs
from corefgraph.resources.rules import rules
from corefgraph.resources.tagset import constituent_tags, ner_tags
__author__ = '<NAME> <<EMAIL>>'
class AppositiveConstruction(Sieve):
"""Two nominal mentions in an appositive construction are coreferent
"""
short_name = "ACC"
# Filter options
NO_PRONOUN_MENTION = False
NO_STOP_WORDS = False
USE_INCOMPATIBLES = False
IS_INSIDE = False
auto_load = False
def validate(self, mention, entity):
"""Entity must be in appositive construction.
:param mention: The mention to check.
:param entity: The entity of the mention.
"""
if not super(self.__class__, self).validate(mention, entity):
return False
# if mention[APPOSITIVE]:
# return True
return True
def are_coreferent(self, entity, mention, candidate_entity, candidate):
"""Candidate is The NP that cover the appositive construction.
:param mention: The selected mention to represent the entity.
:param entity: The entity that mention is part.
:param candidate: The candidate that may corefer the entity.
:param candidate_entity: The entity that candidate is part of it.
:return: True or false
"""
if not Sieve.are_coreferent(
self, entity, mention, candidate_entity, candidate):
return False
# mention = self.entity_representative_mention(entity)
# If candidate or mention are NE use their constituent as mentions
if PROPER_MENTION == mention[MENTION] == candidate[MENTION]:
self.logger.debug("LINK IGNORED are proper nouns")
self.meta["filtered_two_proper_mentions"] += 1
return False
if not self.agree_attributes(
entity=entity, candidate_entity=candidate_entity):
self.logger.debug("LINK IGNORED attributes disagree")
self.meta["filtered_attribute_disagree"] += 1
return False
if self.is_location(mention):
self.meta["filtered_location"] += 1
self.logger.debug("LINK IGNORED is a location: %s",
mention.get(NER, "NO NER"))
return False
# Check the apposition
if mention[APPOSITIVE] and mention[APPOSITIVE][SPAN] == candidate[SPAN]:
self.meta["linked_" + self.short_name] += 1
return True
if candidate[APPOSITIVE] and candidate[APPOSITIVE][SPAN] == mention[SPAN]:
self.meta["linked_" + self.short_name] += 1
return True
# if candidate.get("constituent", candidate) == mention[APPOSITIVE]:
# self.meta["linked_" + self.short_name] += 1
# mention[APPOSITIVE] = True
# return True
# if mention.get("constituent", mention) == candidate[APPOSITIVE]:
# self.meta["linked_" + self.short_name] += 1
# candidate[APPOSITIVE] = True
# return True
self.meta["ignored"] += 1
return False
class PredicativeNominativeConstruction(Sieve):
""" The mention and the candidate are in a subject-object copulative
relation."""
short_name = "PNC"
# Filter options
ONLY_FIRST_MENTION = False
USE_INCOMPATIBLES = False
auto_load = False
def validate(self, mention, entity):
"""Entity must be in a predicative-nominative construction.
:param mention: The mention to check.
:param entity: The entity of the mention"""
if not super(self.__class__, self).validate(mention, entity):
return False
if not mention[PREDICATIVE_NOMINATIVE]:
self.logger.debug("MENTION FILTERED Not predicative nominative")
return False
return True
def are_coreferent(self, entity, mention, candidate_entity, candidate):
""" Candidate is the subject of the predicative-nominative relation of
the mention.
:param mention: The selected mention to represent the entity.
:param entity: The entity that mention is part.
:param candidate: The candidate that may corefer the entity.
:param candidate_entity: The entity that candidate is part of it.
:return: True or false
"""
if not Sieve.are_coreferent(
self, entity, mention, candidate_entity, candidate):
return False
mention = self.entity_representative_mention(entity)
if (self.graph_builder.is_inside(mention[SPAN], candidate[SPAN]) or
self.graph_builder.is_inside(
candidate[SPAN], mention[SPAN])):
return False
if self.graph_builder.same_sentence(mention, candidate):
# S < (NP=m1 $.. (VP < ((/VB/ < /^(am|are|is|was|were|'m|'re|'s|be)$/) $.. NP=m2)))
# S < (NP=m1 $.. (VP < (VP < ((/VB/ < /^(be|been|being)$/) $.. NP=m2))))
mention_parent = self.graph_builder.get_syntactic_parent(mention)
mention_grandparent = self.graph_builder.get_syntactic_parent(
mention_parent)
if constituent_tags.verb_phrase(mention_parent[TAG]):
enclosing_verb_phrase = mention_parent
else:
self.logger.debug("LINK FILTERED No enclosing verb")
self.meta["filtered_no_enclosing_verb"] += 1
return False
if constituent_tags.verb_phrase(mention_grandparent[TAG]):
enclosing_verb_phrase = mention_grandparent
if not verbs.copulative(self.graph_builder.get_syntactic_sibling(
mention)[0]["form"]):
self.logger.debug("LINK FILTERED verb is not copulative")
self.meta["filtered_enclosing_verb_no_copulative"] += 1
return False
siblings = []
enclosing_verb_phrase_id = enclosing_verb_phrase[ID]
for sibling in self.graph_builder.get_syntactic_sibling(
enclosing_verb_phrase):
if sibling[ID] == enclosing_verb_phrase_id:
break
siblings.append(sibling)
siblings = [sibling[ID] for sibling in siblings]
# or siblings[X] == candidate?
if candidate[ID] in siblings:
self.meta["linked_" + self.short_name] += 1
mention[PREDICATIVE_NOMINATIVE] = candidate
return True
self.meta["ignored"] += 1
return False
class RoleAppositiveConstruction(Sieve):
""" Find role appositive relations withing the mentions.
"""
short_name = "RAC"
# Filter options
ONLY_FIRST_MENTION = False
USE_INCOMPATIBLES = False
auto_load = False
IS_INSIDE = False
def validate(self, mention, entity):
"""Entity must be in role appositive construction.
:param mention: The mention to check.
:param entity: the entity where the mention is """
if not super(self.__class__, self).validate(mention, entity):
return False
        # constraint (a): the mention must be labeled as a person
ner = mention.get(NER, None)
if not ner_tags.person(ner):
self.logger.debug("MENTION FILTERED Not a person -%s-", ner)
return False
return True
def are_coreferent(self, entity, mention, candidate_entity, candidate):
""" Candidate is the NP that the relative pronoun modified.
:param mention: The selected mention to represent the entity.
:param entity: The entity that mention is part.
:param candidate: The candidate that may corefer the entity.
:param candidate_entity: The entity that candidate is part of it.
:return: True or false
"""
if not Sieve.are_coreferent(
self, entity, mention, candidate_entity, candidate):
return False
# mention = self.entity_representative_mention(entity)
        # Constraints (b) and (c): the candidate must be animate
        # and cannot be neutral.
if not self.agree_attributes(entity, candidate_entity):
return False
if candidate[GENDER] == NEUTRAL:
self.logger.debug("LINK FILTERED Candidate is neutral")
return False
if candidate[ANIMACY] == INANIMATE:
self.logger.debug("LINK FILTERED Candidate is inanimate")
self.meta["filtered_inanimate"] += 1
return False
if rules.is_role_appositive(self.graph_builder, candidate, mention):
self.meta["linked_" + self.short_name] += 1
mention[APPOSITIVE] = candidate
return True
self.meta["ignored"] += 1
return False
class AcronymMatch(Sieve):
""" A demonym is coreferent to their location."""
# Filter options
short_name = "AMC"
ONLY_FIRST_MENTION = False
USE_INCOMPATIBLES = False
auto_load = False
def are_coreferent(self, entity, mention, candidate_entity, candidate):
""" Mention and candidate are one acronym of the other.
:param mention: The selected mention to represent the entity.
:param entity: The entity that mention is part.
:param candidate: The candidate that may corefer the entity.
:param candidate_entity: The entity that candidate is part of it.
:return: True or false
"""
# TODO clean up acronyms
# TODO plurals are not taken into account
if not Sieve.are_coreferent(
self, entity, mention, candidate_entity, candidate):
return False
for candidate in candidate_entity:
candidate_form = candidate[FORM]
if self.is_pronoun(candidate):
self.logger.debug(
"LINK FILTERED Candidate is a pronoun: %s", candidate_form)
self.meta["loop_filtered_pronoun"] += 1
continue
for mention in entity:
mention_form = mention[FORM]
if self.is_pronoun(mention):
self.logger.debug(
"Mention is a pronoun: %s next entity mention",
mention["form"])
continue
if len(candidate_form) > len(mention_form):
sort, large = mention_form, candidate_form
else:
sort, large = candidate_form, mention_form
if sort in large:
self.meta["loop_filtered_short_in_large"] += 1
continue
if not sort.isupper():
self.meta["loop_filtered_short_no_uppercase"] += 1
continue
# generated_acronyms = (filter(str.isupper, large),)
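# Illustrative example (assumption, not in the original comments): "ABC" matches
# "American Broadcasting Company" because the uppercase letters of the long form
# spell out the short form.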
if sort == "".join(filter(str.isupper, large)):  # join keeps the comparison valid on Python 3, where filter returns an iterator
self.logger.debug("ACRONYM MATCH: %s ", sort)
self.meta["linked_" + self.short_name] += 1
return True
self.meta["ignored"] += 1
return False
class RelativePronoun(Sieve):
""" A relative pronoun is coreferent to the NP that modified."""
short_name = "RPC"
# Filter options
IS_INSIDE = False
ONLY_FIRST_MENTION = False
USE_INCOMPATIBLES = False
NO_PRONOUN_MENTION = False
auto_load = False
def validate(self, mention, entity):
"""Entity must be relative pronoun.
:param mention: The mention to check.
:param entity: The entity og the mention.
"""
if not super(self.__class__, self).validate(mention, entity):
return False
if not mention[RELATIVE_PRONOUN]:
self.logger.debug("MENTION FILTERED Not a relative pronoun")
return False
return True
def are_coreferent(self, entity, mention, candidate_entity, candidate):
""" Candidate is the NP that the relative pronoun modified.
:param mention: The selected mention to represent the entity.
:param entity: The entity that mention is part.
:param candidate: The candidate that may corefer the entity.
:param candidate_entity: The entity that candidate is part of it.
:return: True or false
"""
if not Sieve.are_coreferent(
self, entity, mention, candidate_entity, candidate):
return False
mention = self.entity_representative_mention(entity)
candidate_tag = candidate.get(TAG)
# TODO review this
if not constituent_tags.noun_phrase(candidate_tag):
self.logger.debug("LINK FILTERED Candidate is not a noun phrase")
self.meta["filtered_no_NP"] += 1
return False
if rules.is_relative_pronoun(self.graph_builder, candidate, mention):
self.meta["linked"] += 1
return True
self.meta["ignored"] += 1
return False
class DemonymMatch(Sieve):
""" A demonym is coreferent to their location."""
short_name = "DMC"
# Filter options
ONLY_FIRST_MENTION = False
USE_INCOMPATIBLES = False
auto_load = False
def are_coreferent(self, entity, mention, candidate_entity, candidate):
""" Mention and candidate are one demonym of the other.
:param mention: The selected mention to represent the entity.
:param entity: The entity that mention is part.
:param candidate: The candidate that may corefer the entity.
:param candidate_entity: The entity that candidate is part of it.
:return: True or false
"""
if not Sieve.are_coreferent(
self, entity, mention, candidate_entity, candidate):
return False
mention = self.entity_representative_mention(entity)
# TODO Change this
candidate_form = candidate[FORM].lower().replace("the ", "")
mention_form = mention[FORM].lower().replace("the ", "")
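# Illustrative example (assumption): "spaniard" is linked to "spain" when the
# candidate's location list contains the mention form, or the mention's demonym
# list contains the candidate form.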
if mention_form in candidate.get(LOCATION, ()) or \
candidate_form in mention.get(DEMONYM, ()):
self.meta["linked_" + self.short_name] += 1
return True
self.meta["ignored"] += 1
return False
class PreciseConstructSieve(Sieve):
"""Two nominal mentions in an appositive construction are coreferent
"""
short_name = "PCM"
def __init__(self, meta_info):
super(self.__class__, self).__init__(meta_info)
self.sieves = (
AppositiveConstruction(meta_info),
PredicativeNominativeConstruction(meta_info),
AcronymMatch(meta_info),
RelativePronoun(meta_info),
DemonymMatch(meta_info),
RoleAppositiveConstruction(meta_info),
)
for sieve in self.sieves:
self.meta[sieve.short_name] = sieve.meta
def validate(self, mention, entity):
""" The validations is made in each sub sieve.
:param mention: The mention to check.
:param entity: The entity of the mention.
"""
return True
def are_coreferent(self, entity, mention, candidate_entity, candidate):
""" Check if the candidate and the entity are coreferent with
a sub-sieve pack.
:param mention: The selected mention to represent the entity.
:param entity: The entity that mention is part.
:param candidate: The candidate that may corefer the entity.
:param candidate_entity: The entity that candidate is part of it.
:return: True or false
"""
for sieve in self.sieves:
sieve.graph_builder = self.graph_builder
if sieve.validate(mention=mention, entity=entity):
if sieve.are_coreferent(
entity, mention, candidate_entity, candidate):
self.logger.debug("Match with -%s-", Sieve.short_name)
self.meta["linked"] += 1
return True
self.meta["ignored"] += 1
return False
```
#### File: multisieve/sieves/stringMatch.py
```python
from corefgraph.constants import FORM
from corefgraph.multisieve.sieves.base import Sieve
from corefgraph.resources.rules import rules
__author__ = '<NAME> <<EMAIL>>'
class ExactStringMatch(Sieve):
""" Two mentions are coreferent if their surfaces are equals."""
short_name = "ESM"
# Filter options
ONLY_FIRST_MENTION = False
NO_PRONOUN_MENTION = True
NO_PRONOUN_CANDIDATE = True
NO_STOP_WORDS = True
DISCOURSE_SALIENCE = False
def are_coreferent(self, entity, mention, candidate_entity, candidate):
""" Candidate an primary mention have the same form
:param mention: The selected mention to represent the entity.
:param entity: The entity that mention is part.
:param candidate: The candidate that may corefer the entity.
:param candidate_entity: The entity that candidate is part of it.
:return: True or false
"""
if not super(ExactStringMatch, self).are_coreferent(
entity, mention, candidate_entity, candidate):
return False
mention_form = self.get_form(mention)
candidate_form = self.get_form(candidate)
# Check empty results
if not candidate_form:
self.logger.debug("FILTERED LINK Empty candidate processed form")
self.meta["Filtered_mention_form_empty"] += 1
return False
if not mention_form:
self.logger.debug("FILTERED LINK Empty processed form")
self.meta["Filtered_candidate_form_empty"] += 1
return False
if mention_form == candidate_form:
self.logger.debug("Linked")
self.meta["linked"] += 1
return True
self.meta["ignored"] += 1
return False
def get_form(self, mention):
return rules.clean_string(mention[FORM])
def context(self, mention_entity, mention, candidate_entity, candidate):
""" Return a Human readable and sieve specific info string of the
mention, the candidate and the link for logging purposes.
:param mention_entity: The entity of the linked mention.
:param mention: The mention.
:param candidate_entity: The candidate entity
:param candidate: The candidate of the link
:return: A ready to read string.
"""
return "{0} -{1}- | {2} -{3}- ".format(
mention[FORM], self.graph_builder.get_root(mention)[FORM],
candidate[FORM], self.graph_builder.get_root(candidate)[FORM])
class RelaxedStringMatch(ExactStringMatch):
""" Two mentions are coreferent if their surfaces are similar."""
short_name = "RSM"
# Filter options
NO_ENUMERATION_MENTION = True
NO_ENUMERATION_CANDIDATE = True
NO_PRONOUN_MENTION = True
NO_PRONOUN_CANDIDATE = True
NO_STOP_WORDS = True
NO_APPOSITIVE_CANDIDATE = True
NO_APPOSITIVE_MENTION = True
def get_form(self, mention):
return rules.clean_string(self.relaxed_form(mention))
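# Sketch of the difference (inferred; relaxed_form is assumed to be an inherited
# helper of the Sieve base class): ESM compares the full cleaned surface forms,
# while RSM compares a relaxed, trimmed form, so e.g. "the president, who
# resigned," can still match "the president".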
```
#### File: corefgraph/output/basewriter.py
```python
__author__ = '<NAME> <<EMAIL>>'
__date__ = '11/29/12'
from logging import getLogger
class BaseDocument:
""" The base for create a document writer.
"""
def __init__(self, filename="", stream=None, document_id=None):
self.logger = getLogger(__name__)
self.document_id = document_id
if stream:
self.file = stream
else:
self.file = open(filename, "w")
def store(self, *args, **kwargs):
"""Implement here the storing code in sub classes.
:param args: The arguments needed for store the graph.
:param kwargs: The arguments needed for store the graph with name.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.file.close()
```
#### File: corefgraph/output/nafwritter.py
```python
from corefgraph.constants import FORM, LEMMA, POS, ID
from corefgraph.output.basewriter import BaseDocument
from pynaf import NAFDocument
import os
import time
import datetime
__author__ = '<NAME> <<EMAIL>>'
class NafDocument(BaseDocument):
""" Store the document in a KAF format(2.1).
"""
short_name = "NAF"
time_format = "%Y-%m-%dT%H:%M:%SZ"
def store(self, graph_builder, encoding, language,
start_time,
end_time,
linguistic_parsers_name="corefgraph",
linguistic_parsers_version="1.0",
linguistic_parsers_layer="coreference",
hostname=os.uname()[1],
**kwargs):
""" Store the graph in a string and return it.
:param graph_builder: the graph builder whose graph is stored.
:param language: The language code inserted into the kaf file
:param encoding: Encoding set on kaf document
:param linguistic_parsers_name: The linguistic parser name added to kaf header.
:param linguistic_parsers_layer: The linguistic parser layer added to kaf header.
:param linguistic_parsers_version: The linguistic parser version added to kaf header.
:param start_time: Add a mocked start time.
:param end_time: Add a mocked end time.
:param hostname: Set a mocked hostname.
:param kwargs: Unused
"""
start_time = time.strftime(self.time_format, start_time)
end_time = time.strftime(self.time_format, end_time)
# Check if graph contains a pre generated kaf
try:
previous_kaf = graph_builder.get_original()
except KeyError:
previous_kaf = None
if previous_kaf:
kaf_document = previous_kaf
kaf_document.add_linguistic_processors(
layer=linguistic_parsers_layer,
name=linguistic_parsers_name,
version=linguistic_parsers_version,
begin_timestamp=start_time,
end_timestamp=end_time,
hostname=hostname)
for coref_index, entity in enumerate(graph_builder.get_all_coref_entities(), 1):
references = [
[word[ID].split("#")[0] for word in graph_builder.get_words(mention)]
for mention in graph_builder.get_all_entity_mentions(entity)]
kaf_document.add_coreference("co{0}".format(coref_index), references)
else:
kaf_document = NAFDocument(language=language)
words_graphs = graph_builder.get_word_graph()
kaf_document.add_linguistic_processors(
layer=linguistic_parsers_layer,
name=linguistic_parsers_name,
version=linguistic_parsers_version,
begin_timestamp=start_time,
end_timestamp=end_time,
hostname=hostname)
word_index = 1
terms_ids = dict()
for (term_index, graph_word) in enumerate(words_graphs.vertices(), 1):
kaf_words = graph_word[FORM].split(" ")
words_ids = []
for word in kaf_words:
word_id = "w{0}".format(word_index)
kaf_document.add_word(word, word_id, lemma=graph_word[LEMMA])  # word is a plain string; the lemma lives on the graph node
words_ids.append(word_id)
word_index += 1
term_id = "t{0}".format(term_index)
terms_ids[graph_word] = term_id
kaf_document.add_term(tid=term_id, pos=graph_word[POS], words=words_ids)
for coref_index, entity in enumerate(graph_builder.get_all_coref_entities(), 1):
references = [([terms_ids[word]
for word in graph_builder.get_words(mention)], mention[FORM])
for mention in graph_builder.get_all_entity_mentions(entity)]
kaf_document.add_coreference("co{0}".format(coref_index), references)
kaf_document.write(self.file, encoding=encoding)
return kaf_document
```
#### File: corefgraph/output/textwritter.py
```python
from corefgraph.constants import NER, FORM, ID
from .basewriter import BaseDocument
__author__ = '<NAME> <<EMAIL>>'
class TextDocument(BaseDocument):
""" Store the results into a plain text evaluable by the Conll script
"""
def store(self, graph_builder):
""" Stores the graph content in Conll format into the object file.
:param graph_builder: The graph is going to be stored.
"""
if self.document_id:
if "#" in self.document_id:
document_id = self.document_id.split("#")[0]
part_id = self.document_id.split("#")[1]
else:
self.logger.warning("unknown Document ID part : using 000")
document_id = self.document_id
part_id = "000"
else:
self.logger.warning("unknown Document ID: using document 000")
document_id = "document"
part_id = "000"
self.annotate_ner(graph_builder, graph_builder.get_all_named_entities())
for coref_index, entity in enumerate(graph_builder.get_all_coref_entities(), 1):
self.annotate_mentions(
graph_builder, graph_builder.get_all_entity_mentions(entity), coref_index)
self.logger.debug(
"%s %s %s", coref_index, entity,
[x[ID] for x in graph_builder.get_all_entity_mentions(entity)])
self.file.write("#begin document ({0}); part {1}\n".format(document_id, part_id))
sentences_roots = graph_builder.get_all_sentences()
for sentence_index, root in enumerate(sentences_roots):
for word_index, word in enumerate(graph_builder.get_sentence_words(root)):
coref = list(word.get("coreference", []))
pre_mark = []
post_mark = []
for mark in coref:
if mark[0] == "[":
pre_mark += mark
if mark[-1] == "]":
post_mark += mark
self.file.write("".join(pre_mark))
self.file.write(word[FORM])
self.file.write("".join(post_mark))
self.file.write(" ")
self.file.write("\n")
self.file.write("\n#end document\n")
@staticmethod
def annotate_ner(graph_builder, ners):
for ner in ners:
# For each ner word assign ner value
# and mark start and end with '(' and ')'
words = graph_builder.get_words(ner)
ner = ner.get(NER, "O")
words[0][NER] = words[0].get(NER, "") + "("
for word in words:
word[NER] = word.get(NER, "") + ner
words[-1][NER] = words[-1].get(NER, "") + ")"
def annotate_mentions(self, graph_builder, mentions, cluster_index):
for mention in mentions:
# For each mention word assign the cluster id to cluster attribute
# and mark start and end with '(' and ')'
terms = graph_builder.get_words(mention)
# Valid for 0, 1 and n list sides
if terms:
if len(terms) == 1:
self._mark_coreference(terms[0], "[{0}".format(cluster_index))
self._mark_coreference(terms[0], "{0}]".format(cluster_index))
else:
self._mark_coreference(terms[0], "[{0}".format(cluster_index))
self._mark_coreference(terms[-1], "{0}]".format(cluster_index))
@staticmethod
def _mark_coreference(word, coreference_string):
""" Append to a word a coreference string
:param word: The word that forms part of a mention
:param coreference_string: The coreference string
"""
if "coreference" not in word:
word["coreference"] = [coreference_string]
else:
word["coreference"].append(coreference_string)
```
#### File: resources/files/utils.py
```python
import marshal
import logging
__author__ = '<NAME> <<EMAIL>>'
logger = logging.getLogger(__name__)
def load_file(file_name):
""" Load a file into a line list and remove the next line ending character.
:param file_name: The name of the file to load
:return: A list of file lines
"""
data_file = open(file_name, 'r')
data = [line[:-1] for line in data_file]
data_file.close()
return data
def split_gendername_file(filename):
""" Load a file of word marked by gender.
The file is a two column per file text file: The first column is the word
and the second is the gender separated by a tab.
:param filename: The name(path) of the file to load.
:return: return a female anf
"""
combined = open(filename, 'r')
male = []
female = []
for index, line in enumerate(combined):
try:
name, gender = line.replace('\n', '').split('\t')
if gender == "MALE":
male.append(name)
elif gender == "FEMALE":
female.append(name)
except Exception as ex:
logger.exception("ERROR in combine name file line: %s", index)
combined.close()
return female, male
def bergma_split(filename):
""" Load the bergsma file into a dict of tuples. Try to keep a marshaled
version of the file. If you change the file, remember to erase the
marshalled version.
"""
marshal_filename = filename + ".marshal"
try:
with open(marshal_filename, 'r') as data_file:
data = marshal.load(data_file)
return data
except IOError as ex:
logger.info("No marshal file")
logger.debug("Reason: %s", ex)
with open(filename, 'r') as data_file:
data = dict()
for index, line in enumerate(data_file):
try:
form, stats = line.split("\t")
data[form] = tuple([int(x) for x in stats.split()])
except Exception as ex:
pass
logger.debug("line(%s) sipped: %s", index, ex)
try:
with open(marshal_filename, 'w') as store_file:
marshal.dump(data, store_file, -1)
logger.warning("Created marshal file")
logger.debug("path: %s", marshal_filename)
except IOError as ex:
logger.warning("Marshal file not created %s", marshal_filename)
pass
return data
``` |
{
"source": "josue0ghost/modelacion-y-simulacion",
"score": 3
} |
#### File: modelacion-y-simulacion/MLB Simulation/main.py
```python
import pandas as pd
import pyodbc
import random
import datetime
import copy as cpy
class Action:
_key = ""
count = 0
valueA = 0
valueB = 0
def __init__(self, key, count):
self._key = key
self.count = count
@property
def key(self):
return self._key
@key.setter
def key(self, value):
self._key = value
class Range:
# Public Attributes
action = ""
# Private Attributes
__start = 0
__close = 0
def __init__(self, base, space, action):
self.action = action
self.__start = base
self.__close = base + space
def in_range(self, number):
return (self.__start <= number) and (number < self.__close)
def to_string(self):
return f"{self.action} | {self.__start} | {self.__close}"
class BatScenario:
name = ""
moves = 0
outs = 0
def __init__(self, name, moves, outs):
self.name = name
self.moves = moves
self.outs = outs
class TeamStats:
id = 0
teamid = ""
_singles = 0
_doubles = 0
_triples = 0
_home_runs = 0
_base_on_balls = 0
_hit_by_pitch = 0
_sacrifice = 0
_double_played = 0
_strike_out = 0
_fg_out = 0
_plates = 0
_leagueID = ""
_divID = ""
name = ""
def __init__(self, item):
self.id = item[0]
self.teamid = item[1]
self._singles = item[2]
self._doubles = item[3]
self._triples = item[4]
self._home_runs = item[5]
self._base_on_balls = item[6]
self._hit_by_pitch = item[7]
self._sacrifice = item[8]
self._double_played = item[9]
self._strike_out = item[10]
self._fg_out = item[11]
self._plates = item[12]
self._leagueID = item[13]
self._divID = item[14]
self.name = item[15]
@property
def leagueID(self):
return self._leagueID
@property
def divID(self):
return self._divID
@property
def base_on_balls(self):
return self._base_on_balls
@base_on_balls.setter
def base_on_balls(self, value):
self._base_on_balls = value
@property
def double_played(self):
return self._double_played
@double_played.setter
def double_played(self, value):
self._double_played = value
@property
def doubles(self):
return self._doubles
@doubles.setter
def doubles(self, value):
self._doubles = value
@property
def fg_outs(self):
return self._fg_out
@fg_outs.setter
def fg_outs(self,value):
self._fg_out = value
@property
def hit_by_pitch(self):
return self._hit_by_pitch
@hit_by_pitch.setter
def hit_by_pitch(self,value):
self._hit_by_pitch = value
@property
def home_runs(self):
return self._home_runs
@home_runs.setter
def home_runs(self,value):
self._home_runs = value
@property
def sacrifice(self):
return self._sacrifice
@sacrifice.setter
def sacrifice(self, value):
self._sacrifice = value
@property
def singles(self):
return self._singles
@singles.setter
def singles(self,value):
self._singles = value
@property
def strike_out(self):
return self._strike_out
@strike_out.setter
def strike_out(self,value):
self._strike_out = value
@property
def triple(self):
return self._triples
@triple.setter
def triple(self,value):
self._triples = value
class Inning:
# Private Attributes
__bases = []
# Public Attributes
runs = 0
outs = 0
def __init__(self):
self.__bases = [False, False, False, False, False]
self.runs = 0
self.outs = 0
@property
def is_active(self):
return self.outs < 3
@property
def have_runners(self):
return self.__bases.count(True) > 0
'''
Copy an inning
'''
def copy(self):
copy = Inning()
copy.runs = self.runs
copy.outs = self.outs
copy.__bases = cpy.deepcopy(self.__bases)
return copy
'''
Moves n given bases in an inning
Parameters:
number -- times bases are moving
Exceptions:
if number < 0
'''
def move(self, number=""):
if number == "":
copy = self.copy()
for i in range(4, 0, -1):
copy.__bases[i] = copy.__bases[i - 1]
copy.__bases[0] = False
if copy.__bases[4]:
copy.runs += 1
copy.__bases[4] = False
return copy
else:
v_copy = self.copy()
while number > 0:
v_copy = v_copy.move()
number += -1
return v_copy
'''
Makes n given outs in an inning
Parameters:
number -- amount of outs
'''
def out(self, number=""):
if number == "":
copy = self.copy()
copy.__bases[0] = False
copy.outs += 1
return copy
elif number == 1:
return self.out()
elif number == 2:
return self.double_play()
else:
return self.copy()
'''
Makes a run in a inning
If the team is winner, does add_plate() and move(4)
Then does out() 3 times
Parameters:
is_winner -- Boolean
'''
def one_run(self, is_winner=True):
ini = Inning()
if is_winner:
ini = ini.add_plate()
ini = ini.move(4)
# 3 outs
ini = ini.out()
ini = ini.out()
ini = ini.out()
return ini
'''
Simulates an out of the batting player
and a runner
'''
def double_play(self):
copy = self.out()
if copy.__bases[3] == True:
copy.__bases[3] = False
elif copy.__bases[2] == True:
copy.__bases[2] = False
else:
copy.__bases[1] = False
return copy
'''
Simulates adding a player to home
'''
def add_plate(self):
copy = self.copy()
copy.__bases[0] = True
return copy
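# Usage sketch (illustrative, not from the original code): Inning methods return
# modified copies, so a single at-bat is chained, e.g.:
# inning = Inning().add_plate() # batter steps in (occupies slot 0)
# inning = inning.move(1) # a single: every runner advances one base
# inning = inning.out(1) # later in the inning, a routine out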
class TeamData:
_Name = ""
_Counters = []
_rangeA = []
_rangeB = []
@property
def counters(self):
return {counter.name : counter for counter in self._Counters}
@counters.setter
def counters(self, value):
self._Counters = value
@property
def rangeA(self):
return list(self._rangeA)
@rangeA.setter
def rangeA(self, value):
self._rangeA = value
@property
def rangeB(self):
return list(self._rangeB)
@rangeB.setter
def rangeB(self, value):
self._rangeB = value
'''
Collects the probability data of a team
(one TeamData is built per team taking part in a match)
Parameters:
teamStats -- statistics of a team
'''
def __init__(self, teamStats = TeamStats):
self.Name = teamStats.teamid
self.Counters = [
Action("base_on_balls", teamStats.base_on_balls),
Action("double_played", teamStats.double_played),
Action("doubles", teamStats.doubles),
Action("fg_outs", teamStats.fg_outs),
Action("hit_by_pitch", teamStats.hit_by_pitch),
Action("home_runs", teamStats.home_runs),
Action("sacrifice", teamStats.sacrifice),
Action("singles", teamStats.singles),
Action("strike_out", teamStats.strike_out),
Action("triple", teamStats.triple)
]
full_div = lambda num,dem: float(num)/float(dem)
ts_plates = teamStats._plates
res = teamStats._plates - teamStats._sacrifice - teamStats._double_played
for item in self.Counters:
item.valueA = full_div(item.count, ts_plates)
if item.key == "sacrifice" or item.key == "double_played":
item.valueB = 0
else:
item.valueB = full_div(item.count, res)
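# Note (inferred from Game.playInning): valueA is the outcome probability over
# all plate appearances and feeds rangeA, used while there are runners on base;
# valueB excludes sacrifices and double plays (impossible with empty bases) and
# feeds rangeB, used when the bases are empty.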
list_team_a = [x for x in self.Counters]
list_team_a.sort(key=lambda x: x.valueA, reverse=True)
self._rangeA = []
counter = 0
for item in list_team_a:
Range_ = Range(counter, item.valueA, item.key)
self._rangeA.append(Range_)
counter+=item.valueA
list_team_b = [i for i in self.Counters if i.key != "sacrifice" and i.key != "double_played"]
list_team_b.sort(key=lambda x: x.valueB, reverse=True)
counter = 0
self._rangeB = []
for item in list_team_b:
Range_ = Range(counter, item.valueB, item.key)
self._rangeB.append(Range_)
counter+=item.valueB
class Game:
_team_a = TeamData
_team_b = TeamData
_result_a = []
_result_b = []
_simulations = []
'''
Collect the possible outcomes when a batter
goes to a plate
'''
def get_bat_scenarios(self):
item = [0,"",0,0,0,0,0,0,0,0,0,0,0,"","",""]
v_team_stats = TeamStats(item)
l_scenarios = [
BatScenario("singles", 1, 0),
BatScenario("doubles", 2, 0),
BatScenario("triple", 3, 0),
BatScenario("home_runs", 4, 0),
BatScenario("base_on_balls", 1, 0),
BatScenario("hit_by_pitch", 1, 0),
BatScenario("sacrifice", 1, 1),
BatScenario("strike_out", 0, 1),
BatScenario("double_played", 0, 2),
BatScenario("fg_outs", 0, 1),
]
return {scenarios.name : scenarios for scenarios in l_scenarios}
@property
def TeamA(self):
return self._team_a
@TeamA.setter
def TeamA(self, teamA):
self._team_a = teamA
@property
def TeamB(self):
return self._team_b
@TeamB.setter
def TeamB(self, teamB):
self._team_b = teamB
@property
def ResultA(self):
return self._result_a
@ResultA.setter
def ResultA(self, value):
self._result_a = value
@property
def ResultB(self):
return self._result_b
@ResultB.setter
def ResultB(self, value):
self._result_b = value
@property
def Simulations(self):
return list(self._simulations)
@Simulations.setter
def Simulations(self, value):
self._simulations = value
@property
def RunsA(self):
ra_list = [i.runs for i in self._result_a]
return sum(ra_list)
@property
def RunsB(self):
rb_list = [i.runs for i in self._result_b]
return sum(rb_list)
'''
Simulates an inning of a team
Parameters:
data -- Probabilities of a team -- type: TeamData
'''
def playInning(self, data = TeamData):
data_range_a = data.rangeA
data_range_b = data.rangeB
scenarios = self.get_bat_scenarios()
random.seed(datetime.datetime.now().microsecond)
inning_ = Inning()
while inning_.is_active:
rnd = random.random()
action = ""
if inning_.have_runners:
element = next(x for x in data_range_a if x.in_range(rnd))
action = element.action
else:
element = next(x for x in data_range_b if x.in_range(rnd))
action = element.action
scenario = scenarios[action]
inning_ = inning_.add_plate()
inning_ = inning_.out(scenario.outs)
if(inning_.is_active):
inning_ = inning_.move(scenario.moves)
while inning_.is_active:
inning_ = inning_.out(1)
return inning_
'''
Simulates a game between two teams
Parameters:
team_a_data -- Probabilities of team A -- type: TeamData
team_b_data -- Probabilities of team B -- type: TeamData
'''
def __init__(self, team_a_data = TeamData, team_b_data = TeamData):
self.team_a = team_a_data
self.team_b = team_b_data
self.ResultA = []
self.ResultB = []
while len(self.ResultA) < 9: # and len(self.ResultB) < 9: (len(self.ResultB) will always be the same as A's)
self.ResultA.append(self.playInning(self.team_a))
self.ResultB.append(self.playInning(self.team_b))
extraInning = 11
while self.RunsA == self.RunsB and extraInning > 0:
self.ResultA.append(self.playInning(self.team_a))
self.ResultB.append(self.playInning(self.team_b))
extraInning += -1
if self.RunsA == self.RunsB:
random.seed(datetime.datetime.now().microsecond)
rnd = random.random()
if rnd < 0.5:
self.ResultA.append(Inning().one_run(True))
self.ResultB.append(Inning().one_run(False))
else:
self.ResultA.append(Inning().one_run(False))
self.ResultB.append(Inning().one_run(True))
class Journie:
games = []
results = {}
'''
Simulates a Journie collecting the results
Parameters:
data -- list of team combinations for the matches
'''
def __init__(self, data):
self.games = []
self.results = {}
self.games = [Game(item[0], item[1]) for item in data]
for item in self.games:
if item.RunsA > item.RunsB:
self.results[item.team_a.Name] = True
self.results[item.team_b.Name] = False
else:
self.results[item.team_a.Name] = False
self.results[item.team_b.Name] = True
class Season:
# Private attributes
__results = {}
__journies = []
@property
def results(self):
return self.__results
@property
def journies(self):
return self.__journies
'''
Simulates a Season
consisting of 162 games (3 journies * 54)
Parameters:
teams -- list of TeamData of all the teams
'''
def __init__(self, teams = []):
for i in range(54):
serie = self.combination(teams)
self.__journies += [Journie(serie) for j in range(3)]
self.__results = {item.Name:[0,0] for item in teams}
journies_list = [type('', (object,), {'res':j.results})() for j in self.journies]
for dicc in journies_list:
results = dicc.res.items()
for key,value in results:
res = self.__results[key]
if value:
self.__results[key][0] = res[0] + 1
else:
self.__results[key][1] = res[1] + 1
'''
Returns possible combinations of teams for matches
Parameters:
teams -- list of TeamData of all the teams
'''
def combination(self, teams=[]):
combs = []
copy = cpy.deepcopy(teams)
random.seed(datetime.datetime.now().microsecond)
while len(copy) > 0:
index1 = 0
index2 = 0
while index1 == index2:
index1 = random.randint(0, len(copy)-1)
index2 = random.randint(0, len(copy)-1)
team_data = [
copy[index1],
copy[index2]
]
combs.append(team_data)
if index1 > index2:
copy.pop(index1)
copy.pop(index2)
else:
copy.pop(index2)
copy.pop(index1)
return combs
class Engine:
# Private attributes
__seasons = []
# Public attributes
seasons = []
teams = {}
'''
Runs n given Season simulations
Parameters:
simulations -- number of Seasons to simulate
'''
def __init__(self, simulations):
conn = pyodbc.connect(
'Driver={SQL Server};'
'Server=url-2021.database.windows.net;'
'Database=mys_url;'
'UID=url_2021;'
'PWD=<PASSWORD>;'
)
SQL = "SELECT * FROM teamStats"
df = pd.read_sql_query(SQL, conn)
stats = df.values.tolist()
data = []
_stats = []
for item in stats:
st = TeamStats(item)
data.append(TeamData(st))
_stats.append(st)
self.teams = {x.teamid: x for x in _stats}
for index in range(simulations):
print(f"Simulating season #{index+1}... please, wait")
self.__seasons.append(Season(data))
self.seasons = cpy.deepcopy(self.__seasons)
# Post Season Tagging
class SimulationsTeamResults:
_sims_team_res_id = 0
_iteracion = 0
_team_id = ""
_team_name = ""
_league = ""
_division = ""
_wins = 0
_losses = 0
league_rank = 0
division_rank = 0
is_in_post_season = False
def to_row(self):
row = []
row.append(self._sims_team_res_id)
row.append(self._iteracion)
row.append(self._team_id)
row.append(self._team_name)
row.append(self._league)
row.append(self._division)
row.append(self._wins)
row.append(self._losses)
row.append(self.league_rank)
row.append(self.division_rank)
row.append(self.is_in_post_season)
return row
def __init__(self, item):
self._sims_team_res_id = item[0]
self._iteracion = item[1]
self._team_id = item[2]
self._team_name = item[3]
self._league = item[4]
self._division = item[5]
self._wins = item[6]
self._losses = item[7]
self.is_in_post_season = False
@property
def strID(self):
return self._sims_team_res_id
@property
def iteration(self):
return self._iteracion
@property
def teamID(self):
return self._team_id
@property
def teamName(self):
return self._team_name
@property
def league(self):
return self._league
@property
def division(self):
return self._division
@property
def wins(self):
return self._wins
@property
def losses(self):
return self._losses
# MAIN EXECUTION
eng = Engine(10)
season = 1
row = 0
results = []
for item in eng.seasons:
res = item.results.items()
for key,value in res:
t = eng.teams[key]
results.append([row, season, key, t.name, t.leagueID, t.divID, value[0], value[1]])
row += 1
season += 1
print("======================================================")
dfr = pd.DataFrame(results, columns=['Row', 'Season', 'TEAM_ID', 'TEAM_NAME', 'LEAGUE', 'DIVISION', 'WINS', 'LOSSES'])
print(dfr)
print("======================================================")
print("Simulation finished")
sims_team_result_list = []
dfr.sort_values(by='Row', ascending=True, inplace=True)
for line in dfr.values.tolist():
sims_team_result_list.append(SimulationsTeamResults(line))
# functions to assign positions
def league_position(sorted_data_frame):
league_pos = 1
sorted_df_list = sorted_data_frame.values.tolist()
for row in sorted_df_list:
row_num = row[0]
sims_team_result_list[row_num].league_rank = league_pos
league_pos += 1
def division_position(sorted_data_frame):
division_pos = 1
sorted_df_list = sorted_data_frame.values.tolist()
for row in sorted_df_list:
row_num = row[0]
sims_team_result_list[row_num].division_rank = division_pos
division_pos += 1
# assign positions
iterations = int(len(sims_team_result_list) / 30)
for i in range(iterations):
season = i + 1
national_league_df = dfr.query('Season == ' + str(season) + ' and LEAGUE == "NL"', inplace = False)
american_league_df = dfr.query('Season == ' + str(season) + ' and LEAGUE == "AL"', inplace = False)
# sort for position in league
national_league_df.sort_values(by=['WINS'], ascending=False, inplace=True)
league_position(national_league_df)
american_league_df.sort_values(by=['WINS'], ascending=False, inplace=True)
league_position(american_league_df)
# sort for positions in Division
for division in ["W","C","E"]:
# NL
qd = 'DIVISION == "' + division + '"'
national_league_division_df = national_league_df.query(qd, inplace=False)
national_league_division_df.sort_values(by=['WINS'], ascending=False, inplace=True)
division_position(national_league_division_df)
# AL
american_league_division_df = american_league_df.query(qd, inplace=False)
american_league_division_df.sort_values(by=['WINS'], ascending=False, inplace=True)
division_position(american_league_division_df)
# rules for Post Season
for item in sims_team_result_list:
if(item.division_rank == 1):
item.is_in_post_season = True
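# Wild cards (inferred from extra_in_league below): besides the division winners,
# the two best-ranked non-winners of each league are also tagged for the post season.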
for i in range(iterations):
season = i + 1
def extra_in_league(l):
def candidates_filter(str_obj=SimulationsTeamResults):
val = True
val = val & (str_obj.iteration == season)
val = val & (str_obj.division_rank != 1)
val = val & (str_obj.league == l)
return val
candidates_list = list(filter(candidates_filter, sims_team_result_list))
candidates_list.sort(key=lambda x: x.league_rank)
teamid_A = candidates_list[0].strID
teamid_B = candidates_list[1].strID
sims_team_result_list[teamid_A].is_in_post_season = True
sims_team_result_list[teamid_B].is_in_post_season = True
extra_in_league("NL")
extra_in_league("AL")
positions_results = [item.to_row() for item in sims_team_result_list]
print("======================================================")
cols=['Row', 'Season', 'TEAM_ID', 'TEAM_NAME', 'LEAGUE', 'DIVISION', 'WINS', 'LOSSES','LEAGUE_RANK','DIV_RANK','POSTSEASON']
final_results_df = pd.DataFrame(positions_results, columns=cols)
print(final_results_df)
print("======================================================")
print("Post Season Tagging Finished")
``` |
{
"source": "josue0ghost/Python-and-MySQL-console-application",
"score": 4
} |
#### File: src/users/actions.py
```python
import users.user as user
import grades.actions as grade
class Actions:
def signup(self):
print("Selected item: signup")
name = input("Your name: ")
lastname = input("Your last name: ")
email = input("Your email: ")
password = input("Choose a password: ")
newUser = user.User(name, lastname, email, password)
reg = newUser.register()
if reg[0] >= 1:
print(f"{reg[1].name}, you've been registered with email {reg[1].email}")
else:
print("Registration failed")
def signin(self):
try:
email = input("Email: ")
password = input("Password: ")
existingUser = user.User('', '', email, password)
login = existingUser.identify()
# id | name | lastname | email | password | date
if email == login[3]:
print(f"Welcome, {login[1]}")
self.mainMenu(login)
except Exception as e:
print(type(e))
print(type(e).__name__)
print("Login failed")
def mainMenu(self, user):
print("""
Available options:
- Create grade (create)
- Show grades (show)
- Delete grade (delete)
- Log out (exit)
""")
action = input("What do you want to do?: ")
gradeActions = grade.Actions()
if action == "create":
gradeActions.create(user)
self.mainMenu(user)
elif action == "show":
gradeActions.show(user)
self.mainMenu(user)
elif action == "delete":
gradeActions.delete(user)
self.mainMenu(user)
elif action == "exit":
exit()
``` |
{
"source": "josue1471515/backend-api-python",
"score": 3
} |
#### File: josue1471515/backend-api-python/sqlConnection.py
```python
from flask_api import FlaskAPI, status, exceptions
from flask_pymongo import PyMongo
from bson import json_util
app = FlaskAPI(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/bear"
mongo = PyMongo(app)
@app.route("/")
def getUsersModel():
online_users = mongo.db.users.find({"userState":True})
return json_util.dumps(online_users,default=json_util.default),status.HTTP_200_OK
``` |
{
"source": "Josue87/wp_hunter",
"score": 2
} |
#### File: modules/php/test.py
```python
from modules._template import Template
class Module(Template):
def __init__(self):
pattern_list = [r"regex1", r"regex2"] # All the expressions you want
super(Module, self).__init__(pattern_list, "test_php")
```
#### File: Josue87/wp_hunter/wp_hunter.py
```python
import requests
from bs4 import BeautifulSoup
from time import sleep
import wget
from zipfile import ZipFile
from subprocess import Popen, PIPE
import importlib
import os
class Module:
def __init__(self):
self.modules_php = []
self.modules_js = []
self.load_modules()
def _load_module(self, pwd):
my_path = pwd.replace("/", ".")
my_path = "modules." + my_path
module = importlib.import_module(my_path)
return module.Module()
def load_modules(self):
# Save compiled modules in a list (avoid loading them several times)
for (p, _, files) in os.walk("./modules/php"):
self.modules_php.extend([self._get_module(f, p) for f in files
if ("_" not in f) and ("_" not in p) and (not f.endswith(".pyc"))])
while None in self.modules_php:
self.modules_php.remove(None)
for (p, _, files) in os.walk("./modules/js"):
self.modules_js.extend([self._get_module(f, p) for f in files
if ("_" not in f) and ("_" not in p) and (not f.endswith(".pyc"))])
while None in self.modules_js:
self.modules_js.remove(None)
def _get_module(self, module_file, pwd):
# Load the module and return it
try:
data = self._load_module(os.path.join(pwd.replace("./modules/", ""), module_file.replace(".py", "")))
except Exception as e:
print(e)
data = None
return data
class Analyze:
def __init__(self):
module_generate = Module()
self.modules_php = module_generate.modules_php
self.modules_js = module_generate.modules_js
def remove_empty_result(self, files):
while "" in files:
files.remove("")
return files
def get_files(self, directory, ext):
result = Popen(["find", directory, "-name", ext], stdout=PIPE, stderr=PIPE)
return self.remove_empty_result(result.stdout.read().decode(errors="ignore").split("\n"))
def get_download_url(self, plugin):
req = f"https://api.wordpress.org/plugins/info/1.0/{plugin}.json"
response = requests.get(req)
try:
data = response.json()
# last_updated = data["last_updated"]
# Check Update to discard old plugins (if we check early)
versions = data["versions"]
last = list(versions.popitem())
if last[0] == "trunk":
last = list(versions.popitem())
return last[1].replace("\\","")
except:
return None
def process_plugin(self, url):
plugin_name = wget.download(url)
print("")
with ZipFile(plugin_name, 'r') as zipObj:
zipObj.extractall('./plugins')
# Remove zip
os.system(f"rm {plugin_name}")
folder_to_analyze = "plugins/" + plugin_name.replace(".zip", "")
folder_to_analyze = "./" + folder_to_analyze.split(".")[0]
php_files = self.get_files(folder_to_analyze, "*.php")
self.process_files(php_files, self.modules_php)
js_files = self.get_files(folder_to_analyze, "*.js")
js_files.extend(self.get_files(folder_to_analyze, "*.html"))
self.process_files(js_files, self.modules_js)
# Remove final .version
os.system(f'rm -rf {folder_to_analyze}')
def process_files(self, files, modules):
for f in files:
print(f)
with open(f) as open_file:
code = open_file.read()
for module in modules:
data = module.check_code(f, code)
if data:
self.write_results(f, data)
def write_results(self, f, vulnerabilities):
print(f"[+] vulnerabilities found in {f}")
for vuln in vulnerabilities:
name = "./results/vulnerabilities.txt"
with open(name, "a+") as dump_result:
dump_result.write(f + "\n")
dump_result.write("-"*len(f) + "\n")
try:
for k, v in vuln.items():
dump_result.write(f"{k}: {v}\n")
dump_result.write("\n")
except:
pass
def start_analysis(self):
header = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36"}
response = requests.get("http://plugins.svn.wordpress.org/", headers=header)
code = response.status_code
if code == 200:
plugins = response.text
soup = BeautifulSoup(plugins, 'html.parser')
refs = soup.find_all('a')
print(f"[*] Total plugins {len(refs)}")
print("[*] Starting the analisys")
for a in refs:
plugin = a.get("href").strip("/")
try:
download_url = self.get_download_url(plugin)
if download_url:
## Download
print(download_url)
self.process_plugin(download_url)
except Exception as e:
print(e)
sleep(2)
else:
print("[-] Response code: " + str(code))
if __name__ == "__main__":
print("""
__ ____________ ___ ___ __
/ \ / \______ \/ | \ __ __ _____/ |_ ___________
\ \/\/ /| ___/ ~ \ | \/ \ __\/ __ \_ __ \\
\ / | | \ Y / | / | \ | \ ___/| | \/
\__/\ / |____|____\___|_ /|____/|___| /__| \___ >__|
\/ /_____/ \/ \/ \/
|__ Author: @JosueEncinar
""")
print("[*] Starting the process")
try:
Analyze().start_analysis()
except KeyboardInterrupt:
print("[*] CTRL^C - Bye")
except Exception as e:
print("[-] Something was wrong")
print(e)
``` |
{
"source": "josuearaujo/multi_robot_sim",
"score": 2
} |
#### File: multi_robot_sim/src/client.py
```python
import rospy
import random
from std_msgs.msg import String
import time
def generate_list():
lista = []
coordinatesStream = "20,13.5,18,13.5,16,13.5,14,13.5,19,18,-8.1,18.2,-9.9,18.2,-11.7,18.2,-8.1,13.7,-9.9,3.7,-11.7,13.7,-8.1,9.2,-9.9,9.2,-11.7,9.2,-15.3,18.2,-17.1,18.2,-18.9,18.2,-20.7,18.2,-15.3,13.7,-17.1,13.7,-18.9,13.7,-20.7,13.7,-15.3,9.2,-17.1,9.2,-18.9,9.2,-20.7,9.2,-6.9,14.7,-8.7,14.7,-10.5,14.7,-6.9,10.2,-8.7,10.2,-10.5,10.2,-6.9,5.7,-8.7,5.7,-10.5,5.7,-14.1,14.7,-15.9,14.7,-17.7,14.7,-19.5,14.7,-14.1,10.2,-15.9,10.2,-17.7,10.2,-19.5,10.2,-14.1,5.7,-15.9,5.7,-17.7,5.7,-19.5,5.7,3,14.3,-3,12.6,3,8.2,-3,6.5,10,21,5,21,-11.5,21,-16,21,-20.5,21,-24.5,-18.8,-25,3,-19,3,-13,3".split(',')
for i in xrange(0, len(coordinatesStream), 2):
coord = coordinatesStream[i] + ',' + coordinatesStream[i+1]
lista.append(coord)
print(lista)
return lista
# If the python node is executed as main process (sourced directly)
if __name__ == '__main__':
lista = generate_list()
try:
rospy.init_node('coffee_client', anonymous=True)
print("Iniciou o no coffee_client")
pub = rospy.Publisher('coffe_request_channel', String, queue_size=10)
time.sleep(2)
raw_input("Aperte ENTER para enviar um cafe!")
while not rospy.is_shutdown():
# msg = str(random.randint(-5, 2)) + ',' + str(random.randint(-5, 2))
msg = random.choice(lista)
print('Msg: ' + msg)
pub.publish(msg)
raw_input("Aperte ENTER para enviar outro cafe!")
except rospy.ROSInterruptException:
rospy.loginfo("Test finished.")
```
#### File: multi_robot_sim/src/productor_consumer.py
```python
import threading
import time
import logging
import random
import Queue
import rospy
from std_msgs.msg import String
import actionlib
from move_base_msgs.msg import MoveBaseAction
from multi_robot_sim.msg import RobotDeliveryAction, RobotDeliveryGoal, RobotDeliveryResult
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
BUF_SIZE = 100
coffe_requests = Queue.Queue(BUF_SIZE)
robots_status = [0,0] # 0 -> Free 1 -> Busy
class ProducerThread(threading.Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
super(ProducerThread,self).__init__()
self.target = target
self.name = name
rospy.init_node('producer_listener', anonymous=True)
rospy.Subscriber("coffe_request_channel", String, self.addToQueue)
def addToQueue(self, data):
rospy.loginfo(data.data + ' -- size: ' + str(coffe_requests.qsize()))
coffe_requests.put(data.data)
class ConsumerThread(threading.Thread):
def __init__(self, name, r):
super(ConsumerThread,self).__init__()
self.name = name
self.robots = r
def run(self):
while True:
if not coffe_requests.empty():
# item = coffe_requests.get()
# logging.debug('Getting ' + str(item) + ' : ' + str(coffe_requests.qsize()) + ' items in queue')
for i in range(len(robots_status)):
if(robots_status[i] == 0):
pos = coffe_requests.get().split(',')
pos_x = float(pos[0])
pos_y = float(pos[1])
self.robots[i].setGoal(pos_x, pos_y)
return
class RobotController(threading.Thread):
receive_goal = False
robotDeliveryGoal = RobotDeliveryGoal()
def __init__(self, robot_server_name, robot_number):
super(RobotController,self).__init__()
self.robot_server_name = robot_server_name
self.robot = actionlib.SimpleActionClient(robot_server_name, RobotDeliveryAction)
self.robot_number = robot_number
def run(self):
while(True):
if self.receive_goal:
self.receive_goal = False
self.robot.wait_for_server()
print(self.robot_server_name + " Goal foi enviado! (" + str(self.robotDeliveryGoal.x) + "," + str(self.robotDeliveryGoal.y) + ")")
self.robot.send_goal(self.robotDeliveryGoal)
self.robot.wait_for_result()
result = self.robot.get_result()
print(self.robot_server_name + " Resultado recebido!!")
robots_status[self.robot_number-1] = 0
else:
time.sleep(0.5)
return
def setGoal(self, x, y):
robots_status[self.robot_number-1] = 1
self.robotDeliveryGoal.x = x
self.robotDeliveryGoal.y = y
self.receive_goal = True
print(self.robot_server_name + " Goal foi setado!")
return
# def robots_status_control(data):
# robot_number = int(data.data.split(':')[0][len(data.data.split(':')[0])-1])
# robots_status[robot_number-1] = 0;
if __name__ == '__main__':
r = []
for i in range(len(robots_status)):
robot_server_name = '/robot' + str(i+1) +'/robot_delivery'
r.append(RobotController(robot_server_name, i+1))
r[i].start()
p = ProducerThread(name='producer')
p.start()
c = ConsumerThread('consumer', r)
c.start()
# rospy.Subscriber("robots_status_topic", String, robots_status_control)
rospy.spin()
```
#### File: multi_robot_sim/src/states_robot1.py
```python
import rospy
import smach
import smach_ros
import actionlib
from actionlib_msgs.msg import GoalStatus
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import random
import time
from std_msgs.msg import String
from multi_robot_sim.msg import RobotDeliveryAction, RobotDeliveryGoal, RobotDeliveryResult
# define state Cozinha
class Cozinha(smach.State):
request = False
new_goal = MoveBaseGoal()
def __init__(self):
smach.State.__init__(self, outcomes=['entregarPedido'],
output_keys=['goal'])
def execute(self, userdata):
rospy.loginfo('Executing state Cozinha')
while(True):
if(self.request):
self.request = False
userdata.goal = self.new_goal
return 'entregarPedido'
else:
time.sleep(0.5)
else:
print('cheguei aqui')
def setGoal(self, pos_x, pos_y):
self.request = True
self.new_goal.target_pose.header.frame_id = "map"
self.new_goal.target_pose.header.stamp = rospy.Time.now()
self.new_goal.target_pose.pose.position.x = pos_x
self.new_goal.target_pose.pose.position.y = pos_y
self.new_goal.target_pose.pose.orientation.w = 1
def getPos(self, data):
pos = data.data.split(',')
self.destino_x = int(pos[0])
self.destino_y = int(pos[1])
self.request = True
# define state RealizandoEntrega
class RealizandoEntrega(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['chegouDestino','retornarCozinha'],
input_keys=['goal'])
self.move_base = actionlib.SimpleActionClient('robot1/move_base', MoveBaseAction)
def execute(self, userdata):
rospy.loginfo('Executing state RealizandoEntrega')
self.move_base.wait_for_server()
self.move_base.send_goal(userdata.goal)
self.move_base.wait_for_result()
state = self.move_base.get_state()
if state == GoalStatus.SUCCEEDED:
return 'chegouDestino'
else:
return 'retornarCozinha'
# define state Destino
class Destino(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['retornarCozinha'])
def execute(self, userdata):
rospy.loginfo('Executing state Destino')
time.sleep(5)
return 'retornarCozinha'
# define state RetornandoCozinha
class RetornandoCozinha(smach.State):
def __init__(self, deliveryRobot):
smach.State.__init__(self, outcomes=['chegouCozinha', 'retornarCozinha', 'final'])
self.move_base = actionlib.SimpleActionClient('robot1/move_base', MoveBaseAction)
self.goal = MoveBaseGoal()
self.goal.target_pose.header.frame_id = "map"
self.goal.target_pose.header.stamp = rospy.Time.now()
self.goal.target_pose.pose.position.x = 0
self.goal.target_pose.pose.position.y = 21
self.goal.target_pose.pose.orientation.w = 1
self.deliveryRobot = deliveryRobot
def execute(self, userdata):
rospy.loginfo('Executing state RetornandoCozinha')
self.move_base.wait_for_server()
self.move_base.send_goal(self.goal)
self.move_base.wait_for_result()
state = self.move_base.get_state()
msg = "Finalizado com status: " + str(state)
if state == GoalStatus.SUCCEEDED:
result = state
self.deliveryRobot.setResult(result)
return 'chegouCozinha'
else:
return 'retornarCozinha'
return 'final'
class DeliveryRobot():
waiting_result = True
robotDeliveryResult = RobotDeliveryResult()
def __init__(self, stateCozinha):
self._as = actionlib.SimpleActionServer("robot1/robot_delivery", RobotDeliveryAction, execute_cb=self.send_coffe, auto_start = False)
self._as.start()
self.stateCozinha = stateCozinha
self.robotDeliveryResult.robot = 1 #robot1
def send_coffe(self, goal):
self.stateCozinha.setGoal(goal.x,goal.y)
while(True):
if not self.waiting_result:
self.waiting_result = True
self._as.set_succeeded(self.robotDeliveryResult)
return
else:
time.sleep(0.5)
def setResult(self, result):
self.robotDeliveryResult.status = result
self.waiting_result = False
return
def main():
rospy.init_node('state_machine_robot1')
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=['FIM'])
stateCozinha = Cozinha()
deliveryRobot = DeliveryRobot(stateCozinha)
stateRetornandoCozinha = RetornandoCozinha(deliveryRobot)
# Open the container
with sm:
# Add states to the container
smach.StateMachine.add('COZINHA', stateCozinha,
transitions={'entregarPedido':'REALIZANDO_ENTREGA'})
smach.StateMachine.add('REALIZANDO_ENTREGA', RealizandoEntrega(),
transitions={'chegouDestino':'DESTINO', 'retornarCozinha':'RETORNANDO_COZINHA'},
remapping={'goal':'goal'})
smach.StateMachine.add('DESTINO', Destino(),
transitions={'retornarCozinha':'RETORNANDO_COZINHA'})
smach.StateMachine.add('RETORNANDO_COZINHA', stateRetornandoCozinha,
transitions={'chegouCozinha':'COZINHA', 'retornarCozinha':'RETORNANDO_COZINHA', 'final':'FIM'})
# Create and start the introspection server
sis = smach_ros.IntrospectionServer('sm_robot1', sm, '/SM_ROBOT1')
sis.start()
# Execute SMACH plan
outcome = sm.execute()
rospy.spin()
sis.stop()
if __name__ == '__main__':
main()
``` |
{
"source": "josuearaujo/sistema-alocador-para-robos",
"score": 2
} |
#### File: sistema-alocador-para-robos/src/client.py
```python
import rospy
import random
from std_msgs.msg import String
import time
def generate_list():
lista = []
coordinatesStream = "0,0,0,-1,0,-2,0,-5,1,0,1,2,1,-5,2,0,2,1,2,-5,-1,2,-1,0,-1,-1,-1,-2,-1,-3,-1,-4,-1,-5,-2,2,-2,1,-2,0,-2,-1,-2,-2,-2,-5,-4,-5,-5,-5,-3,-4,-4,-4,-5,-4,-3,-3,-4,-3,-5,-3,-4,-2,-5,-2".split(',')
for i in xrange(0, len(coordinatesStream), 2):
coord = coordinatesStream[i] + ',' + coordinatesStream[i+1]
lista.append(coord)
print(lista)
return lista
# If the python node is executed as main process (sourced directly)
if __name__ == '__main__':
lista = generate_list()
try:
rospy.init_node('coffee_client', anonymous=True)
print("Iniciou o no teste_pub")
pub = rospy.Publisher('coffe_request_channel', String, queue_size=10)
time.sleep(2)
while not rospy.is_shutdown():
# msg = str(random.randint(-5, 2)) + ',' + str(random.randint(-5, 2))
msg = random.choice(lista)
print('Msg: ' + msg)
pub.publish(msg)
raw_input("Aperte ENTER para enviar outro cafe!")
except rospy.ROSInterruptException:
rospy.loginfo("Test finished.")
```
#### File: sistema-alocador-para-robos/src/deprecated_states_robot2.py
```python
import rospy
import smach
import smach_ros
import actionlib
from actionlib_msgs.msg import GoalStatus
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import random
import time
# define state Cozinha
class Cozinha(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['entregarPedido'],
output_keys=['goal'])
def execute(self, userdata):
rospy.loginfo('Executing state Cozinha')
new_goal = MoveBaseGoal()
new_goal.target_pose.header.frame_id = "map"
new_goal.target_pose.header.stamp = rospy.Time.now()
new_goal.target_pose.pose.position.x = random.randint(-5, 2)
new_goal.target_pose.pose.position.y = random.randint(-5, 2)
new_goal.target_pose.pose.orientation.w = 1
userdata.goal = new_goal
return 'entregarPedido'
# define state RealizandoEntrega
class RealizandoEntrega(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['chegouDestino','retornarCozinha'],
input_keys=['goal'])
self.client = actionlib.SimpleActionClient('robot2/move_base', MoveBaseAction)
def execute(self, userdata):
rospy.loginfo('Executing state RealizandoEntrega')
self.client.wait_for_server()
self.client.send_goal(userdata.goal)
self.client.wait_for_result()
state = self.client.get_state()
if state == GoalStatus.SUCCEEDED:
return 'chegouDestino'
else:
return 'retornarCozinha'
# define state Destino
class Destino(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['retornarCozinha'])
def execute(self, userdata):
rospy.loginfo('Executing state Destino')
time.sleep(5)
return 'retornarCozinha'
# define state RetornandoCozinha
class RetornandoCozinha(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['chegouCozinha', 'retornarCozinha', 'final'])
self.client = actionlib.SimpleActionClient('robot2/move_base', MoveBaseAction)
self.goal = MoveBaseGoal()
self.goal.target_pose.header.frame_id = "map"
self.goal.target_pose.header.stamp = rospy.Time.now()
self.goal.target_pose.pose.position.x = -1
self.goal.target_pose.pose.position.y = 1
self.goal.target_pose.pose.orientation.w = 1
def execute(self, userdata):
rospy.loginfo('Executing state RetornandoCozinha')
self.client.wait_for_server()
self.client.send_goal(self.goal)
self.client.wait_for_result()
state = self.client.get_state()
if state == GoalStatus.SUCCEEDED:
return 'chegouCozinha'
else:
return 'retornarCozinha'
return 'final'
def main():
rospy.init_node('state_machine_robot2')
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=['FIM'])
stateCozinha = Cozinha()
# Open the container
with sm:
# Add states to the container
smach.StateMachine.add('COZINHA', stateCozinha,
transitions={'entregarPedido':'REALIZANDO_ENTREGA'})
smach.StateMachine.add('REALIZANDO_ENTREGA', RealizandoEntrega(),
transitions={'chegouDestino':'DESTINO', 'retornarCozinha':'RETORNANDO_COZINHA'},
remapping={'goal':'goal'})
smach.StateMachine.add('DESTINO', Destino(),
transitions={'retornarCozinha':'RETORNANDO_COZINHA'})
smach.StateMachine.add('RETORNANDO_COZINHA', RetornandoCozinha(),
transitions={'chegouCozinha':'COZINHA', 'retornarCozinha':'RETORNANDO_COZINHA', 'final':'FIM'})
# Create and start the introspection server
sis = smach_ros.IntrospectionServer('sm_robot2', sm, '/SM_ROBOT2')
sis.start()
# Execute SMACH plan
outcome = sm.execute()
rospy.spin()
sis.stop()
if __name__ == '__main__':
main()
``` |
{
"source": "Josuebmota/TrabalhoIA",
"score": 4
} |
#### File: TrabalhoIA/Auxilares/FuncoesAuxliares.py
```python
import copy
from random import randrange
class Auxiliar: # Will assist the searches
#-------------------------- Assists the uninformed (blind) searches -----------------------------------
def expande(self, no, problema, tipo): # Expands the node
Conjfilhos = [] # Set of children
possibilidades = problema.acoes(no, tipo) # Possible children
if(possibilidades !=[]):
for acoes in range(len(possibilidades)): # Possible paths
nofilho = No() # the No (node) class is expected to come from another module of the project
nofilho._init_(possibilidades[acoes],no,no.custo + problema.custo_do_passo, no.profundidade + 1)
Conjfilhos.append(nofilho) # Adding the child
return Conjfilhos
estado_inicial = [] # generate initial matrix
indice = 8
for l in range(indice):
linha = []
for c in range(indice):
linha.append("'x'")
estado_inicial.append(linha)
def matrizprint(self,matriz): # Print the matrix
for i in range(len(matriz)):
for j in range(len(matriz)):
print(matriz[i][j], end="")
print()
print("\n")
def caminhos(self, no): # Show the path, if needed
caminho = []
while(no!=None):
caminho.append(no)
no = no.pai
return caminho
def detectaQs(self, auxMatriz, linha, coluna): # Function to detect whether the queen is attacked
cont = 0
for l in range(len(auxMatriz)):
if(auxMatriz[l][coluna] == auxMatriz[linha][coluna]):
cont+=1
for c in range(len(auxMatriz)):
if(linha == l):
if(auxMatriz[linha][c] == auxMatriz[linha][coluna]):
cont+=1
# Main diagonal
# Forward loop
c=coluna
for l in range(linha,len(auxMatriz), 1):
if(c!=len(auxMatriz)):
if(auxMatriz[linha][coluna] == auxMatriz[l][c]):
cont+=1
c+=1
# Backward loop
c = coluna
for l in range(linha, -1, -1):
if(c!=-1):
if(auxMatriz[linha][coluna] == auxMatriz[l][c]):
cont+=1
c-=1
# Secondary diagonal
# Backward loop
c= coluna
for l in range(linha, len(auxMatriz), 1):
if(c!=-1):
if(auxMatriz[linha][coluna] == auxMatriz[l][c]):
cont+=1
c-=1
# Forward loop
c = coluna
for l in range(linha, -1, -1):
if(c!=len(auxMatriz)):
if(auxMatriz[linha][coluna] == auxMatriz[l][c]):
cont+=1
c+=1
return cont-6
    # -------------------------- Helpers for local searches -----------------------------------
    def achaQ(self, matriz, col):  # Find the row of the queen in the given column
for linha in range(len(matriz)):
if (matriz[linha][col]=="'Q'"):
return linha
    def ColunaX(self, matriz, coluna):  # Clear an entire column
for linha in range(len(matriz)):
matriz[linha][coluna]= "'x'"
return matriz
    def conflitos(self, tabuleiro):  # Find the direct and indirect conflicts
        conflitos = copy.deepcopy(tabuleiro)
        for col in range(len(conflitos)):  # For each column
            matriz = copy.deepcopy(tabuleiro)
            matriz = Auxiliar.ColunaX(self, matriz, col)  # clear the column
            for lin in range(len(conflitos)):
                matriz[lin][col] = "'Q'"  # Try a queen in this cell
                Batidas = Auxiliar.detectaQs(self, matriz, lin, col)  # Count the direct attacks
                matriz = Auxiliar.ColunaX(self, matriz, col)  # Reset the column
                aux = copy.deepcopy(matriz)  # Auxiliary matrix
                for proxcol in range(len(conflitos)):  # Count the attacks coming from the other columns (indirect)
                    if (proxcol != col):  # Skip the column currently being evaluated
                        linha = Auxiliar.achaQ(self, aux, proxcol)  # Find that column's queen row
                        Batidas = Batidas + Auxiliar.detectaQs(self, aux, linha, proxcol)
                        aux = Auxiliar.ColunaX(self, aux, proxcol)  # Clear the column
                conflitos[lin][col] = Batidas  # Matrix of attack counts
                Batidas = 0
        return conflitos
    def melhorfilho(self, matriz):  # Put the best children into a list
menor = 100
for i in range(len(matriz)):
for j in range(len(matriz)):
if(menor>matriz[i][j]):
menor=matriz[i][j]
liMenor=[]
for i in range(len(matriz)):
for j in range(len(matriz)):
if(matriz[i][j]==menor):
liMenor.append(matriz[i][j])
return liMenor, menor
def gerarCusto(self,conflitos, matriz):
return conflitos[Auxiliar.achaQ(self,matriz,0)][0]
def vizinhos(self, matriz):
vizinhos =[]
for i in range(len(matriz)):
for j in range(len(matriz)):
if(matriz[i][j]!="'Q'"):
aux = copy.deepcopy(matriz)
aux = Auxiliar.ColunaX(self,aux,j)
aux[i][j]="'Q'"
vizinhos.append(aux)
return vizinhos
Batidas = []
visitados = []
def expandelocal(self, no):
aux = copy.deepcopy(no.estado)
        liMenor, menor = Auxiliar.melhorfilho(self, Auxiliar.Batidas)  # Best children
        select = randrange(0, len(liMenor))  # Pick one of the best children at random
contador = -1
for i in range(len(aux)):
for j in range(len(aux)):
                if (menor == Auxiliar.Batidas[i][j]):  # Look for the selected best child
                    contador += 1  # Depends on how many best children there are
                    if (contador == select):  # Found it
                        linha = Auxiliar.achaQ(self, aux, j)  # Row where the queen currently sits
                        aux[linha][j] = "'x'"  # Swap: remove the queen from its old cell...
aux[i][j] = "'Q'"
Auxiliar.Batidas = Auxiliar.conflitos(self,aux)
nofilho = No()
nofilho._init_(aux,no,Auxiliar.gerarCusto(self, Auxiliar.Batidas, aux), no.profundidade + 1)
return nofilho
def Peturbar(self, no):
vizinhos = Auxiliar.vizinhos(self,no.estado)
while(True):
select = randrange(0,len(vizinhos))
if(not vizinhos[select] in Auxiliar.visitados):
Auxiliar.visitados.append(vizinhos[select])
break
Proximo = No()
Proximo._init_(vizinhos[select],no,Auxiliar.gerarCusto(self, Auxiliar.conflitos(self,vizinhos[select]),vizinhos[select]),no.profundidade + 1)
return Proximo
    def selectMask(self):
        # randrange(0, 3) so that each of the three masks below can actually be selected
        a = randrange(0, 3)
        if (a == 0):
            return [0, 0, 0, 0, 1, 1, 1, 1]
        if (a == 1):
            return [1, 1, 1, 0, 0, 0, 0, 0]
        if (a == 2):
            return [1, 1, 0, 0, 0, 0, 1, 1]
```
#### File: TrabalhoIA/Auxilares/ProblemFeature.py
```python
import copy
from random import *


class Problema:  # Characteristics of the problem being solved
    def _init_(self, estado_inicial, acoes, teste_objetivo, custo_do_passo):  # manual initializer, called explicitly after Problema()
self.estado_inicial = estado_inicial
self.acoes = acoes
self.teste_objetivo = teste_objetivo
self.custo_do_passo = custo_do_passo
class No:  # Characteristics of a search-tree node (position)
    def _init_(self, estado, pai, custo, profundidade):  # manual initializer, called explicitly after No()
self.estado = estado
self.pai = pai
self.custo = custo
self.profundidade = profundidade
```
#### File: TrabalhoIA/Problemas/Rainhas.py
```python
from Auxilares.ProblemFeature import No
from Auxilares.FuncoesAuxliares import Auxiliar
from random import randrange
import copy
class Rainhas:
    # variables used by the local search
    estado_inicial = []  # build the initial board matrix
indice = 8
for l in range(indice):
linha = []
for c in range(indice):
linha.append("'x'")
estado_inicial.append(linha)
def teste_objetivo(self, no, tipo):
        if (tipo == "cega"):  # Goal test for blind (uninformed) searches
matriz = no.estado
cont = 0
for l in range(len(matriz)):
for c in range(len(matriz)):
if (matriz[l][c]=="'Q'"):
cont+=1
if(cont == len(matriz)):
return True
return False
def acao(self, no, Busca):
        vetor = []  # List of children
        if (Busca == "cega"):  # If this is a blind search
matriz = no.estado
coluna = no.profundidade
for linha in range(len(matriz)):
matriz[linha][coluna] = "'Q'"
Batidas = Auxiliar.detectaQs(self, matriz, linha, coluna)
                if (Batidas == 0):  # No conflicts occurred
                    aux = copy.deepcopy(matriz)
                    vetor.append(aux)  # Add it to the list of children
                matriz[linha][coluna] = "'x'"
        return vetor  # Return the children
``` |
{
"source": "josuebrunel/MySDQ",
"score": 3
} |
#### File: MySDQ/mysdq/query.py
```python
from collections import defaultdict
import itertools
import operator
import re
class Xoperator(object):
def __init__(self):
self.__dict__[''] = operator.eq
self.__dict__['in'] = self._in
self.__dict__.update({
fname: func for fname, func in operator.__dict__.items() if callable(func) and not fname.startswith('_')
})
@staticmethod
def isnum(val):
try:
int(val)
return True
except (ValueError,):
pass
return False
def icontains(self, left, right):
return operator.contains(left.lower(), right.lower())
def _in(self, left, right):
return left in right
def not_in(self, left, right):
return left not in right
def startswith(self, left, right):
return left.startswith(right)
def endswith(self, left, right):
return left.endswith(right)
def regex(self, string, pattern):
return re.search(pattern, string)
def iregex(self, string, pattern):
return self.regex(string.lower(), pattern)
class DictQuerer(object):
DELIMITOR = '__'
def __init__(self, dataset, **kwargs):
self.dataset = dataset
self._xoperator = Xoperator()
def __getitem__(self, idx):
return self.dataset[idx]
def __len__(self):
return len(self.dataset)
def __str__(self):
return '%s' % self.dataset
def __repr__(self):
return '<%s: %s >' % (self.__class__.__name__, str(self))
def _lookup(self, datum, key, value, **kwargs):
keyname, _, op = key.partition(self.DELIMITOR)
if self.DELIMITOR in op:
if self._xoperator.isnum(keyname):
keyname = int(keyname)
return self._lookup(datum[keyname], op, value, **kwargs)
if not getattr(self._xoperator, op, None):
# handle list with indexed element
if isinstance(datum, (list,)) and self._xoperator.isnum(keyname):
keyname = int(keyname)
return self._lookup(datum[keyname], '%s__eq' % op, value, **kwargs)
# handle list
        if getattr(self._xoperator, op, None) and isinstance(datum, (list,)):
            res = False  # default result when the list is empty
            for item in datum:
                res = self._lookup(item, key, value, **kwargs)
                if res:
                    break
            return res
if kwargs.get('get', False):
return datum.get(keyname)
return getattr(self._xoperator, op)(datum.get(keyname), value)
def filter(self, *args, **kwargs):
result = []
for datum in self.dataset:
tests = []
for key, value in kwargs.items():
tests.append(self._lookup(datum, key, value))
if all(tests):
result.append(datum)
return DictQuerer(result)
def values(self, *args):
data = []
for datum in self.dataset:
cdata = {}
for key in args:
cdata[key] = self._lookup(datum, key, datum.get(key, None), get=True)
data.append(cdata)
return data
def apply(self, func=lambda x: x, *args):
for datum in self.dataset:
for key in args:
datum[key] = func(datum[key])
return self
def transform(self, func=lambda x: x):
return map(func, self.dataset)
def exists(self):
return bool(len(self.dataset))
def count(self):
return len(self)
def get(self, *args, **kwargs):
result = self.filter(*args, **kwargs)
if not result:
return None
if len(result) > 1:
raise Exception('Multiple values returned')
return result[0]
def delete(self, *args, **kwargs):
if kwargs:
result = self.filter(*args, **kwargs)
if result.exists():
self.dataset = [datum for datum in self.dataset if datum not in result.dataset]
return True
else:
self.dataset = []
return True
return False
def order_by(self, *args, **kwargs):
res = sorted(self.dataset, key=operator.itemgetter(*args))
if not kwargs.get('asc', True):
res = reversed(res)
return DictQuerer(list(res))
def group_by(self, *args):
result = defaultdict(list)
res = sorted(self.dataset, key=operator.itemgetter(*args))
for key, group in itertools.groupby(res, operator.itemgetter(*args)):
result[key].extend(list(group))
return result
def first(self):
return self[0]
def last(self):
return self[-1]
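
# Illustrative usage sketch; the sample records below are made up and are not
# part of the original test data.
if __name__ == '__main__':
    people = [
        {'name': 'ada', 'age': 36, 'address': {'city': 'london'}},
        {'name': 'alan', 'age': 41, 'address': {'city': 'manchester'}},
    ]
    dq = DictQuerer(people)
    print(dq.filter(age__gt=40).count())                    # 1
    print(dq.get(name='ada')['address']['city'])            # london
    print(dq.filter(address__city__eq='london').exists())   # True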
```
#### File: MySDQ/tests/conftest.py
```python
import json
import os
import pytest
@pytest.fixture
def data(request):
filepath = os.path.join(os.path.dirname(__file__), 'users.json')
return json.load(open(filepath))
``` |
{
"source": "josuebrunel/paco_workshop",
"score": 3
} |
#### File: server/app/__init__.py
```python
import os
import pdb
from flask import Flask, request, redirect, url_for
from werkzeug.utils import secure_filename  # secure_filename lives in werkzeug.utils in current Werkzeug releases
app = Flask(__name__)
app.config.from_object('config')
@app.route('/')
def index():
return "Working"
@app.route('/upload_file', methods = ['GET','POST'])
def upload_file():
if request.method == 'POST':
fname = request.files['filename']
if fname :
file_name = secure_filename(fname.filename)
path_location = os.path.join(app.config['UPLOAD_FOLDER'], file_name)
fname.save(path_location)
print("File {0} uploaded successfully".format(file_name))
return "File uploaded OK"
else:
print("{0} is not a valid method for this resource".format(request.method))
return "Invalid method"
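
# Illustrative client call (assumes the dev server runs locally on port 5000 and
# that the 'config' module defines UPLOAD_FOLDER; both are assumptions, not shown here):
#   import requests
#   with open('photo.jpg', 'rb') as f:
#       requests.post('http://127.0.0.1:5000/upload_file', files={'filename': f})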
``` |
{
"source": "josuebrunel/pymixer",
"score": 3
} |
#### File: pymixer/pymixer/client.py
```python
import time
import webbrowser
from rauth import OAuth2Service, OAuth2Session
MIXER_BASE_URL = 'https://mixer.com/api/v1/'
MIXER_AUTHORIZE_URL = 'https://mixer.com/oauth/authorize'
MIXER_TOKEN_URL = 'https://mixer.com/api/v1/oauth/token'
class TokenJar(object):
def __init__(self, *args, **kwargs):
self.__dict__.update(kwargs)
def __str__(self):
return '{self.access_token}:{self.expires_at}'.format(self=self)
def __repr__(self):
return '<{}>'.format(self.__str__())
def is_valid(self):
return (self.expires_at - time.time()) > 1
class Client(object):
def __init__(self, client_id, client_secret, redirect_uri, name=None):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.name = name if name else 'Mixer'
self.token = None
self.service = OAuth2Service(
client_id=client_id, client_secret=client_secret, name=self.name,
authorize_url=MIXER_AUTHORIZE_URL, access_token_url=MIXER_TOKEN_URL,
base_url=MIXER_BASE_URL)
def __str__(self):
return '{self.name}: {self.client_id}'.format(self=self)
def __repr__(self):
return '<{}>'.format(self.__str__())
@property
def authorize_url(self):
return self.service.get_authorize_url(
response_type='code', redirect_uri=self.redirect_uri)
def get_access_data(self, code):
        code = code if code else self.code  # NOTE: self.code is only present if a caller sets it beforehand
params = {
'code': code, 'grant_type': 'authorization_code',
'redirect_uri': self.redirect_uri
}
response = self.service.get_raw_access_token(data=params)
data = response.json()
if response.status_code == 200 and data.get('access_token'):
data['expires_at'] = time.time() + data['expires_in']
self.token = TokenJar(**data)
return data
def get_session(self):
return OAuth2Session(
self.client_id, self.client_secret, self.token.access_token, service=self.service)
def cli(self):
# get authorization code
webbrowser.open(self.authorize_url)
code = input('Paste code: ')
# get access token
return self.get_access_data(code)
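
# Illustrative flow; the client id, secret, redirect URI and endpoint path below
# are placeholders, not real values from this project:
#   client = Client('my-id', 'my-secret', 'https://example.com/callback')
#   client.cli()                      # opens the browser and prompts for the code
#   session = client.get_session()    # rauth OAuth2Session bound to the token
#   session.get('users/current')      # paths are resolved against MIXER_BASE_URL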
```
#### File: pymixer/tests/test_client.py
```python
import json
import time
from urllib import parse
from pymixer import Client
import pytest
import mock
class FakeResponse(mock.Mock):
def json(self):
return json.loads(self.content)
@pytest.fixture
def client():
client = Client('x-client_id-x', 'x-client_secret-x', 'https://httpbin.org/get', 'test')
assert '<test: x-client_id-x>' in repr(client)
return client
def test_authorize_url(client):
authorize_url = client.authorize_url
parsed_url = parse.urlparse(authorize_url)
params = parse.parse_qs(parsed_url.query)
assert params['client_id'][0] == client.client_id
assert params['response_type'][0] == 'code'
assert params['redirect_uri'][0] == client.redirect_uri
def test_authorization_grant(client):
with mock.patch('pymixer.client.OAuth2Session.send') as mock_send:
def fake_response(self, *args, **kwargs):
data = {
'access_token': 'x-access_token-x',
'refresh_token': 'x-refresh_token-x',
'token_type': 'Bearer',
'expires_in': 2
}
return FakeResponse(content=json.dumps(data), status_code=200, request=self)
mock_send.side_effect = fake_response
client.get_access_data('ab1234cd')
assert '<x-access_token-x' in repr(client.token)
assert client.token.access_token == 'x-access_token-x'
assert client.token.refresh_token == 'x-refresh_token-x'
assert client.token.is_valid() is True
time.sleep(1)
assert client.token.is_valid() is False
session = client.get_session()
assert session.access_token == client.token.access_token
assert session.service == client.service
``` |
{
"source": "josuebrunel/spewe",
"score": 2
} |
#### File: josuebrunel/spewe/setup.py
```python
import os
from setuptools import setup, find_packages
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1'
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="spewe",
version=__version__,
description="Stupid Python Web Framework",
long_description=read("README.rst"),
author=__author__,
author_email=__email__,
url="https://github.com/josuebrunel/spewe",
download_url="https://github.com/josuebrunel/spewe/archive/{0}.tar.gz".format(__version__),
keywords=['web', 'framework', 'wsgi', 'simple'],
packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Development Status :: 5 - Production/Stable',
'Topic :: Software Development :: Libraries :: Python Modules',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License'
],
platforms=['Any'],
license='MIT',
)
```
#### File: spewe/spewe/http.py
```python
import cgi
try:
from http.cookies import SimpleCookie
except (ImportError,):
from Cookie import SimpleCookie
try:
from urllib.parse import parse_qs, urljoin
except (ImportError,):
from urlparse import parse_qs, urljoin
import json
import wsgiref.util  # the util submodule must be imported explicitly; a bare "import wsgiref" does not expose it
from wsgiref.headers import Headers
HTTP_SAFE_METHODS = ['HEAD', 'GET', 'OPTIONS']
class HttpStatus(object):
def __init__(self):
self._statuses = {}
try:
from http.server import HTTPStatus
self._statuses = {status.value: status.phrase for status in HTTPStatus}
except (ImportError,):
import httplib
self._statuses = httplib.responses
for value, phrase in self._statuses.items():
phrase = 'HTTP_%d_%s' % (value, phrase.upper().replace(' ', '_'))
setattr(self, phrase, value)
def describe(self, status_code):
return '%d %s' % (status_code, self._statuses[status_code].upper())
status = HttpStatus()
class XFormFile(object):
def __init__(self, filename, content, content_type=None, encoding=None):
self.filename = filename
self.content = content.decode()
self.content_type = content_type
self.encoding = encoding
def __repr__(self):
return '<XFormFile: %s>' % self.filename
class Request(object):
def __init__(self, env):
self._environ = env
self.scheme = wsgiref.util.guess_scheme(env)
self.method = env.get('REQUEST_METHOD', None)
self.path = env.get('PATH_INFO', None)
self._full_path = wsgiref.util.request_uri(env, include_query=False)
self.query_string = env.get('QUERY_STRING', None)
self.content_type = env.get('CONTENT_TYPE', None)
self.content_length = env.get('CONTENT_LENGTH', None)
self.server_name = env.get('SERVER_NAME', None)
self.server_port = env.get('SERVER_PORT', None)
self.server_protocol = env.get('SERVER_PROTOCOL', None)
self.remote_address = env.get('REMOTE_ADDR', None)
self.remote_host = env.get('REMOTE_HOST', None)
self.params = parse_qs(self.query_string)
self.form = {}
self.files = {}
self.body = ''
if self.method not in HTTP_SAFE_METHODS:
self.form, self.files = self._parse_multipart()
self.body = self._get_body()
self.headers = Headers(
list({key: value for key, value in env.items() if key.startswith('HTTP')}.items()))
def __str__(self):
return '%s - %s' % (self.method, self.get_full_path())
__repr__ = __str__
def _get_body(self):
fp = self._environ['wsgi.input']
if not fp:
return ''
fp.seek(0)
body = fp.read()
fp.seek(0)
return body.decode()
def _parse_multipart(self):
form = {}
files = {}
if self.content_type == 'application/json':
return form, files
fs = cgi.FieldStorage(fp=self._environ['wsgi.input'],
environ=self._environ)
for field in fs.list:
if field.filename:
xfile = XFormFile(field.filename, field.value, field.type,
getattr(field, 'encoding', 'utf-8'))
files.setdefault(field.name, xfile)
else:
form.setdefault(field.name, field.value)
return form, files
@property
def json(self):
if self.content_type == 'application/json':
return json.loads(self.body)
def get_full_path(self):
return self._full_path
def build_absolute_uri(self, location):
return urljoin(self._full_path, location)
class BaseResponse(object):
content_type = None
status_code = 200
def __init__(self, data='', status_code=None, content_type=None, **kwargs):
self.data = data
self.headers = Headers([])
self.cookies = SimpleCookie()
if status_code:
self.status_code = status_code
if content_type:
self.content_type = content_type
if self.content_type:
self.headers.add_header('Content-Type', self.content_type)
def add_header(self, name, value, **kwargs):
self.headers.add_header(name, value, **kwargs)
def set_cookie(self, name, value, path='/', expire=None, httponly=None):
pass
def delete_cookies(self):
pass
class Response(BaseResponse):
content_type = 'text/html; charset=UTF8'
class JsonResponse(BaseResponse):
content_type = 'application/json'
def __init__(self, data, status_code=200, **kwargs):
data = json.dumps(data)
super(JsonResponse, self).__init__(data, status_code=status_code, **kwargs)
class TemplateResponse(BaseResponse):
def __init__(self, context):
self.context = context
class ResponseNoContent(BaseResponse):
status_code = 204
content_type = None
class ResponseRedirect(Response):
status_code = 302
def __init__(self, url):
super(ResponseRedirect, self).__init__()
self.add_header('Location', url)
class ResponsePermanentRedirect(ResponseRedirect):
status_code = 301
```
#### File: tests/myapp/views.py
```python
from spewe import Spewe
from spewe.http import Response, JsonResponse, TemplateResponse
testapp = Spewe()
@testapp.route('/none/')
def none(request):
return None
@testapp.route('/index')
def index(request):
user = request.params.get('user', ['world'])
origin = request.params.get('from', ['universe'])
return "Hello %s from %s !" % (user[0], origin[0])
@testapp.route('/notemplate/', template='none.html')
def no_template(request, *args, **kwargs):
return TemplateResponse({'none': 'none is none'})
@testapp.route('/login', methods=['POST'], template='login.html')
def login(request, *args, **kwargs):
form = request.form
context = kwargs['context']
if form['username'] == 'loking' and form['password'] == '<PASSWORD>':
context['authenticated'] = True
context['username'] = form['username']
else:
context['authenticated'] = False
context['error_message'] = "Invalid credentials"
return TemplateResponse(context)
@testapp.route(r'^/users/(?P<uuid>[\w,-]+)/$')
def users(request, uuid, **kwargs):
return Response(uuid)
@testapp.route(r'^/users/(?P<uuid>[\w,-]+)/notes/', methods=['GET', 'POST'], template='notes.html')
def notes(request, uuid, *args, **kwargs):
if request.method == 'GET':
return TemplateResponse({})
context = kwargs.get('context', {})
return TemplateResponse(context)
@testapp.route(r'^/api/users/$', methods=['get', 'post'])
def api_users(request):
users = [{'uuid': 'abcd' * 8, 'username': 'cloking', 'email': '<EMAIL>'}]
if request.method == 'GET':
return JsonResponse(users)
if request.json:
data = request.json
data['uuid'] = 'aabb' * 8
users.append(data)
return JsonResponse(users)
```
#### File: spewe/tests/utils.py
```python
import os
def get_test_dir():
return os.path.dirname(os.path.abspath(__file__))
def get_test_app_template(tplname):
return os.path.join(get_test_dir(), 'myapp', 'templates', tplname)
``` |
{
"source": "josuebrunel/yfs",
"score": 3
} |
#### File: yfs/fantasy_sport/utils.py
```python
import json
from xml.dom import minidom
def pretty_json(data):
"""Return a pretty formatted json
"""
data = json.loads(data.decode('utf-8'))
return json.dumps(data, indent=4, sort_keys=True)
def pretty_xml(data):
    """Return a pretty formatted xml
"""
parsed_string = minidom.parseString(data.decode('utf-8'))
return parsed_string.toprettyxml(indent='\t', encoding='utf-8')
``` |
{
"source": "Josue-cloudU/Biblioteca",
"score": 3
} |
#### File: Biblioteca/accounts/forms.py
```python
from django import forms
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class CustomUserForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', '<PASSWORD>', '<PASSWORD>',)
def clean_email(self):
email = self.cleaned_data["email"]
if User.objects.filter(email=email).exists():
raise forms.ValidationError('Correo ya esta registrado')
return email
class CustomUserStaffForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', '<PASSWORD>', '<PASSWORD>', 'is_staff')
def clean_email(self):
email = self.cleaned_data["email"]
if User.objects.filter(email=email).exists():
raise forms.ValidationError('Correo ya esta registrado')
return email
class UpdateUserForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'is_active', 'is_staff')
widgets = {
'password1': forms.TextInput(
attrs = {
'class': 'form-control',
'readonly': True
}
),
'password2': forms.TextInput(
attrs = {
'class': 'form-control',
'readonly': True
}
)
}
def clean_email(self):
email = self.cleaned_data["email"]
if User.objects.filter(email=email).exists():
raise forms.ValidationError('Correo ya esta registrado')
return email
```
#### File: Biblioteca/accounts/middleware.py
```python
import datetime
from datetime import timedelta
from libro.models import Reservas
class PruebaMiddleware:
def __init__(self,get_response):
self.get_response = get_response
def __call__(self,request):
response = self.get_response(request)
return response
    # middleware hook that marks reservations whose loan period has expired as inactive (estado = False)
def process_view(self,request,view_func,view_args,view_kwargs):
if request.user.is_authenticated:
fecha_actual = datetime.date.today()
reservas = Reservas.objects.filter(estado = True,user = request.user)
for reserva in reservas:
fecha_vencimiento = reserva.fecha_creacion + timedelta(days = 7)
if fecha_actual > fecha_vencimiento:
reserva.estado = False
reserva.save()
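                    # Example of the rule above (illustrative dates only): a reservation
                    # created on 2021-03-01 expires on 2021-03-08, so from 2021-03-09
                    # onward it is saved with estado = False.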
``` |
{
"source": "JosueDLA/BasicModule",
"score": 4
} |
#### File: BasicModule/jdla_basic/basic_operations.py
```python
def addition(a, b):
"""
Adds two given values
Parameters:
a (int): First value
b (int): Second value
Returns:
int: sum result
"""
return a + b
def substraction(a, b):
"""
    Subtract one operand from another
Parameters:
a (int): First value
b (int): Second value
Returns:
int: subtraction result
"""
return a - b
def multiplication(a, b):
"""
    Multiplies one operand by another
Parameters:
a (int): First value
b (int): Second value
Returns:
int: multiplication result
"""
return a * b
def division(a, b):
"""
    Divides one operand by another
Parameters:
a (int): First value
b (int): Second value
Returns:
int: division result
"""
if b == 0:
raise ValueError('Can not divide by zero')
return a / b
def modulus(a, b):
"""
    Computes the remainder of dividing one operand by another
Parameters:
a (int): First value
b (int): Second value
Returns:
int: division remainder
"""
return a % b
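
# Minimal sanity checks (illustrative only):
#   addition(2, 3) == 5, substraction(5, 2) == 3, multiplication(4, 3) == 12,
#   division(10, 4) == 2.5, modulus(10, 3) == 1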
``` |
{
"source": "JosueDLA/CVTools",
"score": 3
} |
#### File: CVTools/cv_tools/frame.py
```python
from . import contour
import numpy as np
import cv2
class Frame(np.ndarray):
def __new__(cls, frame, height, width):
frame = cv2.resize(frame, (width, height))
obj = np.asarray(frame).view(cls)
obj.height = height
obj.width = width
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.height = getattr(obj, 'height', None)
self.width = getattr(obj, 'width', None)
def filter_frame(self):
"""
Filter frame with multiple techniques to make contour detection easier.
"""
# GrayScale the frame
gray = cv2.cvtColor(self, cv2.COLOR_BGR2GRAY)
# White frame for outpur contour video
white = self.create_white_frame()
# Edge detection with blured image
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
#bilateral = cv2.bilateralFilter(gray, 11, 17, 17)
canny = cv2.Canny(blurred, 20, 40)
# Dilate lines for earsier contour detection
kernel = np.ones((3, 3), np.uint8)
dilated = cv2.dilate(canny, kernel, iterations=2)
return white, dilated
def get_contours(self, dilated):
(contours, hierarchy) = cv2.findContours(
dilated.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
hierarchy = hierarchy[0]
contours, hierarchy = contour.remove_parent_contour(
contours, hierarchy)
return contours, hierarchy
def sort_contours():
pass
def create_white_frame(self):
white_frame = 0 * np.ones((self.height, self.width, 3), np.uint8)
return white_frame
```
#### File: CVTools/cv_tools/video.py
```python
from .frame import Frame
import numpy as np
import cv2
class Video:
def __init__(self, path, height, width):
self.path = path
self.height = height
self.width = width
def select(self):
video = cv2.VideoCapture(self.path)
return video
def read(self, video_capture):
ret, frame = video_capture.read()
frame = Frame(frame, self.height, self.width)
return ret, frame
def write(self, output_path):
#cv2.VideoWriter(name, codec, fps, resolution)
fourcc = cv2.VideoWriter_fourcc(*'MPEG')
out = cv2.VideoWriter(output_path, fourcc, 30,
(self.width, self.height))
return out
``` |
{
"source": "JosueDLA/RubikSolver",
"score": 3
} |
#### File: RubikSolver/rubik_solver/cube.py
```python
import numpy as np
import cv2
class Cube:
def __init__(self, U, R, F, D, L, B):
self.U = U
self.R = R
self.F = F
self.D = D
self.L = L
self.B = B
def __str__(self):
pass
def get_string(self):
cube = "".join(self.U + self.R + self.F + self.D + self.L + self.B)
return cube
def get_faces(self):
faces = []
faces.append(self.U[4])
faces.append(self.R[4])
faces.append(self.F[4])
faces.append(self.D[4])
faces.append(self.L[4])
faces.append(self.B[4])
return faces
@staticmethod
def sort_contours(contours, method="left-to-right"):
"""
        Returns contours sorted from left to right and top to bottom
"""
# Get bound box for each contour
bounding_boxes = [cv2.boundingRect(
contour) for contour in contours]
# Zip contour and bounding boxes
squares = zip(contours, bounding_boxes)
squares = list(squares)
if len(contours) >= 7:
# Sort squares by Y value
squares.sort(key=lambda b: b[1][1])
first_row = squares[:3]
second_row = squares[3:6]
third_row = squares[6:9]
# Sort squares by X value
first_row.sort(key=lambda b: b[1][0])
second_row.sort(key=lambda b: b[1][0])
third_row.sort(key=lambda b: b[1][0])
squares = first_row + second_row + third_row
contours, bounding_boxes = zip(*squares)
return contours, bounding_boxes
``` |
{
"source": "JosueDLA/ShapeDetection",
"score": 4
} |
#### File: ShapeDetection/shape_detection/shape.py
```python
import math
class Point(tuple):
"""Extends 'tuple', Point represents a 2d position."""
def __new__(cls, x: int, y: int) -> tuple:
return tuple.__new__(cls, (x, y))
def __init__(self, x: int, y: int):
self._x = x
self._y = y
@property
def x(self) -> int:
"""X coordinate value."""
return self._x
@property
def y(self) -> int:
"""Y coordinate value."""
return self._y
def __str__(self) -> str:
return(f"({self.x}, {self.y})")
def get_distance(point_a: Point, point_b: Point) -> float:
"""Get the distance between two points.
Args:
point_a (Point): start point.
point_b (Point): end point.
Returns:
float: Distance.
Notes:
See https://www.mathsisfun.com/algebra/distance-2-points.html for more info.
"""
distance = math.sqrt(
(math.pow((point_a.x-point_b.x), 2) + math.pow((point_a.y-point_b.y), 2)))
return distance
def value_approximation(value_a: float, value_b: float, value_threshold: float) -> bool:
"""Compare two numbers to check if they are roughly the same.
Args:
value_a (float): First number.
value_b (float): Second number.
value_threshold (float): Approximation threshold.
Returns:
bool: Whether or not the numbers are the same.
"""
position = round(value_a/float(value_b), 1)
flag = True if position >= (
1-value_threshold) and position <= (1+value_threshold) else False
return flag
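
# Quick illustrative check (not part of the original module): the distance
# between (0, 0) and (3, 4) is 5.0, and 4.9 vs 5.0 count as approximately
# equal under a 0.1 threshold.
if __name__ == "__main__":
    print(get_distance(Point(0, 0), Point(3, 4)))   # 5.0
    print(value_approximation(4.9, 5.0, 0.1))       # True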
``` |
{
"source": "JosueGauthier/Surface-de-Bezier-Python",
"score": 3
} |
#### File: JosueGauthier/Surface-de-Bezier-Python/convexe.py
```python
import numpy as np
def convexe(X,Y):
n = np.shape(X)[0]
h = 0
seuil = 1*(10**(-6))
for k in range(1,n-1):
xx = X[k-1]-X[k+1]
yy = Y[k-1]-Y[k+1]
if (abs(xx) > seuil):
yy = Y[k-1]*(X[k]-X[k+1])-Y[k+1]*(X[k]-X[k-1])
yk=yy/xx
h = max(h,abs(yk-Y[k]))
elif (abs(yy) > seuil):
xx = X[k-1]*(Y[k]-Y[k+1])-X[k+1]*(Y[k]-Y[k-1])
xk=xx/yy
h = max(h,abs(xk-X[k]))
return(h)
```
#### File: JosueGauthier/Surface-de-Bezier-Python/coox.py
```python
import numpy as np
from cast3d import cast3d
def coox(t1,t2,XP,YP,ZP):
np1 = np.shape(XP)[0]
np2 = np.shape(XP)[1]
xx1 = np.zeros([np1,1])
yy1 = np.zeros([np1,1])
zz1 = np.zeros([np1,1])
    # build the points P_q1
for k1 in range(0,np1):
        # initialization: points P(k1, 1:np2)
xx2 = np.zeros([np2,1])
yy2 = np.zeros([np2,1])
zz2 = np.zeros([np2,1])
for k2 in range(0,np2):
xx2[k2]=XP[k1][k2]
yy2[k2]=YP[k1][k2]
zz2[k2]=ZP[k1][k2]
[x,y,z]=cast3d(t2,xx2,yy2,zz2)
xx1[k1]=x
yy1[k1]=y
zz1[k1]=z
[x,y,z]=cast3d(t1,xx1,yy1,zz1)
return(x,y,z)
```
#### File: JosueGauthier/Surface-de-Bezier-Python/drectan.py
```python
def drectan(XP,YP):
xmin=min(XP)
xmax=max(XP)
ymin=min(YP)
ymax=max(YP)
return(xmin,xmax,ymin,ymax)
```
#### File: JosueGauthier/Surface-de-Bezier-Python/rbezier.py
```python
from drectan import drectan
def rbezier(XP1,YP1,XP2,YP2):
(xmin1,xmax1,ymin1,ymax1)=drectan(XP1,YP1)
(xmin2,xmax2,ymin2,ymax2)=drectan(XP2,YP2)
xmi=max(xmin1,xmin2)
xma=min(xmax1,xmax2)
ymi=max(ymin1,ymin2)
yma=min(ymax1,ymax2)
inter=0
seuil=1*10**(-6)
if (xma-xmi > seuil) :
inter=inter+1
if (yma-ymi > seuil) :
inter=inter+1
return(inter,xmi,xma,ymi,yma)
```
#### File: JosueGauthier/Surface-de-Bezier-Python/tbezier.py
```python
import numpy as np
import matplotlib.pyplot as plt
def tbezier(X,Y,color,XP,YP,pchar,pcolor,ptrait,title) :
    ligne = pcolor + ptrait  # concatenate colour and line style into a spec such as 'b--'
plt.plot(X,Y,color)
plt.plot(XP,YP,ligne)
a = np.shape(XP) # a taille en liste
nb = a[0] # nb a en int coresspond au nombre de points de controles.
for k in range(0,nb) :
kk = k
charrIndice = str(kk) # trnasforme en str l'indice 1 eg '1'
numP = pchar + charrIndice #forme un indice type 'P1'
epsx = 0.1
epsy = 0.2
if (k==0) :
epsx =0.0
epsy =-0.2
if (k==nb-1) :
epsx =0.1
epsy =0.0
plt.text(XP[k]+epsx, YP[k]+epsy, numP)
plt.title(title)
plt.show()
``` |
{
"source": "JosueHernandezR/An-lisis-de-Algoritmos",
"score": 4
} |
#### File: Practica10/Codificar/main.py
```python
from collections import Counter
from huffman import *
from store import *
TEST = "This is a test for Huffman's algorithm."
def getText ( ):
with ( open ( "Files/Original.txt", "r" ) ) as f:
txtin = f.read ( ).rstrip ( "\n" )
content = TEST if ( txtin == "" ) else txtin
return content
def main ( ):
txtin = getText ( )
huffman = Huffman ( txtin )
huffman.setFrequency ( )
huffman.setTree ( )
huffman.setCodes ( )
huffman.encode ( )
store ( huffman.result, huffman.frequency, huffman.probability, huffman.codes )
if ( __name__ == "__main__" ):
main ( )
```
#### File: Practica10/Decodificar/main.py
```python
from deco import *
import pickle
import ast
def getParameters ( ):
    # Get the compressed binary sequence.
compressed = pickle.load ( open ( "../Codificar/Files/Encoded File.txt", "rb" ) )
    # Get the dictionary with each symbol's encoding.
f = open ( "../Codificar/Files/Dictionary.dic", "r" )
dictionary = f.read ( )
dictionary = ast.literal_eval ( dictionary )
aux = { }
for key, element in dictionary.items ( ):
aux [ element ] = key
f.close ( )
return compressed, aux
def main ( ):
compressed, dictionary = getParameters ( )
decompressed = decode ( dictionary, compressed )
with ( open ( "Files/Decoded File.txt", "w" ) ) as f:
f.write ( decompressed )
if ( __name__ == "__main__" ):
main ( )
```
#### File: An-lisis-de-Algoritmos/Practica11/main.py
```python
import grafica as plt
import global_variables as gb
def printer ( flag, C ):
if ( flag == -1 ):
print ( "\n\n\tLa solución {} no es un ciclo hamiltoniano.\n".format ( C ) )
exit ( 0 )
print ( "\n\n\tLa solucion {} es un ciclo hamiltoniano.\n".format ( C ) )
def verify_hamiltonian ( graph, certificate ):
gb.t += 1
for i in range ( len ( graph ) ):
gb.t += 1
vecinos = graph [ certificate [ i ] ]
gb.t += 1
if ( certificate [ i + 1 ] not in vecinos ):
printer ( -1, certificate )
gb.parametros.append ( ( len ( certificate [ : i ] ), gb.t + vecinos.index ( certificate [ i + 1 ] ) + 1 ) )
printer ( 0, certificate )
print ( gb.parametros )
def main ( ):
graph = { 1 : [ 20 , 2 , 5 ] , 2 : [ 1 , 18 , 3 ] , 3 : [ 2 , 16 , 4 ] , 4 : [ 3 , 5 , 14 ] ,
5 : [ 4 , 6 , 1 ] , 6 : [ 5 , 13 , 7 ] , 7 : [ 8 , 6 , 20 ] , 8 : [ 7 , 9 , 12 ] ,
9 : [ 8 , 19 , 10 ] , 10: [ 9 , 17 , 11 ] , 11: [ 10 , 15 , 12 ] , 12: [ 8 , 13 , 11 ] ,
13: [ 12 , 6 , 14 ] , 14: [ 13 , 4 , 15 ] , 15: [ 14 , 11 , 16 ] , 16: [ 15 , 17 , 3 ] ,
17: [ 10 , 16 , 18 ] , 18: [ 17 , 2 , 19 ] , 19: [ 9 , 18 , 20 ] , 20: [ 1 , 19 , 7 ]
}
certificate = [ 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 ,
18 , 19 , 20 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ]
verify_hamiltonian ( graph, certificate )
plt.plot ( )
if ( __name__ == "__main__" ):
main ( )
```
#### File: Practica1/Euclides/fibonacci.py
```python
def fibonacci ( ):
a, b = 1, 1
while ( True ):
        # yield: used to return a "generator", an iterator object that behaves much like a list.
yield a
a, b = b, a + b
# List version.
def fibolist ( limit ):
n, fibo = 0, [ ]
    # Build a list of Fibonacci numbers using the generator.
for i in fibonacci ( ):
if ( n >= limit ): break
fibo.append ( i )
n += 1
    # Return.
return fibo
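
# Illustrative check (not in the original file): the first five numbers
# produced here are [1, 1, 2, 3, 5], i.e. fibolist(5) -> [1, 1, 2, 3, 5].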
```
#### File: Practica1/Suma binaria/main.py
```python
from SumaBinaria import binarysum
from random import random
from Grafica import graph
# n and m are powers of two (2^n); we may have (n > m) or (n = m) but never (n < m).
# c: stores the binary sum of 'a' and 'b'.
# a: stores the first binary list.
# b: stores the second binary list.
# n: the size of both lists.
def display ( a, b, c, counter ):
DATAFORMAT = ""
if ( len ( c ) > len ( a ) ): DATAFORMAT = " "
print ( "\n\tA: " + DATAFORMAT, a )
print ( "\tB: " + DATAFORMAT, b )
print ( "\tC: ", c )
print ( "\n\tOrden de la suma: ", int ( pow ( 2, len ( c ) - 1 ) ), "\n" )
print ( "\tTamaño de lista 'Suma': ", len ( c ), "\t\tTime: ", counter, "\n" )
def generate ( ):
a, b, n, m = [ ], [ ], 0, 1
    # Define the sizes of lists a and b (n and m respectively).
while ( n < m ):
rnd = ( int ( random ( ) * 5 ) + 1 )
n = int ( pow ( 2, rnd ) )
print("\tTamaño de lista n", n)
rnd = ( int ( random ( ) * 5 ) + 1 )
m = int ( pow ( 2, rnd ) )
print("\tTamaño de lista m", m)
    # Generate both binary numbers as integer lists.
for i in range ( n ):
a.insert ( 0, ( ( int ( random ( ) * 2 ) + 0 ) ) )
if ( i >= m ):
b.insert ( 0, 0 )
else:
b.insert ( 0, ( ( int ( random ( ) * 2 ) + 0 ) ) )
    # Return values.
return a, b
def main ( ):
a, b = generate ( )
c, count = binarysum ( a, b )
display ( a, b, c, count )
graph ( len ( c ), count )
main ( )
```
#### File: Fibonacci/Iterativo/fibonacci.py
```python
def fibonacci ( n ):
count, fibo, a, b, f = 1, 1, 1, 0, [ ]
for i in range ( 1, n ):
count += 1
fibo = a + b
count += 1
f.append ( fibo )
count += 1
b = a
count += 1
a = fibo
count += 1
count += 1
print("contador: ", count)
return fibo, count, f
```
#### File: Fibonacci/Iterativo/grafica.py
```python
import matplotlib.pyplot as plt
import numpy as np
def graph ( count, fibo, f, n ):
    # Window title
plt.figure ( "Fibonacci Iterative Algorithm" )
    # Plot title
plt.title ( "Fibonacci ( " + str ( n ) + " ): " + str ( fibo ) )
    # Time ( t ) parameter of the plot.
t = np.arange ( 0, count, ( count / ( len ( f ) + 1 ) ) )
_t = list ( map ( ( lambda x: x * ( 5 / 2 ) ), t ) )
_f = np.arange ( 0, len ( f ) + 1 )
    # Axis labels.
plt.xlabel ( "Time ( t )", color = ( 0.3, 0.4, 0.6 ), size = "large" )
plt.ylabel ( "Fibonacci ( f )", color = ( 0.3, 0.4, 0.6 ), size = "large" )
# Plot.
plt.plot ( _f, _t, "b^", label = "g( n ) = ( 5/2 )( n )" )
plt.plot ( _f, t, "ro", label = "T( n ) = ( n )" )
plt.plot ( _f, _t, "b--")
plt.plot ( _f, t, "r--")
plt.legend ( loc = "lower right" )
plt.show ( )
```
#### File: N-Cube Sum/Recursivo/main.py
```python
from grafica import grafica
from cubo import cubo
def main ( ):
parameters = [ ]
n = int ( input ( "\n\tNumber to calculate firts n-Cubes: " ) )
    # cubo(n): returns a list of tuples with the sum of the numbers raised to the
    # power of 3 and the algorithm's running time [(C(n), T(n))].
for i in range ( 1, n + 1 ):
parameters.append ( cubo ( i, 0 ) )
print ( "\n\tCubesum ( ", n, " ): ", parameters [ len ( parameters ) - 1 ] [ 0 ], "\n" )
grafica ( parameters [ len ( parameters ) - 1 ], parameters, n )
main ( )
```
#### File: Practica3/MergeSort/Merge.py
```python
import globalvariables as gb
def onlymerge(izq, der):
"""
    Merge interleaves the elements of the two halves in sorted order.
"""
    i, j = 0, 0  # Loop counters
    result = []  # Result list
    gb.time = 0  # Operation counter
    gb.time += 1
    # Merge in sorted order
while(i < len(izq) and j < len(der)):
gb.time += 1
if (izq[i] < der[j]):
result.append(izq[i])
gb.time += 1
i += 1
gb.time += 1
else:
result.append(der[j])
gb.time += 1
j += 1
gb.time += 1
gb.time += 1
    # Append the remaining elements to the result list
result.extend(izq[i:])
gb.time += 1
result.extend(der[j:])
gb.time += 1
    # Return the result
return result, (len(result), gb.time)
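
# Illustrative check (assumes the globalvariables module exposes the numeric
# `time` counter used above):
#   onlymerge([1, 3, 5], [2, 4])[0] == [1, 2, 3, 4, 5]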
```
#### File: Practica4/QuickSort/grafica.py
```python
import matplotlib.pyplot as plt
import numpy as np
import gb
"""
Global variables:
    proposed2: Proposed function for the Quicksort algorithm. If the user
               chooses to sort the worst case, this variable takes the value
               "g(n) = 3/2 n^2"; in any other case "g(n) = n log(n)".
    proposed1: Proposed function for the Partition algorithm. If the user
               chooses to sort the worst case, this variable takes the value
               "g(n) = 3/2 n"; in any other case "g(n) = n".
    function2: This Quicksort label is shown only if the user chooses to sort
               the worst case, and takes the value "T(n) = n^2".
    function1: This Partition label is shown only if the user chooses to sort
               the worst case, and takes the value "T(n) = n".
    g2: This list stores the values of the proposed function for Quicksort.
    g1: This list stores the values of the proposed function for Partition.
"""
proposed2 = ""
proposed1 = ""
function2 = ""
function1 = ""
g2 = [ ]
g1 = [ ]
"""
The nlogn function loads the n log(n) reference values, i.e. the points of the
proposed function to plot. This is the proposed function on the graph, where
T(n) is the computational time of our algorithm and g(n) is the proposed
function such that T(n) is in ϴ(g(n)).
"""
def nlogn ( ):
global g2, aux
f = open ( "n log ( n ).txt", "r" )
aux = f.readlines ( )
g2 = list ( map ( lambda x: float ( x ) * 5/4, aux [ : len ( gb.n ) + 1 ] ) )
f.close ( )
"""
The function labels are controlled implicitly by menu.py: depending on the value
of gb.flag (True or False) it assigns the global string variables, which help
distinguish the proposed function from the algorithm's measured running time on
the graph.
"""
def labels ( ):
global proposed2, proposed1, function2, function1, g1
g1 = list ( map ( lambda x: 3/2 * x [ 1 ], gb._parameters ) )
nlogn ( )
if ( gb.flag ):
        # Worst-case label assignment.
proposed2 = "g( n ) = ( 3/2 ) n^2"
proposed1 = "g( n ) = ( 3/2 ) n"
function2 = "T( n ) = n^2"
function1 = "T( n ) = n"
else:
g1 = list ( np.arange ( 6, max ( g1 ) + 6, max ( g1 ) / len ( gb.n ) ) )
        # Label assignment for any other case.
proposed2 = "g( n ) = n log ( n )"
proposed1 = "g( n ) = n"
function2 = None
function1 = None
def graph ( ):
labels ( )
# Window title.
plt.figure ( "Algoritmo Quicksort", figsize = ( 14, 7 ) )
# Right graph: Temporal complexity of Partition.
plt.subplot ( 1, 2, 2 )
# Figure title.
plt.title ( "Partition ( " + str ( gb._parameters [ -1 ] [ 0 ] + 1 ) + ", " + str ( gb._parameters [ -1 ] [ 1 ] ) + " )" )
# Parameter Time ( t ).
_t = list ( map ( lambda x: x [ 1 ], gb._parameters ) )
# Parameter Size ( n ).
_s = list ( map ( lambda x: x [ 0 ] + 1, gb._parameters ) )
# Axes names.
plt.xlabel ( "Tamaño ( n )", color = ( 0.3, 0.4, 0.6 ), size = "large" )
plt.ylabel ( "Tiempo Partition ( t )", color = ( 0.3, 0.4, 0.6 ), size = "large" )
# Plot.
plt.plot ( _s, _t, "#778899", linewidth = 3, label = function1 )
plt.plot ( _s, g1, "#800000", linestyle = "--", label = proposed1 )
plt.legend ( loc = "upper left" )
# Left graph: Temporal complexity of Quicksort.
plt.subplot ( 1, 2, 1 )
# Figure title.
plt.title ( "Quicksort ( " + str ( gb.parameters [ -1 ] [ 0 ] ) + ", " + str ( gb.parameters [ -1 ] [ 1 ] ) + " )" )
# Parameter Time ( t ).
t = list ( map ( lambda x: x [ 1 ], gb.parameters ) )
# Parameter Size ( n ).
s = list ( map ( lambda x: x [ 0 ], gb.parameters ) )
# Axes names.
plt.xlabel ( "Size ( n )", color = ( 0.3, 0.4, 0.6 ), size = "large" )
plt.ylabel ( "Quicksort Time ( t )", color = ( 0.3, 0.4, 0.6 ), size = "large" )
# Plot.
plt.plot ( s, t, "#778899", linewidth = 3, label = function2 )
plt.plot ( s, g2, "#800000", linestyle = "--", label = proposed2 )
plt.legend ( loc = "upper left" )
plt.show ( )
```
#### File: Practica4/QuickSort/partition.py
```python
import gb
def partition ( n, p, r ):
x = n [ r ] # pivot
gb._time += 1
i = p # border
gb._time += 1
for j in range ( p, r ):
gb._time += 1
if ( n [ j ] < x ):
aux = n [ j ]
gb._time += 1
n [ j ] = n [ i ]
gb._time += 1
n [ i ] = aux
gb._time += 1
i += 1
gb._time += 1
gb._time += 1
aux = n [ i ]
gb._time += 1
n [ i ] = n [ r ]
gb._time += 1
n [ r ] = aux
gb._time += 1
    # Add Partition's running time '_time' to Quicksort's total running time 'time'.
gb.time += gb._time
if ( r > gb._parameters [ len ( gb._parameters ) - 1 ][ 0 ] ):
gb._parameters.append ( ( r, gb._time ) )
gb._time = 0
return i
```
#### File: An-lisis-de-Algoritmos/Practica7/operaciones_matrices.py
```python
def suma ( A, B ):
n = len ( A )
C = [ [ A [ i ] [ j ] + B [ i ] [ j ] for j in range ( n ) ] for i in range ( n ) ]
# Return statement.
return C
def resta ( A, B ):
n = len ( A )
C = [ [ A [ i ] [ j ] - B [ i ] [ j ] for j in range ( n ) ] for i in range ( n ) ]
# Return statement.
return C
```
#### File: An-lisis-de-Algoritmos/Practica9/plagio.py
```python
import collections
def lcs(s1, s2):
    # Build token lists from the input texts.
tokens1, tokens2 = s1.split(), s2.split()
cache = collections.defaultdict(dict)
    # Start filling the DP matrix
for i in range(-1, len(tokens1)):
for j in range(-1, len(tokens2)):
if i == -1 or j == -1:
cache[i][j] = 0
else:
if tokens1[i] == tokens2[j]:
cache[i][j] = cache[i - 1][j - 1] + 1
else:
                    # Take the larger of the two subproblems
cache[i][j] = max(cache[i - 1][j], cache[i][j - 1])
return cache[len(tokens1) - 1][len(tokens2) - 1]
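
# Illustrative check (not in the original script): lcs("a b c", "a c") returns 2,
# since the longest common subsequence of word tokens is ['a', 'c'].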
# Read and store each file's text in a variable
file1=open("prueba51.txt","r")
text1=file1.read()
file2=open("prueba52.txt","r")
text2=file2.read()
# Run the functions
original = len(text1.split())
copia = lcs(text1,text2)
print("El número de palabras similares son: ",copia)
# Percentage calculation
porcentaje = float((copia*100)/original)
print("El porcentaje de plagio es: ",round(porcentaje,2), "%")
``` |
{
"source": "JosueHernandezR/Evolutionary_Computing",
"score": 4
} |
#### File: Evolutionary_Computing/Tarea 2/mochila.py
```python
import numpy as np
# Based on dynamic programming.
# Program for the 0-1 knapsack problem.
# Returns the maximum value that can be
# carried in a knapsack of capacity W.
def knapSack(W, wt, val):
    # Rows
n = len (val) + 1
    # Columns
W = W + 1
K = np.zeros((n, W))
# print(n,W)
    # This is how the matrix could be built with a list comprehension:
#K = np.array([[0 for x in range(W+1)] for x in range(n+1)])
# for y in range(W):
# K[0,y] = y
# for x in range(n):
# K[x,0] = x
# print(K)
for i in range(1,n):
# print("Iteración i: ",i)
for w in range(1,W):
# print("Iteración w: ", w)
if i == 0 or w == 0:
K[i][w] = 0
# print("Caso if:", i)
# print(K)
elif wt[i-1] <= w:
K[i][w] = max(val[i-1] + K[i-1][w-wt[i-1]],
K[i-1][w])
# print("Caso else if:", i)
# print(K)
else:
K[i][w] = K[i-1][w]
# print("Caso else:", i)
# print(K)
print("La matriz resultante es: ")
print(K)
return K[n-1][W-1]
val = [20, 10, 5]
wt = [4, 8, 3]
W = 5
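# With these sample inputs only the first item (weight 4, value 20) fits within
# the capacity-5 knapsack, so the expected result printed below is 20.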
print("El resultado es:",knapSack(W, wt, val))
``` |
{
"source": "JosueHernandezR/TTR",
"score": 2
} |
#### File: api_v1/endpoints/answer_option.py
```python
from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from sqlalchemy.sql.operators import desc_op
from app import crud, models, schemas
from app.api import deps
router = APIRouter()
@router.post("/create-answer-option", response_model=schemas.AnswerOption)
def createAnswerOption(
*,
db: Session = Depends(deps.get_db),
obj_in: schemas.AnswerOptionCreate,
current_user: models.User = Depends(deps.get_current_active_user),
option_id: int,
option_question_id: int,
option_question_survey_id: int,
) -> Any:
answer_option = crud.answer_option.createAnswerOption(
db=db,
obj_in=obj_in,
respondent_id=current_user.id,
option_id=option_id,
option_question_id=option_question_id,
option_question_survey_id=option_question_survey_id,
)
return answer_option
@router.post("/create-result-aceptacion", response_model=schemas.SurveyAceptacion)
def createAceptacion(
*,
db: Session = Depends(deps.get_db),
obj_in: schemas.SurveyAceptacionCreate,
current_user: models.User = Depends(deps.get_current_active_user),
survey_id: int,
) -> Any:
survey = crud.survey.get(db=db, id=survey_id)
survey_weight_max = survey.weight_total
sum_result = crud.answer_option.weight_total_by_survey_and_respondent(
db=db, option_question_survey_id=survey_id,
respondent_id=current_user.id,
)
aceptacion = crud.survey_aceptacion.createSurveyAceptacion(
db=db,
obj_in=obj_in,
survey_id=survey_id,
respondent_id=current_user.id,
sum_weight_answers=sum_result,
survey_weigth_max=survey_weight_max
)
return aceptacion
@router.get("/get-answers-by-survey", response_model=List[schemas.AnswerOption])
def read_answers_by_survey(
option_question_survey_id:int,
current_user: models.User = Depends(deps.get_current_active_user),
db: Session = Depends(deps.get_db),
skip: int = 0,
limit: int = 100,
) -> Any:
answers_by_survey = crud.answer_option.get_multi_by_survey(
db = db,
option_question_survey_id=option_question_survey_id,
skip=skip,
limit=limit
)
if not answers_by_survey:
raise HTTPException(status_code=404, detail="Respuestas no encontradas")
return answers_by_survey
@router.get("/get-answers-by-survey-and-respondent", response_model=List[schemas.AnswerOption])
def read_answers_by_survey_and_respondent(
option_question_survey_id:int,
current_user: models.User = Depends(deps.get_current_active_user),
db: Session = Depends(deps.get_db),
skip: int = 0,
limit: int = 100,
) -> Any:
answers_by_survey_and_respondent = crud.answer_option.get_multi_by_survey_and_respondent(
db= db,
option_question_survey_id=option_question_survey_id,
respondent_id=current_user.id,
skip=skip,
limit=limit,
)
if not answers_by_survey_and_respondent:
raise HTTPException(status_code=404, detail="Respuestas no encontradas")
return answers_by_survey_and_respondent
```
#### File: api_v1/endpoints/question.py
```python
from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from app import crud, models, schemas
from app.api import deps
router = APIRouter()
@router.post("/", response_model=schemas.Question)
def create_question(
*,
db: Session = Depends(deps.get_db),
question_in: schemas.QuestionCreate,
current_user: models.User = Depends(deps.get_current_active_user),
survey_id: int,
) -> Any:
question = crud.question.create_question(db=db, question=question_in, survey_id=survey_id)
return question
@router.get("/{id}", response_model=schemas.Question)
def read_question(
*,
db: Session = Depends(deps.get_db),
id: int,
current_user: models.User = Depends(deps.get_current_active_user),
) -> Any:
"""
Get question by ID.
"""
question = crud.question.get(db=db, id=id)
if not question:
raise HTTPException(status_code=404, detail="Question not found")
    # TODO: implement fetching the related survey by its id
return question
@router.put("/{id}", response_model=schemas.Question)
def update_question(
*,
db: Session = Depends(deps.get_db),
id: int,
question_in: schemas.QuestionUpdate,
current_user: models.User = Depends(deps.get_current_active_user),
) -> Any:
"""
Update a question.
"""
question = crud.question.get(db=db, id=id)
if not question:
raise HTTPException(status_code=404, detail="Question not found")
    # TODO: implement fetching the related survey by its id
question = crud.question.update(db=db, db_obj=question, obj_in=question_in)
return question
@router.delete("/{id}", response_model=schemas.Question)
def delete_question(
*,
db: Session = Depends(deps.get_db),
id: int,
current_user: models.User = Depends(deps.get_current_active_user),
) -> Any:
"""
Delete a question.
"""
question = crud.question.get(db=db, id=id)
if not question:
raise HTTPException(status_code=404, detail="Question not found")
    # TODO: implement fetching the related survey by its id
question = crud.question.remove(db=db, id=id)
return question
```
#### File: api_v1/endpoints/utils.py
```python
from typing import Any
from fastapi import APIRouter, Depends
from pydantic.networks import EmailStr
from app import models, schemas
from app.api import deps
from app.utils import send_test_email
router = APIRouter()
@router.post("/test-email/", response_model=schemas.Msg, status_code=201)
def test_email(
email_to: EmailStr,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Test emails.
"""
send_test_email(email_to=email_to)
return {"msg": "Test email sent"}
```
#### File: app/crud/crud_question.py
```python
from typing import Dict, Any, Optional, Union, List
from fastapi.encoders import jsonable_encoder
from app import crud, models, schemas
from app.crud.base import CRUDBase
from app.models.question import Question
from app.schemas.question import QuestionCreate, QuestionUpdate
from sqlalchemy.orm import Session
class CRUDQuestion(CRUDBase[Question,QuestionCreate, QuestionUpdate]):
def get_by_question_id(self, db: Session, *, id:int) -> Optional[Question]:
return db.query(self.model).filter(Question.id == id).first()
def create_question(
self, db: Session, *, question: QuestionCreate, survey_id:int
) -> Question:
obj_in_data = jsonable_encoder(question)
db_obj = self.model(**obj_in_data, survey_id = survey_id)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
# for question_option in question.question_options:
# db_question_option = models.Question_option(**question_option, question_id=db_obj.id)
# db.add(db_question_option)
# db.commit()
return db_obj
def get_questions_by_survey(
self,
db: Session,
*,
survey_id: int
) ->List[Question]:
return(
db.query(self.model)
.filter(Question.survey_id == survey_id)
.all()
)
# def update_survey(self, db: Session, *, db_obj: Question, obj_in: Union[QuestionUpdate, Dict[str, Any]],) -> Question:
# if isinstance(obj_in, dict):
# update_data = obj_in
# else:
# update_data = obj_in.dict(exclude_unset=True)
# return super().update(db, db_obj=db_obj, obj_in=update_data)
question = CRUDQuestion(Question)
```
#### File: app/crud/crud_survey_aceptacion.py
```python
from typing import Optional, Union, List, Dict, Any
from fastapi.encoders import jsonable_encoder
from app.crud.base import CRUDBase
from app.models.survey_aceptacion import Survey_Aceptacion
from app.schemas.survey_aceptacion import SurveyAceptacionCreate, SurveyAceptacionUpdate
from sqlalchemy.orm import Session
class CRUDSurveyAceptacion(CRUDBase[Survey_Aceptacion, SurveyAceptacionCreate, SurveyAceptacionUpdate]):
def createSurveyAceptacion(
self,
db: Session,
*,
obj_in: SurveyAceptacionCreate,
survey_id: int,
respondent_id: int,
sum_weight_answers: int,
survey_weigth_max: int
) -> Survey_Aceptacion:
if(sum_weight_answers/survey_weigth_max) > 0.50:
aceptacion = 1
else:
aceptacion = 0
obj_in_data = jsonable_encoder(obj_in)
db_obj = self.model(**obj_in_data, survey_id = survey_id, respondent_id = respondent_id, aceptacion = aceptacion)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
return db_obj
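
        # Example of the acceptance rule above (illustrative numbers only): with
        # survey_weigth_max = 40 and sum_weight_answers = 25, the ratio 25/40 = 0.625
        # exceeds 0.50, so aceptacion is stored as 1.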
def get_all_aceptacion_by_survey(
self,
db: Session,
*,
survey_id: int,
) -> List[Survey_Aceptacion]:
return(
db.query(self.model)
.filter(Survey_Aceptacion.survey_id == survey_id, Survey_Aceptacion.aceptacion == 1)
.all()
)
def get_all_participants_by_survey(
self,
db: Session,
*,
survey_id: int,
) -> List[Survey_Aceptacion]:
return(
db.query(self.model)
.filter(Survey_Aceptacion.survey_id == survey_id)
.all()
)
survey_aceptacion = CRUDSurveyAceptacion(Survey_Aceptacion)
```
#### File: app/app/ecuaciones.py
```python
from typing import Any, List
import numpy as np
import math
from decimal import Decimal
class Ecuaciones:
    n: int = 0  # n = number of iterations
    resultados: List = []  # results of the general equation [Equation 6]
    pcr: Decimal = 0.0  # approximation of pc,r [Equation 5]
    lambdar: Decimal = 0.0  # value of λr [Equation 8]
    pndr: Decimal = 0.0  # lower credibility threshold [Equation 12]
    pnfr: Decimal = 0.0  # upper credibility threshold [Equation 13]
def __init__(self) -> None:
self.reset()
def factorial(self, numero: int):
if numero <= 0:
return 1
factorial = 1
while numero > 0:
factorial = factorial * numero
numero -= 1
return factorial
def evalua_ecuacion_general(self, r: int, l: int, pn: Decimal):
return (
((self.factorial(r))/((self.factorial(l)) * (self.factorial(r-l)))) *
(math.pow(pn, l)) *
(math.pow((1-pn), (r-l)))
)
def evalua_ecuacion_general_derivada(self, r: int, l: int, pcr: Decimal):
return (
((self.factorial(r))/(self.factorial(l) * self.factorial(r-l))) *
((math.pow(pcr, l) * (r-l) * math.pow((1-pcr), (r-l-1)) * (-1)) +
(math.pow((1-pcr), (r-l)) * l * math.pow(pcr, (l-1))))
)
def set_lamdar(self, pcr: Decimal, r: int) -> None:
m: int = 0
l: int = 0
if (r % 2) == 0:
m = (r / 2) + 1
else:
m = (r + 1) / 2
l = m
for l in np.arange(l, r+1):
self.lambdar += self.evalua_ecuacion_general_derivada(r, l, pcr)
def set_pndr(self, lambdar: Decimal, pcr: Decimal, n: int) -> None:
        # For n sublevels
self.pndr = (pcr) * (1 - (math.pow(lambdar, (-1 * n))))
    # Upper credibility threshold
def set_pnfr(self, pndr: Decimal, lamdar: Decimal, n: int) -> None:
        # For n sublevels
self.pnfr = pndr + math.pow(lamdar, (-1 * n))
def set_pcr(self, r: int):
i: Decimal = 0.0
iaux: int = 0
m: int = 0
l: int = 0
primera: int = 1
pr: Decimal = 0.0
praux: Decimal = 0.0
resta: Decimal = -1.0
resta2: Decimal = 0.0
if (r == 2):
self.pcr = 1.0
else:
            if ((r % 2) == 0):  # Even
if r == 4:
self.pcr = 0.77
elif r == 6:
self.pcr = 0.65
elif r == 8:
self.pcr = 0.60
elif r == 10:
self.pcr = 0.58
elif r == 12:
self.pcr = 0.56
elif r == 14:
self.pcr = 0.55
elif r == 16:
self.pcr = 0.54
elif r == 18:
self.pcr = 0.54
elif r == 20:
self.pcr = 0.53
else:
return "No hay calculos para estos valores"
else:
                if ((r % 2) != 0):  # Odd
self.pcr = 0.50
    def set_resultados_ecuacion_general(self, n: int, pn: Decimal, r: int) -> None:  # Equation 6: voting group of size r
pr: Decimal = 0.0
aux: Decimal = 0.0
m: int = 0
l: int = 0
i: int = 0
lista_resultados: List = []
if (r % 2) == 0:
m = (r / 2) + 1
else:
m = (r + 1) / 2
l = m
        # Compute total elimination (iterate until convergence)
if n == -1:
self.n = 0
while pn > 0.01:
try:
for l in np.arange(l, r + 1):
pr += self.evalua_ecuacion_general(r, l, pn)
lista_resultados.append(str(pr))
self.n += 1
if pn == pr:
break
else:
pn = pr
pr = 0
i += 1
except Exception:
raise Exception("An error occurred :(")
self.resultados = [0] * self.n
i = 0
for elem in lista_resultados:
self.resultados[i] = elem
i += 1
else:
self.resultados = [0] * self.n
for i in np.arange(0, n):
try:
for j in np.arange(l, r + 1):
pr += self.evalua_ecuacion_general(r, j, pn)
#self.resultados[i] = pr
self.resultados.append(pr)
pn = pr
pr = 0
except Exception:
raise Exception("An error occurred :(")
def get_pcr(self):
return self.pcr
def get_lambdar(self):
return self.lambdar
def get_pndr(self):
return self.pndr
def get_pnfr(self):
return self.pnfr
def get_resultados_ecuacion_general(self):
return self.resultados
def get_n(self):
return self.n
def reset(self):
self.n: int = 0 # n = number of iterations
self.resultados: List = [] # results vector of the general equation [Equation 6]
self.pcr: Decimal = 0.0 # approximation of pc,r [Equation 5]
self.lambdar: Decimal = 0.0 # computation of λr [Equation 8]
self.pndr: Decimal = 0.0 # lower credibility threshold [Equation 12]
self.pnfr: Decimal = 0.0 # upper credibility threshold [Equation 13]
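# A minimal usage sketch (assumption: this call order is inferred from the methods above,
# it is not documented in the original file):
#
#     eq = Ecuaciones()
#     eq.set_pcr(5)                                    # r = 5 voters (odd -> pc,r = 0.50)
#     eq.set_lamdar(eq.get_pcr(), 5)                   # λr at pc,r
#     eq.set_pndr(eq.get_lambdar(), eq.get_pcr(), 3)   # lower threshold for n = 3 sublevels
#     eq.set_pnfr(eq.get_pndr(), eq.get_lambdar(), 3)  # upper threshold
#     eq.set_resultados_ecuacion_general(3, 0.9, 5)    # iterate Equation 6 for 3 levels, pn = 0.9
#     print(eq.get_resultados_ecuacion_general())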
``` |
{
"source": "josuehfa/DAASystem",
"score": 2
} |
#### File: CoreSystem/examples/triangulate9.py
```python
from mesher.cgal_mesher import ConstrainedDelaunayTriangulation as CDT
from mesher.cgal_mesher import (
Point, Mesher, make_conforming_delaunay, make_conforming_gabriel, Criteria
)
def main():
cdt = CDT()
va = cdt.insert(Point(100, 269))
vb = cdt.insert(Point(246, 269))
vc = cdt.insert(Point(246, 223))
vd = cdt.insert(Point(303, 223))
ve = cdt.insert(Point(303, 298))
vf = cdt.insert(Point(246, 298))
vg = cdt.insert(Point(246, 338))
vh = cdt.insert(Point(355, 338))
vi = cdt.insert(Point(355, 519))
vj = cdt.insert(Point(551, 519))
vk = cdt.insert(Point(551, 445))
vl = cdt.insert(Point(463, 445))
vm = cdt.insert(Point(463, 377))
vn = cdt.insert(Point(708, 377))
vo = cdt.insert(Point(708, 229))
vp = cdt.insert(Point(435, 229))
vq = cdt.insert(Point(435, 100))
vr = cdt.insert(Point(100, 100))
cdt.insert_constraint(va, vb)
cdt.insert_constraint(vb, vc)
cdt.insert_constraint(vc, vd)
cdt.insert_constraint(vd, ve)
cdt.insert_constraint(ve, vf)
cdt.insert_constraint(vf, vg)
cdt.insert_constraint(vg, vh)
cdt.insert_constraint(vh, vi)
cdt.insert_constraint(vi, vj)
cdt.insert_constraint(vj, vk)
cdt.insert_constraint(vk, vl)
cdt.insert_constraint(vl, vm)
cdt.insert_constraint(vm, vn)
cdt.insert_constraint(vn, vo)
cdt.insert_constraint(vo, vp)
cdt.insert_constraint(vp, vq)
cdt.insert_constraint(vq, vr)
cdt.insert_constraint(vr, va)
vs = cdt.insert(Point(349, 236))
vt = cdt.insert(Point(370, 236))
vu = cdt.insert(Point(370, 192))
vv = cdt.insert(Point(403, 192))
vw = cdt.insert(Point(403, 158))
vx = cdt.insert(Point(349, 158))
cdt.insert_constraint(vs, vt)
cdt.insert_constraint(vt, vu)
cdt.insert_constraint(vu, vv)
cdt.insert_constraint(vv, vw)
cdt.insert_constraint(vw, vx)
cdt.insert_constraint(vx, vs)
vy = cdt.insert(Point(501, 336))
vz = cdt.insert(Point(533, 336))
v1 = cdt.insert(Point(519, 307))
v2 = cdt.insert(Point(484, 307))
cdt.insert_constraint(vy, vz)
cdt.insert_constraint(vz, v1)
cdt.insert_constraint(v1, v2)
cdt.insert_constraint(v2, vy)
print("Number of vertices:", cdt.number_of_vertices())
mesher = Mesher(cdt)
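# The two seed points below appear to lie inside the two inner polygons inserted above
# (vs..vx and vy..v2); presumably they mark those regions as holes to be excluded from
# refinement (an assumption -- the wrapper's seed semantics are not shown in this example).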
seeds = [
Point(505, 325),
Point(379, 172),
]
mesher.seeds_from(seeds)
make_conforming_delaunay(cdt)
print("Number of vertices:", cdt.number_of_vertices())
make_conforming_gabriel(cdt)
print("Number of vertices:", cdt.number_of_vertices())
mesher.criteria = Criteria(
aspect_bound=0.125,
size_bound=30
)
mesher.refine_mesh()
print("Number of vertices:", cdt.number_of_vertices())
if __name__ == '__main__':
main()
``` |
{
"source": "josuehfa/pyredemet",
"score": 2
} |
#### File: pyredemet/tests/test_pyredemet_TypeError.py
```python
import os
import sys
#import logging
import unittest
sys.path.insert(0, os.path.abspath(".."))
from pyredemet.src.pyredemet import pyredemet
class test_pyredemet(unittest.TestCase):
def test_init(self):
api_key = "banana"
with self.assertRaises(TypeError):
result = pyredemet(api_key=api_key)
server_url = 123
with self.assertRaises(TypeError):
result = pyredemet(api_key='<KEY>', server_url=server_url)
def setUp(self):
self.redemet = pyredemet(api_key = '<KEY>')
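# Note: '<KEY>' is a redacted placeholder left by the dataset anonymisation; a real
# REDEMET API key string would be needed for these calls to reach the live service.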
def test_get_aerodromos(self):
#API that returns information on aerodromes of the countries available in the REDEMET database.
#https://api-redemet.decea.gov.br/aerodromos
#Parameters
# Name Required Description Default Example
# pais No Country name. Brasil Argentina
#Request example
# https://api-redemet.decea.gov.br/aerodromos?api_key=SUA_CHAVE_AQUI&pais=Argentina
pais = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos(pais=pais)
def test_get_aerodromos_status(self):
# GET aerodromos/status
# API that returns the status of locations as colors.
# The colors are derived from parameters based on the location's visibility and ceiling, per the table below.
# Value Visibility(m) Condition Ceiling(ft)
# g >= 5000 and >= 1500
# y < 5000 and >= 1500 and/or < 1500 and > 500
# r < 1500 and/or < 600
# Endpoint
# https://api-redemet.decea.gov.br/aerodromos/status
# Parameters
# Name Required Description Default Example
# pais Yes Country whose status information is requested.
# To request more than one country, list them separated by commas. BRASIL BRASIL,ARGENTINA
# Request example
# https://api-redemet.decea.gov.br/aerodromos/status?api_key=SUA_CHAVE_AQUI
pais = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_status(pais=pais)
def test_get_aerodromos_info(self):
# API that returns weather-condition information for a location available in the REDEMET database.
# Endpoint
# https://api-redemet.decea.gov.br/aerodromos/info
# Parameters
# Name Required Description Default Example
# localidade Yes ICAO location indicator. None SBBR
# metar No Encoded METAR for the location. sim sim
# taf No Encoded TAF for the location nao sim
# datahora No Date in (YYYYMMDDHH) format Current date and time 2019010100
# Request example
# https://api-redemet.decea.gov.br/aerodromos/info?api_key=SUA_CHAVE_AQUI&localidade=SBBR&datahora=2019010100
localidade = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade)
localidade = 'SBB'
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade)
localidade = 'SBBR'
metar = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade,metar=metar)
taf = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade,taf=taf)
datahora = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade,datahora=datahora)
datahora = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade,datahora=datahora)
def test_get_produtos_amdar(self):
# GET produtos/amdar
# API that returns AMDAR information
# Endpoint
# https://api-redemet.decea.gov.br/produtos/amdar
# Parameters
# Name Required Description Default Example
# data No Date in YYYYMMDDHH format Current date 2020051200
# Request example
# https://api-redemet.decea.gov.br/produtos/amdar?api_key=SUA_CHAVE_AQUI&data_ini=2020030313&data=2020032415
data = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_amdar(data=data)
data = 'SBB'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_amdar(data=data)
def test_get_produtos_modelo(self):
# GET produtos/modelo
# API that returns images generated by the numerical models available on REDEMET.
# Endpoint
# https://api-redemet.decea.gov.br/produtos/modelo
# Parameters
# Name Required Description Default Example
# modelo Yes wifs None wifs
# area Yes x None b1
# produto Yes x None cb_top
# nivel Yes x None 600
# anima No x None 5
# Request example
# https://api-redemet.decea.gov.br/produtos/modelo?api_key=SUA_CHAVE_AQUI&modelo=wifs&area=b1&produto=vento-altitude-barb&nivel=600&anima=2
modelo = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo)
modelo = 'wifs'
produto = 'incldturb'
nivel = '850'
area = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
area = 'ab'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
modelo = 'wifs'
nivel = '850'
area = 'as'
produto = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
produto = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
modelo = 'wifs'
produto = 'incldturb'
area = 'as'
nivel = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
nivel = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
modelo = 'wifs'
produto = 'incldturb'
nivel = '850'
area = 'as'
anima = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel,anima=anima)
anima = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel,anima=anima)
def test_get_produto_radar(self):
# GET produtos/radar
# API that returns weather-radar echo images.
# METAR availability runs from 01/01/2006 to the present date.
# Endpoint
# https://api-redemet.decea.gov.br/produtos/radar/{tipo}
# Parameters
# Name Required Description Default Example
# tipo Yes Available echo types
# maxccapi 07km
# data No Date in YYYYMMDDHH format Current date 2020031211
# area Yes Available radars
# None pv
# anima No Number of radar echoes to animate.
# The animation uses the data option as its last image.
# The maximum value allowed for the animation is 15. 1 10
# Request example
# https://api-redemet.decea.gov.br/produtos/radar/maxcappi?api_key=SUA_CHAVE_AQUI&data=2020032410
area='al'
tipo = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area)
tipo = 'ab'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area)
tipo='maxcappi'
area = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area)
area = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area)
area='al'
tipo='maxcappi'
data = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area,data=data)
data = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area,data=data)
area='al'
tipo='maxcappi'
anima = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area,anima=anima)
anima = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area,anima=anima)
def test_get_produto_satelite(self):
# GET produtos/satelite
# API that returns satellite-image information available on REDEMET.
# Endpoint
# https://api-redemet.decea.gov.br/produtos/satelite/{tipo}
# Parameters
# Name Required Description Default Example
# tipo Yes Available types
# None realcada
# data No Date in YYYYMMDDHH format Current date 2020051200
# anima No Number of images to animate.
# The animation uses the data option as its last image.
# The maximum value allowed for the animation is 15. 1 10
# Request example
# https://api-redemet.decea.gov.br/produtos/satelite/realcada?api_key=SUA_CHAVE_AQUI&data=2020032114
tipo = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo)
tipo = 'ab'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo)
tipo='realcada'
data = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo,data=data)
data = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo,data=data)
tipo='realcada'
anima = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo,anima=anima)
anima = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo,anima=anima)
def test_get_produto_stsc(self):
# GET produtos/stsc
# API that returns thunderstorm-occurrence information.
# Endpoint
# https://api-redemet.decea.gov.br/produtos/stsc
# Parameters
# Name Required Description Default Example
# data No Date in YYYYMMDDHH format Current date 2020051200
# anima No Number of occurrences to animate.
# The animation uses the data option as its last image.
# The maximum value allowed for the animation is 60. 1 10
# Request example
# https://api-redemet.decea.gov.br/produtos/satelite/stsc?api_key=SUA_CHAVE_AQUI&data=2020032114
data = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_stsc(data=data)
data = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_stsc(data=data)
tipo='realcada'
anima = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_stsc(anima=anima)
anima = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_stsc(anima=anima)
def test_get_mensagens_aviso(self):
# GET mensagens/aviso
# API that returns Aerodrome Warning messages for the locations available in the REDEMET database.
# Messages are available from 01/01/2003 to the present date.
# Endpoint
# https://api-redemet.decea.gov.br/mensagens/aviso/{localidades}
# Parameters
# Name Required Description Default Example
# localidades Yes ICAO location indicator.
# To request more than one location, list them separated by commas with no spaces. None SBBR
# data_ini No Date in YYYYMMDDHH format Current date 2020051200
# data_fim No Date in YYYYMMDDHH format Current date 2020051206
# page_tam No Number of records per page 150 100
# Request example
# https://api-redemet.decea.gov.br/mensagens/aviso/SBBG?api_key=SUA_CHAVE_AQUI&data_ini=2020030313&data_fim=2020030313
localidades = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades)
localidades = 'SBBR, SBCF'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades)
localidades = 'SBBR,SBCF'
data_ini = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades,data_ini=data_ini)
data_ini = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades,data_ini=data_ini)
data_fim = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades,data_fim=data_fim)
data_fim = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades,data_fim=data_fim)
def test_get_mensagens_gamet(self):
# GET mensagens/gamet
# API that returns GAMET messages for the countries available in the REDEMET database.
# Endpoint
# https://api-redemet.decea.gov.br/mensagens/gamet
# Parameters
# Name Required Description Default Example
# pais Yes Country name Brasil Argentina
# data_ini No Date in YYYYMMDDHHII format Current date 202005120000
# data_fim No Date in YYYYMMDDHHII format Current date 202005120600
# page_tam No Number of records per page 150 100
# Request example
# https://api-redemet.decea.gov.br/mensagens/gamet/?api_key=SUA_CHAVE_AQUI&data_ini=202006120300&data_fim=202006120300
pais = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais)
pais = 'Brasil'
data_ini = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais,data_ini=data_ini)
data_ini = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais,data_ini=data_ini)
data_fim = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais,data_fim=data_fim)
data_fim = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais,data_fim=data_fim)
def test_get_mensagens_metar(self):
# GET mensagens/metar
# API that returns METAR messages for the locations available in the REDEMET database.
# Messages are available from 01/01/2003 to the present date.
# Endpoint
# https://api-redemet.decea.gov.br/mensagens/metar/{localidades}
# Parameters
# Name Required Description Default Example
# localidades Yes ICAO location indicator.
# To request more than one location, list them separated by commas with no spaces. None SBBR
# data_ini No Date in YYYYMMDDHH format Current date 2020051200
# data_fim No Date in YYYYMMDDHH format Current date 2020051206
# page_tam No Number of records per page 150 100
# Request example
# https://api-redemet.decea.gov.br/mensagens/metar/SBGL,SBBR?api_key=SUA_CHAVE_AQUI&data_ini=2019010100&data_fim=2019010101
localidades = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_metar(localidades=localidades)
localidades = 'SBBR, SBCF'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_metar(localidades=localidades)
localidades = 'SBBR,SBCF'
data_ini = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_metar(localidades=localidades,data_ini=data_ini)
data_ini = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_metar(localidades=localidades,data_ini=data_ini)
data_ini = '2020050512'
data_fim = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_metar(localidades=localidades,data_ini=data_ini,data_fim=data_fim)
data_fim = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_metar(localidades=localidades,data_ini=data_ini,data_fim=data_fim)
def test_get_mensagens_meteograma(self):
# GET mensagens/meteograma
# API that returns METAR, TAF and Aerodrome Warning message information for the locations available in the REDEMET database.
# Besides the messages above, some additional information is also provided, such as:
# Decoded METAR and TAF for the requested date and time
# Information for up to 96 past hours relative to the requested date and time
# METAR group separation
# Messages are available from 01/01/2003 to the present date.
# Endpoint
# https://api-redemet.decea.gov.br/mensagens/meteograma/{localidades}
# Parameters
# Name Required Description Default Example
# localidade Yes ICAO location indicator.
# Only one location is allowed per request. None SBBR
# data_hora No Date in YYYYMMDDHH format Current date 2020051200
# horas No How many past hours to include, counting back from data_hora 96 72
# Request example
# https://api-redemet.decea.gov.br/mensagens/meteograma/SBBR?api_key=SUA_CHAVE_AQUI&data_hora=2020042114
localidade = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_meteograma(localidade=localidade)
localidade = 'SBBR, SBCF'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_meteograma(localidade=localidade)
localidade = 'SBBR'
data_hora = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_meteograma(localidade=localidade,data_hora=data_hora)
data_hora = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_meteograma(localidade=localidade,data_hora=data_hora)
def test_get_mensagens_pilot(self):
# GET mensagens/pilot
# API that returns PILOT messages for the stations available in the REDEMET database.
# Endpoint
# https://api-redemet.decea.gov.br/mensagens/pilot
# Parameters
# Name Required Description Default Example
# estacao Yes Synoptic station number. None 83378
# data_ini No Date in YYYYMMDDHH format Current date 2020051200
# data_fim No Date in YYYYMMDDHH format Current date 2020051206
# page_tam No Number of records per page 150 100
# Request example
# https://api-redemet.decea.gov.br/mensagens/pilot?api_key=SUA_CHAVE_AQUI&estacao=83378&data_ini=2020032912&data_fim=2020032912
estacao = 83378
data_ini = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_pilot(estacao=estacao,data_ini=data_ini)
data_ini = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_pilot(estacao=estacao,data_ini=data_ini)
data_ini = '2020050512'
data_fim = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_pilot(estacao=estacao,data_ini=data_ini,data_fim=data_fim)
data_fim = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_pilot(estacao=estacao,data_ini=data_ini,data_fim=data_fim)
def test_get_mensagens_sigmet(self):
# GET mensagens/sigmet
# API that returns SIGMET messages for the countries available in the REDEMET database.
# Endpoint
# https://api-redemet.decea.gov.br/mensagens/sigmet
# Parameters
# Name Required Description Default Example
# pais Yes Country name Brasil Argentina
# data_ini No Date in YYYYMMDDHHII format Current date 202005120000
# data_fim No Date in YYYYMMDDHHII format Current date 202005120600
# page_tam No Number of records per page 150 100
# Request example
# https://api-redemet.decea.gov.br/mensagens/sigmet/?api_key=SUA_CHAVE_AQUI&data_ini=202003291200&data_fim=202003291200
pais = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_sigmet(pais=pais)
pais = 'Brasil'
data_ini = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_sigmet(pais=pais,data_ini=data_ini)
data_ini = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_sigmet(pais=pais,data_ini=data_ini)
data_ini = '2020050512'
data_fim = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_sigmet(pais=pais,data_ini=data_ini,data_fim=data_fim)
data_fim = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_sigmet(pais=pais,data_ini=data_ini,data_fim=data_fim)
def test_get_mensagens_taf(self):
# GET mensagens/taf
# API that returns TAF messages for the locations available in the REDEMET database.
# Messages are available from 01/01/2003 to the present date.
# Endpoint
# https://api-redemet.decea.gov.br/mensagens/taf/{localidades}
# Parameters
# Name Required Description Default Example
# localidades Yes ICAO location indicator.
# To request more than one location, list them separated by commas with no spaces. None SBBR
# data_ini No Date in YYYYMMDDHH format Current date 2020051200
# data_fim No Date in YYYYMMDDHH format Current date 2020051206
# page_tam No Number of records per page 150 100
# fim_linha No Used to format the TAF
# Possible values:
# texto: line break with "\n"
# html: line break with "<br \>"
# None texto
# Request example
# https://api-redemet.decea.gov.br/mensagens/taf/SBBR,SBGL?api_key=SUA_CHAVE_AQUI&data_ini=2020031005&data_fim=2020031005&fim_linha=texto
localidades = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_taf(localidades=localidades)
localidades = 'SBBR, SBCF'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_taf(localidades=localidades)
localidades = 'SBBR,SBCF'
data_ini = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_taf(localidades=localidades,data_ini=data_ini)
data_ini = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_taf(localidades=localidades,data_ini=data_ini)
data_ini = '2020050512'
data_fim = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_taf(localidades=localidades,data_ini=data_ini,data_fim=data_fim)
data_fim = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_taf(localidades=localidades,data_ini=data_ini,data_fim=data_fim)
fim_linha = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_taf(localidades=localidades,fim_linha=fim_linha)
fim_linha = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_taf(localidades=localidades,fim_linha=fim_linha)
def test_get_mensagens_temp(self):
# GET mensagens/temp
# API that returns TEMP messages for the stations available in the REDEMET database.
# Messages are available from 01/01/2003 to the present date.
# Endpoint
# https://api-redemet.decea.gov.br/mensagens/temp
# Parameters
# Name Required Description Default Example
# estacao Yes Synoptic station number.
# Only one station is allowed per request None 83378
# data_ini No Date in YYYYMMDDHH format Current date 2020051200
# data_fim No Date in YYYYMMDDHH format Current date 2020051206
# page_tam No Number of records per page 150 100
# Request example
# https://api-redemet.decea.gov.br/mensagens/temp?api_key=SUA_CHAVE_AQUI&estacao=83378&data_ini=2020030912&data_fim=2020030912
estacao = 83378
data_ini = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_temp(estacao=estacao,data_ini=data_ini)
data_ini = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_temp(estacao=estacao,data_ini=data_ini)
data_ini = '2020050512'
data_fim = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_temp(estacao=estacao,data_ini=data_ini,data_fim=data_fim)
data_fim = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_temp(estacao=estacao,data_ini=data_ini,data_fim=data_fim)
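# Not part of the original file: a conventional entry point (standard unittest usage,
# assumed rather than documented) so the suite can also be run directly with
# "python test_pyredemet_TypeError.py" as well as via "python -m unittest".
if __name__ == '__main__':
    unittest.main()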
``` |
{
"source": "josueischiu12/loginpy",
"score": 2
} |
#### File: loginpy/pags/models.py
```python
from django.db import models
# Create your models here.
class Paginas(models.Model):
"""Model definition for Paginas."""
# TODO: Define fields here
titulo = models.CharField(max_length=200)
contenido = models.TextField()
orden = models.SmallIntegerField(default=0)
creado = models.DateTimeField(auto_now_add=True)
editado = models.DateTimeField(auto_now=True)
class Meta:
"""Meta definition for Paginas."""
verbose_name = 'Pagina'
verbose_name_plural = 'Paginas'
ordering = ['orden', 'titulo']
def __str__(self):
"""Unicode representation of Paginas."""
return self.titulo
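# A minimal usage sketch (assumption: run inside a configured Django project, e.g. from
# "python manage.py shell"; not part of the original file):
#
#     from pags.models import Paginas
#     Paginas.objects.create(titulo="Inicio", contenido="Bienvenido", orden=1)
#     for pagina in Paginas.objects.all():   # ordered by (orden, titulo) via Meta.ordering
#         print(pagina)                      # __str__ returns the titulo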
``` |
{
"source": "josueisonfire/CBBAS_git",
"score": 3
} |
#### File: microblog-0.13/app/models.py
```python
from datetime import datetime
from hashlib import md5
from time import time
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from json import *
import jwt
from app import app, db, login
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
# posts = db.relationship('Post', backref='author', lazy='dynamic')
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime, default=datetime.utcnow)
authenticated = db.Column(db.Boolean, default=False, nullable=False)
# followed = db.relationship(
# 'User', secondary=followers,
# primaryjoin=(followers.c.follower_id == id),
# secondaryjoin=(followers.c.followed_id == id),
# backref=db.backref('followers', lazy='dynamic'), lazy='dynamic')
def __repr__(self):
return '<User {}>'.format(self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def avatar(self, size):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
digest, size)
def get_reset_password_token(self, expires_in=600):
return jwt.encode(
{'reset_password': self.id, 'exp': time() + expires_in},
app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8')
# to authenticate users.
# acc confirmation expires in 7 days.
# -- must add additional function to send another authentication email.
def generate_account_confirmation_token(self, expires_in=10080):
return jwt.encode({'auth_req': self.id, 'exp': time() + expires_in}, app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8')
@staticmethod
def verify_reset_password_token(token):
try:
id = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])['reset_password']
except:
return
return User.query.get(id)
@staticmethod
def verify_account_confirmation_token(token):
try:
id = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])['auth_req']
except:
return None
return User.query.get(id)
@login.user_loader
def load_user(id):
return User.query.get(int(id))
# class Post(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# body = db.Column(db.String(140))
# timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
# user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# def __repr__(self):
# return '<Post {}>'.format(self.body)
'''
Classroom Model for the CBBAS.
Fields:
id: Class ID used to list class models within the database. Primary Key.
instructor: Instructor that runs the class. Initially, will be a one-to-one relationship.
start_date: Start Date of the class.
end_date: End date of the class.
curr_week: The current week of the class.
attendance: Attendance data. Will be stored in a one-to-many relationship.
class_day_time: the day & time the class is run. one-to-one relationship. Will be implemented in the future.
The initiation form should contain the following information:
The implicit data that is already available are: {Instructor}
Class Name
Class Start Date
Class End Date
Classroom Schedule. {DAYS(multiple), {Start time, End time}}
// meaning: classroom schedule can have multiple days a week in which it runs, and the form must adjust according to the requirements. Each day must have a start and end time, which must also be filled subsequently.
The data editing forms should contain the following information:
FORM: ADD STUDENT
<> Student Name (First, Last)
<> Student SBUID
FORM: DELETE STUDENT
<> Student SBUID
FORM: Edit Attendance:
(TBD) -- according to database logic.
'''
class Classroom(db.Model):
id = db.Column(db.Integer, primary_key=True)
start_date = db.Column(db.DateTime)
end_date = db.Column(db.DateTime)
current_week = db.Column(db.Integer)
class_name = db.Column(db.String(70))
# relationships
# instructor: 1:1
# attendance: 1:N
# class_day_time: 1:1
# <?> student_list
def __repr__(self):
return '<Class {}>'.format(self.class_name)
'''
Student Model for the CBBAS
'''
class Student(db.Model):
id = db.Column(db.Integer, primary_key=True)
sbuid = db.Column(db.Integer, index=True, unique=True)
first_name = db.Column(db.String(50))
last_name = db.Column(db.String(50))
#relationships
# class_id
# USER
class Instructor(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
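# A minimal usage sketch of the reset-token round trip (assumption: run inside an
# application context with the database initialised; not part of the original file):
#
#     u = User(username='susan', email='susan@example.com')
#     u.set_password('secret')
#     db.session.add(u)
#     db.session.commit()
#     token = u.get_reset_password_token()
#     assert User.verify_reset_password_token(token).id == u.id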
``` |