{
    "source": "Jeanselme/AdaBoost_Neural_Network",
    "score": 4
}
#### File: AdaBoost_Neural_Network/NeuralNetwork/Network.py
```python
import numpy as np
from NeuralNetwork.Activation import fSigmoid as fActivation, dSigmoid as dActivation
from NeuralNetwork.Cost import fQuadratic as fCost, dQuadratic as dCost
class NeuralNetwork:
"""
Class of the neural network which works with backpropagation
"""
def __init__(self, dims):
"""
Creates a neural network respecting the different given dimensions,
this should be a list of number, wher the first represents the number of
inputs and the last, the number of outputs.
The neural network will be fully connected
"""
self.layersNumber = len(dims) - 1
self.weights = []
self.biases = []
np.random.seed(42)
for d in range(self.layersNumber):
self.weights.append(np.random.randn(dims[d+1], dims[d]))
self.biases.append(np.random.randn(dims[d+1], 1))
def compute(self, inputs):
"""
Computes the result of the network by propagation
"""
res = inputs
for layer in range(self.layersNumber):
weight = self.weights[layer]
bias = self.biases[layer]
res = fActivation(np.dot(weight, res) + bias)
return res
def backpropagationWeighted(self, inputs, inputsWeights, targets,
learningRate, batchSize, maxIteration):
"""
        Computes the backpropagation of the gradient in order to reduce the
        quadratic error, with a weight for each input.
        Standard backpropagation corresponds to all input weights equal to one.
"""
error, pastError = 0, 0
errorVector, classifiedVector = [], []
        for iteration in range(maxIteration):
            errorVector, classifiedVector = [], []
            # Decrease the learningRate when the error increased over the last epoch
            if iteration > 1 and error > pastError:
                learningRate /= 2
            pastError = error
            error = 0  # Reset the epoch error before accumulating it again
            # Process each mini-batch
            for batch in range(len(targets)//batchSize - 1):
totalDiffWeight = [np.zeros(weight.shape) for weight in self.weights]
totalDiffBias = [np.zeros(bias.shape) for bias in self.biases]
# Computes the difference for each batch
for i in range(batch*batchSize,(batch+1)*batchSize):
                    # Scale each sample's gradient by its boosting weight
                    # (inputsWeights is indexed by the sample i, not the layer j)
                    diffWeight, diffBias, diffError, classified = self.computeDiff(inputs[i], targets[i])
                    totalDiffWeight = [totalDiffWeight[j] + diffWeight[j]*inputsWeights[i]
                                       for j in range(len(totalDiffWeight))]
                    totalDiffBias = [totalDiffBias[j] + diffBias[j]*inputsWeights[i]
                                     for j in range(len(totalDiffBias))]
error += diffError
errorVector.append(diffError)
classifiedVector.append(classified)
# Update weights and biases of each neuron
self.weights = [self.weights[i] - learningRate*totalDiffWeight[i]
for i in range(len(totalDiffWeight))]
self.biases = [self.biases[i] - learningRate*totalDiffBias[i]
for i in range(len(totalDiffBias))]
print("{} / {}".format(iteration+1, maxIteration), end = '\r')
print("\nBackPropagation done !")
return errorVector, classifiedVector
def computeDiff(self, input, target):
"""
Executes the forward and backward propagation for the given data
"""
diffWeight = [np.zeros(weight.shape) for weight in self.weights]
diffBias = [np.zeros(bias.shape) for bias in self.biases]
# Forward
        # layerSum contains the pre-activation result of every node
# layerAct = fActivation(layerSum)
layerSum = []
lastRes = input
layerAct = [lastRes]
for layer in range(self.layersNumber):
layerRes = np.dot(self.weights[layer], lastRes) + self.biases[layer]
lastRes = fActivation(layerRes)
layerSum.append(layerRes)
layerAct.append(lastRes)
classified = False
if (np.argmax(lastRes) == np.argmax(target)) :
classified = True
# Backward
diffError = sum(fCost(lastRes, target))
        delta = dCost(lastRes, target) * dActivation(layerSum[-1])  # Pre-activation, consistent with the hidden layers below
diffBias[-1] = delta
diffWeight[-1] = np.dot(delta, layerAct[-2].transpose())
for layer in reversed(range(self.layersNumber-1)):
delta = np.dot(self.weights[layer+1].transpose(), delta) *\
dActivation(layerSum[layer])
diffBias[layer] = delta
diffWeight[layer] = np.dot(delta, layerAct[layer].transpose())
return diffWeight, diffBias, diffError, classified
```
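A minimal usage sketch (not part of the repository), assuming the `NeuralNetwork` package above is importable; inputs and targets are column vectors of shape `(dim, 1)`, as the forward pass implies, and uniform `inputsWeights` recovers standard backpropagation:
```python
import numpy as np
from NeuralNetwork.Network import NeuralNetwork

# Toy dataset: 20 samples of dimension 4, one-hot targets over 2 classes
inputs = [np.random.rand(4, 1) for _ in range(20)]
targets = [np.eye(2)[np.random.randint(2)].reshape((2, 1)) for _ in range(20)]

network = NeuralNetwork([4, 8, 2])
errors, classified = network.backpropagationWeighted(
    inputs, np.ones(len(inputs)) / len(inputs), targets,
    learningRate=0.5, batchSize=5, maxIteration=10)
print(network.compute(inputs[0]))
```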
{
    "source": "Jeanselme/CanonicalAutocorrelationAnalysis",
    "score": 3
}
#### File: CanonicalAutocorrelationAnalysis/model/hash.py
```python
import numpy as np
from model.caa import CAAModel
class HashCAA(CAAModel):
def transform(self, x):
"""
        Transforms the set of points by projecting them on the CAA
        and computing the r2
Arguments:
x {Array} -- Set of points
"""
assert self.caas != {}, "CAA not trained"
projections = []
for c in self.caas:
            projections.extend(self.caas[c].projections[:self.number_cell] if self.number_cell is not None else self.caas[c].projections)
return np.array([p.rSquareProjection(x) for p in projections]).reshape((1, -1))
```
{
    "source": "Jeanselme/COVID-19",
    "score": 3
}
#### File: Jeanselme/COVID-19/download_result_ihme.py
```python
import os
import requests, zipfile, io
from us import states
import numpy as np
import pandas as pd
import argparse
# Parse input / output
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', help="Output Path", default ='results')
parser.add_argument('-l', '--last', help="Download only last", action='store_true')
args = parser.parse_args()
if not os.path.isdir(os.path.join(args.output, 'ihme')):
os.mkdir(os.path.join(args.output, 'ihme'))
def fromStateToAbbr(state):
search = states.lookup(state)
if search is not None:
return search.abbr
else:
return np.nan
# Download urls
urls = ['https://ihmecovid19storage.blob.core.windows.net/latest/ihme-covid19.zip']
# Add previous urls
if not args.last:
urls += ["https://ihmecovid19storage.blob.core.windows.net/archive/{:%Y-%m-%d}/ihme-covid19.zip".format(d) for d in pd.date_range(start='2020-03-28', end=pd.Timestamp('today'))]
for url in urls:
print('Download from {}'.format(url))
try:
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
csv_path = [i.filename for i in z.filelist if '.csv' in i.filename][0]
date = csv_path.split('/')[0].split('.')[0].replace('_', '')
# Parse to have similar format
ihme_predictions = pd.read_csv(z.open(csv_path), parse_dates=True)
ihme_predictions['date'] = ihme_predictions[[c for c in ihme_predictions.columns if 'date' in c][0]] # Change name in some version
ihme_predictions['state'] = ihme_predictions[[c for c in ihme_predictions.columns if 'location' in c][0]].apply(fromStateToAbbr) # Change name in some version
ihme_predictions['pred_deaths'] = ihme_predictions['totdea_mean']
ihme_predictions['pred_hosp'] = ihme_predictions['allbed_mean']
ihme_predictions = ihme_predictions[['state', 'date', 'pred_hosp', 'pred_deaths']].dropna()
# Save per state
for state in ihme_predictions.state.unique():
ihme_predictions[ihme_predictions.state == state][['date', 'pred_deaths', 'pred_hosp']].to_csv(os.path.join(args.output, 'ihme', '{}_{}.csv'.format(state, date)), index = False)
except Exception as e:
print("Format has changed")
print(e)
```
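After a run, each per-state file can be loaded back with pandas; the file name below is hypothetical (state abbreviation plus the release date extracted from the archive):
```python
import pandas as pd

# Hypothetical file produced by the script above: results/ihme/<state>_<date>.csv
predictions = pd.read_csv('results/ihme/NY_20200402.csv', parse_dates=['date'])
print(predictions.tail())
```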
{
    "source": "Jeanselme/KernelConstrainedKmeans",
    "score": 3
}
#### File: Jeanselme/KernelConstrainedKmeans/wkckmeans.py
```python
import numpy as np
def weightedKernelConstrainedKmeans(kernel, assignation, constraints = None, weights = None, max_iteration = 100, threshold_certainty = 0):
"""
Compute kernel constrained kmeans
Arguments:
kernel {Array n*n} -- Kernel
assignation {Array n} -- Initial assignation
Keyword Arguments:
        constraints {Array n*n} -- Constraint matrix with values between -1 and 1
            (default: {None} -- No constraint => Simple kernel kmeans)
        weights {Array n} -- Initial weights for the different points (default: {None} -- Equal weights)
        max_iteration {int} -- Maximum number of iterations (default: {100})
        threshold_certainty {float} -- Certainty level for enforcing a constraint:
            values below -threshold_certainty are treated as cannot-link,
            values above +threshold_certainty as must-link
            (default: {0} -- Any non-zero constraint is considered)
Returns:
Assignation - Array n
"""
assignation_cluster, intra_distance, number, base_distance = {}, {}, {}, {}
index = np.arange(len(assignation))
clusters = np.unique(assignation)
iteration, change = 0, True
if weights is None:
weights = np.ones_like(assignation)
while change and iteration < max_iteration:
change = False
np.random.shuffle(index)
# Update cluster centers
for k in clusters:
assignation_cluster[k] = np.multiply((assignation == k), weights).reshape((-1,1))
intra_distance[k] = np.matmul(kernel, assignation_cluster[k])
number[k] = np.sum(assignation_cluster[k])
base_distance[k] = np.dot(assignation_cluster[k].T, intra_distance[k])/(number[k]**2)
for i in index:
previous = assignation[i]
if constraints is None:
possibleClusters = clusters
else:
# Computes possible cluster for the point that does not break any constraint
possibleClusters = [c for c in clusters if
(c not in np.unique(assignation[constraints[i, :] < -threshold_certainty])) and # Cannot link constraint
((c in np.unique(assignation[constraints[i, :] > threshold_certainty])) or # Must link constraint
(len(assignation[constraints[i, :] > threshold_certainty]) == 0))] # In case no constraint
assert len(possibleClusters) > 0, "No cluster respecting constraint"
distance = {k: float(base_distance[k]) - 2*intra_distance[k][i]/number[k] for k in possibleClusters}
assignation[i] = min(distance, key=lambda d: float(distance[d]))
if previous != assignation[i]:
change = True
iteration += 1
return assignation
```
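A small usage sketch under assumptions (a linear kernel and a single must-link pair); the function updates and returns the initial assignation:
```python
import numpy as np
from wkckmeans import weightedKernelConstrainedKmeans

rng = np.random.RandomState(0)
points = np.vstack([rng.randn(10, 2), rng.randn(10, 2) + 5])  # Two blobs
kernel = points.dot(points.T)                                 # Linear kernel
assignation = rng.randint(0, 2, size=len(points))             # Random initial assignation
constraints = np.zeros((len(points), len(points)))
constraints[0, 1] = constraints[1, 0] = 1                     # Must-link points 0 and 1

print(weightedKernelConstrainedKmeans(kernel, assignation, constraints))
```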
#### File: Jeanselme/KernelConstrainedKmeans/wksckmeans.py
```python
import numpy as np
def weightedKernelSoftConstrainedKmeans(kernel, assignation, constraints = None, penalty = 0.5, weights = None, max_iteration = 100, threshold_certainty = 0):
"""
Compute kernel constrained kmeans with a penalty for broken constraints
Arguments:
kernel {Array n*n} -- Kernel
assignation {Array n} -- Initial assignation
Keyword Arguments:
        constraints {Array n*n} -- Constraint matrix with values between -1 and 1
            (default: {None} -- No constraint => Simple kernel kmeans)
        weights {Array n} -- Initial weights for the different points (default: {None} -- Equal weights)
        max_iteration {int} -- Maximum number of iterations (default: {100})
        threshold_certainty {float} -- Certainty level for enforcing a constraint:
            values below -threshold_certainty are treated as cannot-link,
            values above +threshold_certainty as must-link
            (default: {0} -- Any non-zero constraint is considered)
Returns:
Assignation - Array n
"""
intra_distance, number, base_distance = {}, {}, {}
index = np.arange(len(assignation))
clusters = np.unique(assignation)
if weights is None:
weights = np.ones_like(assignation)
max_distance = np.max([kernel[i, i] + kernel[j, j] - 2 * kernel[i, j] for i in index for j in index[:i]])
for _ in range(max_iteration):
iteration, change = 0, True
# Update cluster centers
for k in clusters:
assignation_cluster = np.multiply((assignation == k), weights).reshape((-1,1))
intra_distance[k] = np.matmul(kernel, assignation_cluster)
number[k] = np.sum(assignation_cluster)
base_distance[k] = np.dot(assignation_cluster.T, intra_distance[k])/(number[k]**2)
assignation_previous = assignation.copy()
# Double loop : centers are fixed but assignation is updated to compute broken constraints
while change and iteration < max_iteration:
change = False
np.random.shuffle(index)
for i in index:
previous = assignation[i]
distance = {k: float(base_distance[k] - 2*intra_distance[k][i]/number[k]) for k in clusters}
if constraints is not None and np.count_nonzero(constraints[i]):
for k in clusters:
                        # Also add the penalty of putting this point in this cluster
# Computes broken constraints
assignation_cluster = np.multiply((assignation == k), weights).reshape((-1,1))
not_assigned_cluster = np.multiply((assignation != k), weights).reshape((-1,1))
broken_must_link = np.multiply(not_assigned_cluster.T, constraints[i] > threshold_certainty)
broken_cannot_link = np.multiply(assignation_cluster.T, constraints[i] < -threshold_certainty)
# Computes penalty
## Allow to break ML if far away
## Allow to break CL if really close
                        penalty_ml = np.sum(broken_must_link) * max_distance \
                                     - np.dot(broken_must_link, kernel.diagonal()) \
                                     + 2 * np.dot(broken_must_link, kernel[i, :]) \
                                     - np.sum(broken_must_link) * kernel[i,i]
penalty_cl = np.dot(broken_cannot_link, kernel.diagonal()) \
- 2 * np.dot(broken_cannot_link, kernel[i, :]) \
+ np.sum(broken_cannot_link) * kernel[i,i]
distance[k] += penalty * float(penalty_cl + penalty_ml)
assignation[i] = min(distance, key=lambda d: float(distance[d]))
if previous != assignation[i]:
change = True
iteration += 1
# Stops if no change
if np.array_equal(assignation, assignation_previous):
break
return assignation
```
{
    "source": "Jeanselme/SklearnTS",
    "score": 3
}
#### File: SklearnTS/metrics/comparison.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from metrics.histogram import histPlot
from metrics.calibration import calibrationPlot
from metrics.roc import rocPlot, computeEvolutionRoc
from metrics.precision_recall import aprEvolutionPlot, precisionRecallPlot
def rocCompare(listModels, truth, classes = None, **arg_roc):
"""
    Plots the ROC curves of the different models
Arguments:
listModels {List of (name, predictions)*} -- Models to display
truth {Dict / List of true labels} -- Ground truth
        classes {Dict "+":int, "-":int} -- Classes to consider to plot (default: {None} ie {"+":1, "-":0})
"""
for reverse in [False, True]:
for log in [False, True]:
plt.figure("Roc")
plt.plot(np.linspace(0, 1, 100), np.linspace(0, 1, 100), 'k--', label="Random")
if reverse:
plt.xlabel('False negative rate')
plt.ylabel('True negative rate')
plt.title('Reverse ROC curve')
else:
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
for (name, predictions) in listModels:
rocPlot(predictions, truth, classes, name, "Roc", reverse, **arg_roc)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15))
if log:
plt.xscale('log')
plt.grid(alpha = 0.3)
plt.ylim(-0.1, 1.1)
plt.show()
def prCompare(listModels, truth, classes = None):
"""
    Plots the precision-recall curves of the different models
Arguments:
listModels {List of (name, predictions)*} -- Models to display
truth {Dict / List of true labels} -- Ground truth
        classes {Dict "+":int, "-":int} -- Classes to consider to plot (default: {None} ie {"+":1, "-":0})
"""
plt.figure("Precision Recall")
for (name, predictions) in listModels:
precisionRecallPlot(predictions, truth, classes, name, "Precision Recall")
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15))
plt.grid(alpha = 0.3)
plt.xlabel('Precision')
plt.ylabel('Recall')
plt.xlim(-0.1, 1.1)
plt.ylim(-0.1, 1.1)
plt.show()
def histCompare(listModels, truth, classes = None, splitPosNeg = False, kde = False):
"""
    Plots the histograms of predictions of the different models
Arguments:
listModels {List of (name, predictions)*} -- Models to display
truth {Dict / List of true labels} -- Ground truth
        classes {Dict "+":int, "-":int} -- Classes to consider to plot (default: {None} ie {"+":1, "-":0})
"""
plt.figure("Histogram Probabilities")
plt.xlabel('Predicted Probability')
plt.ylabel('Frequency')
plt.title('Histogram Probabilities')
for (name, predictions) in listModels:
histPlot(predictions, truth, classes, name, "Histogram Probabilities", splitPosNeg, kde)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15))
plt.show()
def calibrationCompare(listModels, truth, classes = None, n_bins = 5):
"""
    Plots the calibration curves of the different models
Arguments:
listModels {List of (name, predictions)*} -- Models to display
truth {Dict / List of true labels} -- Ground truth
        classes {Dict "+":int, "-":int} -- Classes to consider to plot (default: {None} ie {"+":1, "-":0})
"""
plt.figure("Calibration")
plt.xlabel('Mean Predicted Value')
plt.ylabel('Fraction Positive')
plt.title('Calibration')
plt.plot(np.linspace(0, 1, 100), np.linspace(0, 1, 100), 'k--', label="Ideal")
for (name, predictions) in listModels:
calibrationPlot(predictions, truth, classes, name, "Calibration", n_bins)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15))
plt.grid(alpha = 0.3)
plt.show()
def rocEvolutionCompare(listModels, temporalListLabels, classes, percentage = 0.001):
"""
    Plots the evolution of the ROC metrics over time for the different models
Arguments:
listModels {List of (name, predictions)*} -- Models to display
        temporalListLabels {List of (time, true labels)} -- Ground truth at different times
        classes {Dict "+":int, "-":int} -- Classes to consider to plot (default: {None} ie {"+":1, "-":0})
"""
aucs = {}
for (name, predictions) in listModels:
aucs[name] = computeEvolutionRoc(temporalListLabels, predictions, classes, percentage)
# AUC
plt.figure("Evolution")
plt.xlabel('Time before event (in minutes)')
plt.ylabel('Evolution')
plt.title('Evolution AUC')
plt.plot([min(temporalListLabels)[0].total_seconds() / 60., max(temporalListLabels)[0].total_seconds() / 60.], [0.5, 0.5], 'k--', label="Random Model")
for name in aucs:
plAuc = plt.plot(aucs[name].index.total_seconds() / 60., aucs[name]["auc"].values, label = name, ls = '--' if "train" in name.lower() else '-')
plt.fill_between(aucs[name].index.total_seconds() / 60., aucs[name]["lower"], aucs[name]["upper"], color=plAuc[0].get_color(), alpha=.2)
plt.gca().invert_xaxis()
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15))
plt.ylim(0.4, 1.1)
plt.grid(alpha = 0.3)
plt.show()
# TPR
for typePlot in ["tnr", "tpr"]:
plt.figure("Evolution {}".format(typePlot))
plt.xlabel('Time before event (in minutes)')
plt.ylabel('Evolution')
plt.title('Evolution {} @{:.2f}% {}'.format(typePlot, percentage * 100, "fnr" if typePlot == "tnr" else "fpr"))
plt.plot([min(temporalListLabels)[0].total_seconds() / 60., max(temporalListLabels)[0].total_seconds() / 60.], [0, 0], 'k--', label="Random Model")
for name in aucs:
plAuc = plt.plot(aucs[name].index.total_seconds() / 60., aucs[name][typePlot].values, label = name, ls = '--' if "train" in name.lower() else '-')
plt.fill_between(aucs[name].index.total_seconds() / 60., aucs[name][typePlot].values - aucs[name][typePlot + '_wilson'], aucs[name][typePlot].values + aucs[name][typePlot + '_wilson'], color=plAuc[0].get_color(), alpha=.2)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15))
plt.gca().invert_xaxis()
plt.ylim(-0.1, 1.1)
plt.grid(alpha = 0.3)
plt.show()
def prEvolutionCompare(listModels, temporalListLabels, classes):
"""
    Plots the evolution of the average precision over time for the different models
Arguments:
listModels {List of (name, predictions)*} -- Models to display
        temporalListLabels {List of (time, true labels)} -- Ground truth at different times
        classes {Dict "+":int, "-":int} -- Classes to consider to plot (default: {None} ie {"+":1, "-":0})
"""
plt.figure("Evolution")
plt.xlabel('Time before event (in minutes)')
plt.ylabel('Evolution')
plt.title('Evolution APR')
for (name, predictions) in listModels:
aprEvolutionPlot(temporalListLabels, predictions, classes, name, "Evolution")
plt.gca().invert_xaxis()
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15))
plt.ylim(-0.1, 1.1)
plt.grid(alpha = 0.3)
plt.show()
def featuresImportanceCompare(listModels, featuresNames, top = None):
"""
    Plots the importance that each model assigns to each feature
Arguments:
listModels {List of (name, features_weights)*} -- Models to display
        featuresNames {str list} -- Same size as features_weights
"""
weights_model = {}
for (name, weights) in listModels:
weights_model[name] = {f: w for w, f in zip(weights / np.max(np.abs(weights)), featuresNames)}
weights_model = pd.DataFrame.from_dict(weights_model)
# Sort by mean value of features
weights_model = weights_model.reindex(weights_model.abs().mean(axis = "columns").sort_values().index, axis = 0)
if top is not None:
weights_model = weights_model.iloc[-top:]
plt.figure("Features importance", figsize=(8, max(4.8, len(weights_model) / 5)))
plt.xlabel('Weights')
plt.ylabel('Features')
plt.title('Features importance')
weights_model.plot.barh(ax = plt.gca())
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15))
plt.show()
```
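A hypothetical comparison call, assuming predictions and ground truth are equal-length arrays of scores and binary labels as the docstrings suggest:
```python
import numpy as np
from metrics.comparison import rocCompare, prCompare

truth = np.random.randint(0, 2, size=200)
model_a = np.clip(0.6 * truth + 0.4 * np.random.rand(200), 0, 1)  # Informative scores
model_b = np.random.rand(200)                                     # Random scores

rocCompare([("Model A", model_a), ("Model B", model_b)], truth)
prCompare([("Model A", model_a), ("Model B", model_b)], truth)
```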
#### File: tests/utils/test_io.py
```python
import unittest
import numpy as np
import pandas as pd
from utils.io import *
class TestIo(unittest.TestCase):
def setUp(self):
self.number_points = 10
self.number_classes = 3
self.dim = 10
self.data = {str(j): pd.DataFrame(np.random.rand(np.random.randint(10, 100), self.dim)) for j in range(self.number_points)}
self.path = "tests/fake_data/"
def test_writeParallel(self):
writeParallel(self.data, self.path, 3)
def test_readParallel(self):
writeParallel(self.data, self.path, 3)
data = readParallel(self.path, 3)
self.assertEqual(len(data), len(self.data))
for d in data:
self.assertEqual(len(data[d]), len(self.data[d]))
def test_extractParallel(self):
writeParallel(self.data, self.path, 3)
data = extractParallel(self.path, lambda x: x.pow(2), 3)
self.assertEqual(len(data), len(self.data))
for d in data:
self.assertEqual(len(data[d]), len(self.data[d]))
if __name__ == '__main__':
unittest.main()
```
{
    "source": "Jeanselme/SuMo-net",
    "score": 3
}
#### File: SuMo-net/sumo/sumo_api.py
```python
from dsm.dsm_api import DSMBase
from sumo.sumo_torch import SuMoTorch
import sumo.losses as losses
from sumo.utilities import train_sumo
import torch
import numpy as np
from tqdm import tqdm
class SuMo(DSMBase):
"""
    Model API to call for using the method.
    Preprocesses data into the right format and handles CUDA.
"""
def __init__(self, cuda = torch.cuda.is_available(), **params):
self.params = params
self.fitted = False
self.cuda = cuda
def _gen_torch_model(self, inputdim, optimizer):
model = SuMoTorch(inputdim,
**self.params,
optimizer = optimizer).double()
if self.cuda:
model = model.cuda()
return model
def fit(self, x, t, e, vsize = 0.15, val_data = None,
optimizer = "Adam", random_state = 100, **args):
"""
        This method is used to train an instance of the SuMo model.
Parameters
----------
x: np.ndarray
A numpy array of the input features, \( x \).
t: np.ndarray
A numpy array of the event/censoring times, \( t \).
e: np.ndarray
A numpy array of the event/censoring indicators, \( \delta \).
\( \delta = 1 \) means the event took place.
vsize: float
Amount of data to set aside as the validation set.
val_data: tuple
A tuple of the validation dataset. If passed vsize is ignored.
optimizer: str
The choice of the gradient based optimization method. One of
'Adam', 'RMSProp' or 'SGD'.
random_state: float
random seed that determines how the validation set is chosen.
"""
processed_data = self._preprocess_training_data(x, t, e,
vsize, val_data,
random_state)
x_train, t_train, e_train, x_val, t_val, e_val = processed_data
model = self._gen_torch_model(x_train.size(1), optimizer)
model = train_sumo(model,
x_train, t_train, e_train,
x_val, t_val, e_val, cuda = self.cuda == 2,
**args)
self.torch_model = model.eval()
self.fitted = True
return self
def compute_nll(self, x, t, e):
"""
This method computes the negative log likelihood of the given data.
Parameters
----------
x: np.ndarray
A numpy array of the input features, \( x \).
t: np.ndarray
A numpy array of the event/censoring times, \( t \).
e: np.ndarray
A numpy array of the event/censoring indicators, \( \delta \).
\( \delta = 1 \) means the event took place.
Returns
float: NLL
"""
if not self.fitted:
raise Exception("The model has not been fitted yet. Please fit the " +
"model using the `fit` method on some training data " +
"before calling `_eval_nll`.")
processed_data = self._preprocess_training_data(x, t, e, 0, None, 0)
_, _, _, x_val, t_val, e_val = processed_data
if self.cuda == 2:
x_val, t_val, e_val = x_val.cuda(), t_val.cuda(), e_val.cuda()
loss = losses.total_loss(self.torch_model, x_val, t_val, e_val)
return loss.item()
def predict_survival(self, x, t, risk = None):
"""
This method computes the survival prediction of the given data at times t.
Parameters
----------
x: np.ndarray
A numpy array of the input features, \( x \).
t: float or list
            A list of times at which to evaluate the model.
Returns
        np.array (len(x), len(t)) Survival prediction for each point
"""
x = self._preprocess_test_data(x)
if not isinstance(t, list):
t = [t]
if self.fitted:
scores = []
for t_ in t:
t_ = torch.DoubleTensor([t_] * len(x)).to(x.device)
outcomes = self.torch_model.predict(x, t_)
scores.append(outcomes.detach().cpu().numpy())
return np.concatenate(scores, axis = 1)
else:
raise Exception("The model has not been fitted yet. Please fit the " +
"model using the `fit` method on some training data " +
"before calling `predict_survival`.")
```
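A hypothetical fit/predict round trip on synthetic right-censored data; this sketch relies on the defaults of `SuMoTorch` and `train_sumo`, which are not shown here:
```python
import numpy as np
from sumo.sumo_api import SuMo

# Hypothetical synthetic right-censored data
x = np.random.rand(200, 5)                # Features
t = np.random.exponential(10, size=200)   # Event or censoring times
e = np.random.binomial(1, 0.7, size=200)  # 1 if the event was observed

model = SuMo()  # Relies on SuMoTorch / train_sumo defaults
model.fit(x, t, e)
print(model.predict_survival(x[:5], t=[1., 5., 10.]))  # Expected shape: (5, 3)
```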
{
    "source": "Jeanselme/VisualizerPipeline",
    "score": 3
}
#### File: Jeanselme/VisualizerPipeline/pipeline_to_graph.py
```python
import json
from rendering import buildGraph
class Step:
"""
Tree structure used for the eclat algorithm
"""
    # Class-level counter used to assign an id to each node
idNum = -1
def __init__(self, name, hyperparams):
"""
Arguments:
            name {str} -- Name of the step
            hyperparams {dict or str} -- Hyperparameters of the step
"""
self.name = name
self.hyperparams = hyperparams
self.children = []
self.id = Step.idNum
Step.idNum += 1
def addChild(self, child, arg_type):
if child is not None:
self.children.append((arg_type, child))
def __str__(self, move = 0):
string = "| " * move + " + Step {} : {} \n".format(self.id, self.name)
for _, child in self.children:
string += child.__str__(move + 1)
return string
class Pipeline:
"""
DAG with extra information
"""
def __init__(self, filename):
"""
Reads the json and creates the associated pipeline
Arguments:
filename {str} -- File name
"""
with open(filename) as pipeline:
pipeline = json.load(pipeline)
self.id = pipeline['id']
self.dag = self.extractSteps(pipeline['steps'])
def extractSteps(self, steps):
"""
Extracts steps
Arguments:
steps {dict} -- Dictionary of primitives
"""
original = Step('Original data', "D3M Format")
extracted_step = []
# Iterate over steps
for step in steps:
if "primitive" in step:
# Extracts name (only most meaningful part)
hyperparams = step["hyperparams"] if "hyperparams" in step else "No hyperparams"
new_step = Step(step["primitive"]["python_path"].split('.')[3], hyperparams)
extracted_step.append(new_step)
                # If the step has arguments, update the links with its parent nodes
if "arguments" in step:
for arg_type in step["arguments"]:
data = step["arguments"][arg_type]["data"]
# Deal with list of parents
if not isinstance(data, list):
data = [data]
for parent in data:
parent = parent.split('.')
if parent[0] == "steps":
extracted_step[int(parent[1])].addChild(new_step, arg_type)
elif parent[0] == "inputs" and int(parent[1]) == 0:
original.addChild(new_step, arg_type)
return original
def visualize(self):
"""
Visualization of the dag
"""
return buildGraph(self.dag, graph = None, explored = [])
```
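A hypothetical minimal pipeline description that `extractSteps` can parse, run from the repository root so that `rendering` is importable; the primitive paths are made up but follow the `d3m.primitives.<family>.<name>.<kind>` pattern the code slices with `split('.')[3]`:
```python
import json
from pipeline_to_graph import Pipeline

description = {
    "id": "example-pipeline",
    "steps": [
        {"primitive": {"python_path": "d3m.primitives.data_transformation.denormalize.Common"},
         "arguments": {"inputs": {"data": "inputs.0"}}},
        {"primitive": {"python_path": "d3m.primitives.classification.random_forest.SKlearn"},
         "arguments": {"inputs": {"data": "steps.0.produce"}}},
    ],
}
with open("example-pipeline.json", "w") as handle:
    json.dump(description, handle)

print(Pipeline("example-pipeline.json").dag)  # Prints the reconstructed step tree
```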
{
    "source": "Jeanselme/xgboost-survival-embeddings",
    "score": 2
}
#### File: xgboost-survival-embeddings/tests/test_meta.py
```python
import pytest
from xgbse import (
XGBSEDebiasedBCE,
XGBSEKaplanNeighbors,
XGBSEKaplanTree,
XGBSEBootstrapEstimator,
)
from xgbse.metrics import concordance_index
from tests.data import get_data
(
X_train,
X_test,
X_valid,
T_train,
T_test,
T_valid,
E_train,
E_test,
E_valid,
y_train,
y_test,
y_valid,
features,
) = get_data()
def is_ci_width_consistent(bootstrap, X):
mean1, high1, low1 = bootstrap.predict(X, return_ci=True, ci_width=0.683)
mean2, high2, low2 = bootstrap.predict(X, return_ci=True, ci_width=0.95)
equal_means = (mean1 == mean2).all().all()
consistent_highs = (high2 >= high1).all().all()
consistent_lows = (low2 <= low1).all().all()
return equal_means & consistent_highs & consistent_lows
@pytest.mark.parametrize(
"model", [XGBSEDebiasedBCE, XGBSEKaplanNeighbors, XGBSEKaplanTree]
)
def test_ci_width_consistency(model):
model = model()
bootstrap = XGBSEBootstrapEstimator(model)
bootstrap.fit(X_train, y_train)
assert is_ci_width_consistent(bootstrap, X_test)
def test_accuracy_improvement():
base_model = XGBSEKaplanTree()
base_model.fit(X_train, y_train)
bootstrap = XGBSEBootstrapEstimator(base_model)
bootstrap.fit(X_train, y_train)
cind_base = concordance_index(y_test, base_model.predict(X_test))
cind_boots = concordance_index(y_test, bootstrap.predict(X_test))
assert cind_boots > cind_base
```
{
    "source": "jean-sh/pi-sense-visualizer",
    "score": 3
}
#### File: jean-sh/pi-sense-visualizer/audioExtraction.py
```python
import numpy as np
import cv2
import struct
def calculate_magnitudes(data, frame_count, nb_channels):
"""
Takes audio data in wav format, a frame count and the number of channels
(mono or stereo) and returns an array of magnitude by frequency
"""
if nb_channels == 2: # Strip every other sample point to keep only one channel
data = np.array(struct.unpack('{n}h'.format(n=nb_channels * frame_count), data))[::2]
else:
data = np.array(struct.unpack('{n}h'.format(n=nb_channels * frame_count), data))
windowed_data = np.multiply(data, np.hanning(len(data)))
# Calculate the Fourier Transform coefficients
dft_array = cv2.dft(np.float32(windowed_data))
    # Power in each frequency band; add 10 to keep the log positive
    magnitudes = np.add(np.sqrt((dft_array*dft_array).sum(axis=1)), 10)
log_mag = np.log10(magnitudes)
return log_mag
```
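A quick sanity check (assuming `opencv-python` is installed): pack one frame of a mono sine wave the way a wav stream would deliver it and run it through the function:
```python
import struct
import numpy as np
from audioExtraction import calculate_magnitudes

frame_count, rate = 1024, 44100
samples = (32767 * np.sin(2 * np.pi * 440 * np.arange(frame_count) / rate)).astype(np.int16)
data = struct.pack('{n}h'.format(n=frame_count), *samples)  # Raw 16-bit mono frame

log_mag = calculate_magnitudes(data, frame_count, nb_channels=1)
print(log_mag.shape, log_mag.max())
```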
#### File: jean-sh/pi-sense-visualizer/matrix.py
```python
from sense_hat import SenseHat
import numpy as np
class Colors:
black = [0, 0, 0]
white = [255, 255, 255]
# Bright
b_magenta = [255, 0, 255]
b_violet = [128, 0, 255]
b_blue = [64, 64, 255]
b_electric = [0, 128, 255]
b_cyan = [0, 255, 255]
b_emerald = [0, 255, 128]
b_green = [0, 255, 0]
b_citrus = [128, 255, 0]
b_yellow = [255, 255, 0]
b_orange = [255, 128, 0]
b_red = [255, 0, 0]
b_fashion = [255, 0, 128]
# Medium
m_magenta = [192, 0, 192]
m_violet = [96, 0, 192]
m_blue = [48, 48, 192]
m_electric = [0, 96, 192]
m_cyan = [0, 192, 192]
m_emerald = [0, 192, 96]
m_green = [0, 192, 0]
m_citrus = [96, 192, 0]
m_yellow = [192, 192, 0]
m_orange = [192, 96, 0]
m_red = [192, 0, 0]
m_fashion = [192, 0, 96]
m_white = [128, 128, 128]
# Dark
d_magenta = [128, 0, 128]
d_violet = [96, 0, 128]
d_blue = [0, 0, 160]
d_electric = [0, 64, 128]
d_cyan = [0, 128, 128]
d_emerald = [0, 128, 64]
d_green = [0, 128, 0]
d_citrus = [64, 128, 0]
d_yellow = [128, 128, 0]
d_orange = [128, 64, 0]
d_red = [128, 0, 0]
d_fashion = [128, 0, 64]
    # Desaturated
u_magenta = [144, 72, 144]
u_violet = [128, 96, 160]
u_blue = [96, 96, 160]
u_electric = [96, 128, 160]
u_cyan = [96, 160, 160]
u_emerald = [96, 160, 128]
u_green = [96, 160, 96]
u_citrus = [128, 160, 96]
u_yellow = [160, 160, 96]
u_orange = [160, 128, 96]
u_red = [160, 96, 96]
u_fashion = [192, 96, 128]
# Rainbows
rainbow = [d_violet, d_blue, m_cyan, m_green, b_yellow, b_orange, b_red, b_red]
ice_bow = [d_violet, d_blue, m_electric, m_cyan, b_cyan, b_emerald, white, white]
fire_bow = [white, b_yellow, b_yellow, b_orange, m_orange, b_red, b_red, b_red]
pink_bow = [d_red, d_red, u_magenta, m_magenta, b_magenta, b_fashion, b_fashion, white]
reverse_bow = [d_red, m_orange, m_yellow, m_green, b_cyan, b_blue, b_violet, b_violet]
buster_bow = [d_blue, d_electric, m_cyan, m_cyan, m_orange, b_orange, b_orange, b_red]
# Rainbow table
rbow_table = [rainbow, ice_bow, fire_bow, pink_bow, reverse_bow, buster_bow]
class Display(SenseHat):
"""
Class containing additional methods for operating the LED matrix
"""
def __init__(self):
SenseHat.__init__(self)
@staticmethod
def display_64(magnitudes, mode):
"""
Takes an array of 8 magnitudes and a display mode and returns
the corresponding matrix of eight 8-pixel columns
"""
pixels = []
for mag in magnitudes:
if mag > 255:
mag = 255
i = 0
while mag > 31:
pixels.append(np.divide(Colors.rbow_table[mode][i], (2 - (i / 8))))
i += 1
mag -= 32
if mag > 0:
pixels.append(np.floor_divide(Colors.rbow_table[mode][i], (32 / mag)))
i += 1
while i < 8:
pixels.append(Colors.black)
i += 1
return pixels
```
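`display_64` is a staticmethod, so the magnitude-to-pixel mapping can be exercised without the LED hardware (the import still requires the `sense_hat` package):
```python
from matrix import Display

magnitudes = [0, 16, 32, 64, 96, 128, 192, 255]  # One magnitude per column
pixels = Display.display_64(magnitudes, mode=0)  # Mode 0 is the plain rainbow
assert len(pixels) == 64                         # Eight 8-pixel columns
```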
{
    "source": "jeansignos/testett",
    "score": 2
}
#### File: jeansignos/testett/config.py
```python
def generate_dm_text(name):
    return '''Hey {}, it's great to connect
with you on Twitter'''.format(name)
scheduler_time = 15 #in minutes
tw_username = "signosdazueira" #change this to yours
```
{
    "source": "JeansReal/grupo_express_it",
    "score": 3
}
#### File: doctype/sales_invoice/sales_invoice.py
```python
import frappe
from frappe.model.document import Document
from frappe.utils import in_words
class SalesInvoice(Document):
pass
@frappe.whitelist(allow_guest=False)
def money_in_words(number) -> str:
    whole, _, fraction = number.partition('.')  # Split the whole part and the fraction, even if the number is an integer
    out = '{0} dólares'.format(in_words(whole)[:-1] if whole[-1:] == '1' else in_words(whole))  # Ends with 1: trim the last char
    if fraction and fraction[:2] not in ['0', '00']:  # Same as float(number).is_integer(): check if the first 2 digits are zeros
        out += ' con {0}/100'.format(fraction[:2] + '0' if len(fraction[:2]) == 1 else fraction[:2])  # One-digit fraction: add a zero
return out.capitalize()
```
#### File: patches/v13_0/sales_invoice_naming.py
```python
import frappe
def execute():
frappe.reload_doctype('Document Naming Rule')
doc = frappe.get_doc({
'doctype': 'Document Naming Rule',
'document_type': 'Sales Invoice',
'priority': 1,
'prefix': 'Recibo No. ',
'prefix_digits': 6,
'counter': 0
})
doc.insert()
```
{
    "source": "jeantardelli/architecture-patterns-with-python",
    "score": 2
}
#### File: allocation/adapters/orm.py
```python
import logging
from sqlalchemy import (Table,
MetaData,
Column,
Integer,
String,
Date,
ForeignKey,
event)
from sqlalchemy.orm import mapper, relationship
from allocation.domain import model
logger = logging.getLogger(__name__)
metadata = MetaData()
order_lines = Table(
"order_lines",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("sku", String(255)),
Column("qty", Integer, nullable=False),
Column("orderid", String(255)),
)
products = Table(
"products",
metadata,
Column("sku", String(255), primary_key=True),
Column("version_number", Integer, nullable=False, server_default="0"))
batches = Table(
"batches",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("reference", String(255)),
Column("sku", ForeignKey("products.sku")),
Column("_purchased_quantity", Integer, nullable=False),
Column("eta", Date, nullable=True),
)
allocations = Table(
"allocations",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("orderline_id", ForeignKey("order_lines.id")),
Column("batch_id", ForeignKey("batches.id")),
)
allocations_view = Table(
"allocations_view",
metadata,
Column("orderid", String(255)),
Column("sku", String(255)),
Column("batchref", String(255)),
)
def start_mappers():
logger.info("Starting mappers")
lines_mapper = mapper(model.OrderLine, order_lines)
batches_mapper = mapper(
model.Batch,
batches,
properties={
"_allocations": relationship(
lines_mapper,
secondary=allocations,
collection_class=set,
)})
mapper(
model.Product,
products,
properties={"batches": relationship(batches_mapper)})
@event.listens_for(model.Product, "load")
def receive_load(product, _):
product.events = []
```
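A minimal wiring sketch: create the tables on an engine and bind the classical mappers once (in normal operation the repo's `bootstrap` module and the test fixtures appear to take care of this):
```python
from sqlalchemy import create_engine
from allocation.adapters.orm import metadata, start_mappers

engine = create_engine("sqlite://")  # In-memory database for illustration
metadata.create_all(engine)
start_mappers()  # Must be called exactly once per process
```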
#### File: allocation/adapters/repository.py
```python
import abc
from typing import Set
from allocation.adapters import orm
from allocation.domain import model
class AbstractRepository(abc.ABC):
def __init__(self):
self.seen = set() # type: Set[model.Product]
def add(self, product: model.Product):
self._add(product)
self.seen.add(product)
def get(self, sku) -> model.Product:
product = self._get(sku)
if product:
self.seen.add(product)
return product
def get_by_batchref(self, batchref) -> model.Product:
product = self._get_by_batchref(batchref)
if product:
self.seen.add(product)
return product
@abc.abstractmethod
def _add(self, product: model.Product):
raise NotImplementedError
@abc.abstractmethod
def _get(self, sku) -> model.Product:
raise NotImplementedError
def _get_by_batchref(self, batchref) -> model.Product:
raise NotImplementedError
class SqlAlchemyRepository(AbstractRepository):
def __init__(self, session):
super().__init__()
self.session = session
def _add(self, product):
self.session.add(product)
def _get(self, sku):
return self.session.query(model.Product).filter_by(sku=sku).first()
def _get_by_batchref(self, batchref):
return (
self.session.query(model.Product)
.join(model.Batch)
.filter(orm.batches.c.reference == batchref)
.first())
```
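Because only the `_`-prefixed hooks are abstract, a test double is easy to write; this sketch is close to (but not copied from) the fake repository used in the book's tests:
```python
from allocation.adapters.repository import AbstractRepository


class FakeRepository(AbstractRepository):
    def __init__(self, products):
        super().__init__()
        self._products = set(products)

    def _add(self, product):
        self._products.add(product)

    def _get(self, sku):
        return next((p for p in self._products if p.sku == sku), None)

    def _get_by_batchref(self, batchref):
        return next((p for p in self._products
                     for b in p.batches if b.reference == batchref), None)
```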
#### File: allocation/entrypoints/flask_app.py
```python
from datetime import datetime
from flask import Flask, request, jsonify
from allocation.domain import commands
from allocation.service_layer.handlers import InvalidSku
from allocation import bootstrap, views
app = Flask(__name__)
bus = bootstrap.bootstrap()
@app.route("/add_batch", methods=["POST"])
def add_batch():
eta = request.json["eta"]
if eta is not None:
eta = datetime.fromisoformat(eta).date()
cmd = commands.CreateBatch(
request.json["ref"],
request.json["sku"],
request.json["qty"],
eta)
bus.handle(cmd)
return "OK", 201
@app.route("/allocate", methods=["POST"])
def allocate_endpoint():
try:
cmd = commands.Allocate(
request.json["orderid"],
request.json["sku"],
request.json["qty"])
bus.handle(cmd)
except InvalidSku as e:
return {"message": str(e)}, 400
return "OK", 202
@app.route("/allocations/<orderid>", methods=["GET"])
def allocations_view_endpoint(orderid):
result = views.allocations(orderid, bus.uow)
if not result:
return "not found", 404
return jsonify(result), 200
```
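Hypothetical client calls against a locally running instance (`flask run`, default port 5000):
```python
import requests

requests.post("http://localhost:5000/add_batch",
              json={"ref": "b1", "sku": "CRUNCHY-ARMCHAIR", "qty": 100, "eta": None})
response = requests.post("http://localhost:5000/allocate",
                         json={"orderid": "o1", "sku": "CRUNCHY-ARMCHAIR", "qty": 3})
print(response.status_code)  # 202
print(requests.get("http://localhost:5000/allocations/o1").json())
```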
#### File: allocation/service_layer/unit_of_work.py
```python
from __future__ import annotations
import abc
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from allocation import config
from allocation.adapters import repository
from . import messagebus
class AbstractUnitOfWork(abc.ABC):
products: repository.AbstractRepository
def __enter__(self) -> AbstractUnitOfWork:
return self
def __exit__(self, *args):
self.rollback()
def commit(self):
self._commit()
def collect_new_events(self):
for product in self.products.seen:
while product.events:
yield product.events.pop(0)
@abc.abstractmethod
def _commit(self):
raise NotImplementedError
@abc.abstractmethod
def rollback(self):
raise NotImplementedError
DEFAULT_SESSION_FACTORY = sessionmaker(
bind=create_engine(config.get_mysql_uri(),
isolation_level="SERIALIZABLE"))
class SqlAlchemyUnitOfWork(AbstractUnitOfWork):
def __init__(self, session_factory=DEFAULT_SESSION_FACTORY):
self.session_factory = session_factory
def __enter__(self):
self.session = self.session_factory() # type: Session
self.products = repository.SqlAlchemyRepository(self.session)
return super().__enter__()
def __exit__(self, *args):
super().__exit__(*args)
self.session.close()
def _commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
```
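Typical handler-style usage: work is rolled back on exit unless `commit` is called explicitly, which is what makes the pattern safe by default. A sketch assuming a configured database and an existing product:
```python
from allocation.domain import model
from allocation.service_layer import unit_of_work

with unit_of_work.SqlAlchemyUnitOfWork() as uow:
    product = uow.products.get(sku="CRUNCHY-ARMCHAIR")
    product.allocate(model.OrderLine("o1", "CRUNCHY-ARMCHAIR", 10))
    uow.commit()  # Without this line, __exit__ rolls the session back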
#### File: tests/integration/test_uow.py
```python
import threading
import time
import traceback
import pytest
from typing import List
from allocation.domain import model
from allocation.service_layer import unit_of_work
from ..random_refs import random_sku, random_batchref, random_orderid
pytestmark = pytest.mark.usefixtures("mappers")
def insert_batch(session, ref, sku, qty, eta, product_version=1):
session.execute(
"INSERT INTO products (sku, version_number) VALUES (:sku, :version)",
dict(sku=sku, version=product_version))
session.execute(
"INSERT INTO batches (reference, sku, _purchased_quantity, eta)"
" VALUES (:ref, :sku, :qty, :eta)",
dict(ref=ref, sku=sku, qty=qty, eta=eta))
def get_allocated_batch_ref(session, orderid, sku):
[[orderlineid]] = session.execute(
"SELECT id FROM order_lines WHERE orderid=:orderid AND sku=:sku",
dict(orderid=orderid, sku=sku))
[[batchref]] = session.execute(
"SELECT b.reference FROM allocations AS a JOIN batches AS b ON a.batch_id = b.id"
" WHERE orderline_id=:orderlineid",
dict(orderlineid=orderlineid))
return batchref
def test_uow_can_retrieve_a_batch_and_allocate_to_it(sqlite_session_factory):
session = sqlite_session_factory()
insert_batch(session, "batch01", "HIPSTER-WORKBENCH", 100, None)
session.commit()
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with uow:
product = uow.products.get(sku="HIPSTER-WORKBENCH")
line = model.OrderLine("o1", "HIPSTER-WORKBENCH", 10)
product.allocate(line)
time.sleep(0.2)
uow.commit()
batchref = get_allocated_batch_ref(session, "o1", "HIPSTER-WORKBENCH")
assert batchref == "batch01"
def test_rolls_back_uncommitted_work_by_default(sqlite_session_factory):
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with uow:
insert_batch(uow.session, "batch01", "MEDIUM-PLINTH", 100, None)
new_session = sqlite_session_factory()
rows = list(new_session.execute("SELECT * FROM 'batches'"))
assert rows == []
def test_rolls_back_on_error(sqlite_session_factory):
class MyException(Exception):
pass
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with pytest.raises(MyException):
with uow:
insert_batch(uow.session, "batch01", "LARGE-FORK", 100, None)
raise MyException()
new_session = sqlite_session_factory()
rows = list(new_session.execute("SELECT * FROM 'batches'"))
assert rows == []
def try_to_allocate(orderid, sku, exceptions):
line = model.OrderLine(orderid, sku, 10)
uow = unit_of_work.SqlAlchemyUnitOfWork()
try:
with uow:
product = uow.products.get(sku=sku)
product.allocate(line)
time.sleep(0.5)
uow.commit()
except Exception as e:
print(traceback.format_exc())
exceptions.append(e)
def test_concurrent_updates_to_version_are_not_allowed(mysql_session_factory):
sku, batch = random_sku(), random_batchref()
session = mysql_session_factory()
insert_batch(session, batch, sku, 100, eta=None, product_version=1)
session.commit()
order1, order2 = random_orderid(1), random_orderid(2)
exceptions = [] # type: List[Exception]
try_to_allocate_order1 = lambda : try_to_allocate(order1, sku, exceptions)
try_to_allocate_order2 = lambda : try_to_allocate(order2, sku, exceptions)
thread1 = threading.Thread(target=try_to_allocate_order1)
thread2 = threading.Thread(target=try_to_allocate_order2)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
[[version]] = session.execute(
"SELECT version_number FROM products WHERE sku=:sku", dict(sku=sku))
assert version == 2
orders = session.execute(
"SELECT orderid FROM allocations"
" JOIN batches ON allocations.batch_id = batches.id"
" JOIN order_lines ON allocations.orderline_id = order_lines.id"
" WHERE order_lines.sku=:sku",
dict(sku=sku))
[exception] = exceptions
assert "Deadlock found when trying to get lock" in str(exception)
assert orders.rowcount == 1
with unit_of_work.SqlAlchemyUnitOfWork() as uow:
uow.session.execute("SELECT 1")
```
#### File: tests/unit/test_product.py
```python
from datetime import date, timedelta
from allocation.domain import events
from allocation.domain.model import Product, OrderLine, Batch
today = date.today()
tomorrow = today + timedelta(days=1)
later = tomorrow + timedelta(days=10)
def test_prefers_warehouse_batches_to_shipments():
in_stock_batch = Batch("in-stock-batch", "RETRO-CLOCK", 100, eta=None)
shipment_batch = Batch("shipment-batch", "RETRO-CLOCK", 100, eta=tomorrow)
product = Product(sku="RETRO-CLOCK", batches=[in_stock_batch, shipment_batch])
line = OrderLine("ofer", "RETRO-CLOCK", 10)
product.allocate(line)
assert in_stock_batch.available_quantity == 90
assert shipment_batch.available_quantity == 100
def test_prefers_earlier_batches():
earliest = Batch("speedy-batch", "MINIMALIST-SPOON", 100, eta=today)
medium = Batch("normal-batch", "MINIMALIST-SPOON", 100, eta=tomorrow)
latest = Batch("slow-batch", "MINIMALIST-SPOON", 100, eta=later)
product = Product(sku="MINIMALIST-SPOON", batches=[medium, earliest, latest])
line = OrderLine("order1", "MINIMALIST-SPOON", 10)
product.allocate(line)
assert earliest.available_quantity == 90
assert medium.available_quantity == 100
assert latest.available_quantity == 100
def test_returns_allocated_batch_ref():
in_stock_batch = Batch("in-stock-batch-ref", "HIGHBROW-POSTER", 100, eta=None)
shipment_batch = Batch("shipment-batch-ref", "HIGHBROW-POSTER", 100, eta=tomorrow)
line = OrderLine("oref", "HIGHBROW-POSTER", 10)
product = Product(sku="HIGHBROW-POSTER", batches=[in_stock_batch, shipment_batch])
allocation = product.allocate(line)
assert allocation == in_stock_batch.reference
def test_records_out_of_stock_event_if_cannot_allocate():
batch = Batch("batch1", "SMALL-FORK", 10, eta=today)
product = Product(sku="SMALL-FORK", batches=[batch])
product.allocate(OrderLine("order1", "SMALL-FORK", 10))
allocation = product.allocate(OrderLine("order2", "SMALL-FORK", 1))
assert product.events[-1] == events.OutOfStock(sku="SMALL-FORK")
assert allocation is None
def test_increments_version_number():
line = OrderLine("oref", "SCANDI-PEN", 10)
product = Product(
sku="SCANDI-PEN", batches=[Batch("b1", "SCANDI-PEN", 100, eta=None)])
product.version_number = 7
product.allocate(line)
assert product.version_number == 8
```
{
    "source": "jeantardelli/data-engineering-with-python",
    "score": 3
}
#### File: data-engineering-with-python/airflow-dag/airflowclean.py
```python
import datetime as dt
from datetime import timedelta
import pandas as pd
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
def cleanScooter():
"""This functions performs the cleaning taks.
Firtly, it reads the file and drop unnecessary columns (the region ID).
Then convert all the columns to lowercase and change the started_at field
to a datetime data type. Lastly, write the changes to a file.
"""
df = pd.read_csv('../scooter-data/scooter.csv')
df.drop(columns=['region_id'], inplace=True)
df.columns = [column.lower() for column in df.columns]
df['started_at'] = pd.to_datetime(df['started_at'], format='%m/%d/%Y %H:%M')
df.to_csv('../scooter-data/cleanscooter.csv')
def filterData():
"""Reads the cleaned data and filter based on a start and end date."""
df = pd.read_csv('../scooter-data/cleanscooter.csv')
fromdate = '2019-05-23'
todate = '2019-06-03'
tofrom = df[(df['started_at'] > fromdate) & (df['started_at'] < todate)]
tofrom.to_csv('../scooter-data/may23-june3.csv')
default_args = {
'owner': 'jeantardelli',
'start_date': dt.datetime(2020, 12, 2),
    'retries': 1,
'retry_delay': dt.timedelta(minutes=5)
}
with DAG('CleanData',
default_args = default_args,
schedule_interval = timedelta(minutes=5)) as dag:
cleanData = PythonOperator(task_id='clean',
python_callable=cleanScooter)
selectData = PythonOperator(task_id='filter',
python_callable=filterData)
copyFile = BashOperator(task_id='copy',
bash_command='cp ~/data-engineering-with-python/scooter-data/may23-june3.csv ~/may23-june3.csv')
cleanData >> selectData >> copyFile
```
{
    "source": "jeantardelli/math-with-python",
    "score": 3
}
#### File: math-with-python/calculus-and-differential-equations/numerical-integration.py
```python
import numpy as np
from scipy import integrate
def erf_integrand(t):
"""Represents the integrand of a Gaussian Error Function."""
return np.exp(-t**2)
val_quad, err_quad = integrate.quad(erf_integrand, -1., 1.) # Using QUADPACK
val_quar, err_quar = integrate.quadrature(erf_integrand, -1., 1.) # Using Quadrature
print(val_quad, err_quad)
print(val_quar, err_quar)
```
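As a quick check (not in the original script), both estimates can be compared with the closed form: since erf(x) = (2/√π) ∫₀ˣ exp(-t²) dt, the integral over [-1, 1] equals √π·erf(1):
```python
import math

exact = math.sqrt(math.pi) * math.erf(1.0)
print(exact)  # ~1.4936482656, which both estimates above should match
```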
#### File: math-with-python/data-and-statistics/manipulating-data-frames.py
```python
import pandas as pd
import numpy as np
from numpy.random import default_rng
def transform_function(row):
if row["four"]:
return 0.5 * row["two"]
return row["one"] * row["two"]
rng = default_rng(12345)
three = rng.uniform(-0.2, 1.0, size=100)
three[three < 0] = np.nan
data_frame = pd.DataFrame({
"one": rng.random(size=100),
"two": np.add.accumulate(rng.normal(0, 1, size=100)),
"three": three
})
data_frame["four"] = data_frame["one"] > 0.5
data_frame["five"] = data_frame.apply(transform_function, axis=1)
print(data_frame)
df = data_frame.dropna()
print(df)
```
#### File: math-with-python/finding-optimal-solutions/using-gradient-descent-methods.py
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def descend(func, x0, grad, bounds, tol=1e-8, max_iter=100):
xn = x0
xnm1 = np.inf
grad_xn = grad(x0)
for i in range(max_iter):
if np.linalg.norm(xn - xnm1) < tol:
break
direction = -grad_xn
xnm1 = xn
xn = xn + 0.2 * direction
grad_xn = grad(xn)
yield i, xn, func(xn), grad_xn
def func(x):
return ((x[0] - 0.5)**2 + (x[1] + 0.5)**2)*np.cos(0.5*x[0]*x[1])
def grad(x):
c1 = x[0]**2 - x[0] + x[1]**2 + 0.5
cos_t = np.cos(0.5*x[0]*x[1])
sin_t = np.sin(0.5*x[0]*x[1])
return np.array([
(2*x[0]-1)*cos_t - 0.5*x[1]*c1*sin_t,
(2*x[1]+1)*cos_t - 0.5*x[0]*c1*sin_t])
x_r = np.linspace(-1, 1)
y_r = np.linspace(-2, 2)
x, y = np.meshgrid(x_r, y_r)
z = func([x, y])
surf_fig = plt.figure(tight_layout=True)
surf_ax = surf_fig.add_subplot(projection="3d")
surf_ax.tick_params(axis="both", which="major", labelsize=9)
surf_ax.set(xlabel="x", ylabel="y", zlabel="z")
surf_ax.set_title("Objective function")
surf_ax.plot_surface(x, y, z, alpha=0.7)
x0 = np.array([-0.8, 1.3])
surf_ax.plot([x0[0]], [x0[1]], func(x0), "r*")
cont_fig, cont_ax = plt.subplots()
cont_ax.set(xlabel="x", ylabel="y")
cont_ax.set_title("Contour plot with iterates")
cont_ax.contour(x, y, z, levels=30)
bounds = ((-1, 1), (-2, 2))
xnm1 = x0
for i, xn, fxn, grad_xn in descend(func, x0, grad, bounds):
cont_ax.plot([xnm1[0], xn[0]], [xnm1[1], xn[1]], "k*--")
xnm1, grad_xnm1 = xn, grad_xn
print(f"iterations={i}")
print(f"min val at {xn}")
print(f"min func value = {fxn}")
surf_ax.plot([xn[0]], [xn[1]], func(xn), "r*")
plt.show()
```
#### File: math-with-python/miscellaneous-topics/run.py
```python
import time
import matplotlib.pyplot as plt
from functools import wraps
from mandelbrot.python_mandel import compute_mandel as compute_mandel_py
from mandelbrot.hybrid_mandel import compute_mandel as compute_mandel_hy
from mandelbrot.cython_mandel import compute_mandel as compute_mandel_cy
def timer(func, name):
@wraps(func)
def wrapper(*args, **kwargs):
t_start = time.time()
val = func(*args, **kwargs)
t_end = time.time()
print(f"Time taken for {name}: {t_end - t_start}")
return val
return wrapper
mandel_py = timer(compute_mandel_py, "Python")
mandel_hy = timer(compute_mandel_hy, "Hybrid")
mandel_cy = timer(compute_mandel_cy, "Cython")
Nx = 320
Ny = 240
steps = 255
mandel_py(Nx, Ny, steps)
mandel_hy(Nx, Ny, steps)
vals = mandel_cy(Nx, Ny, steps)
fig, ax = plt.subplots()
ax.imshow(vals.T, extent=(-2.5, 0.5, -1.2, 1.2))
plt.show()
```
{
    "source": "jeantardelli/python-automation",
    "score": 3
}
#### File: python-automation/automating-tasks-made-easy/task_with_error_handling_step1.py
```python
import sys
import argparse
def main(number, other_number, output):
result = number / other_number
print(f'The result is {result}', file=output)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-n1', type=int, help='a number', default=1)
parser.add_argument('-n2', type=int, help='another number', default=1)
parser.add_argument('-o', dest='output', type=argparse.FileType('w'),
help='output file', default=sys.stdout)
args = parser.parse_args()
main(args.n1, args.n2, args.output)
```
{
    "source": "jeantardelli/restful-python-web-services",
    "score": 2
}
#### File: database-models-interaction/service/app.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from models import db
from views import service_blueprint
def create_app(config_filename):
app = Flask(__name__)
app.config.from_object(config_filename)
db.init_app(app)
app.register_blueprint(service_blueprint, url_prefix='/service')
migrate = Migrate(app, db)
return app
app = create_app('config')
```
{
    "source": "jeantardelli/wargameRepo",
    "score": 4
}
#### File: wargameRepo/wargame/abstractgameunit.py
```python
import random
from abc import ABC, abstractmethod
from gameuniterror import GameUnitError
from gameutils import print_bold, weighted_random_selection
class AbstractGameUnit(ABC):
"""Abstract class to represent a game character (or a 'unit')
    :ivar name: Name of the character (set by subclasses)
:ivar max_hp: Maximum 'hit points' or 'health points' for the unit.
This is set by the subclasses.
:ivar health_meter: Keeps track of the current health of the unit
:ivar enemy: Present enemy of this unit. At any time, it can have only one enemy.
:ivar unit_type: Tells if this is a 'friend' or an 'enemy'
:param name: Accept the name of this game character
.. seealso:: Classes :py:class:`Knight` and :py:class:`OrcRider`
"""
def __init__(self, name=''):
self.name = name
self.max_hp = 0
self.health_meter = 0
self.enemy = None
self.unit_type = None
@abstractmethod
def info(self):
"""Print information about this game unit.
Abstract method. See subclasses for implementation.
"""
def attack(self, enemy):
"""The main logic to 'attack' the enemy unit.
        This method handles combat between the player (Knight instance) and the
        given enemy (at the moment an OrcRider instance). In the combat, one of the
        units could get injured, or both escape unhurt. The method reduces the
        'health' of the injured unit by a randomly selected amount.
:param enemy: The enemy to be attacked (instance of subclass of AbstractGameUnit)
.. seealso:: :py:meth:`Knight.acquire_hut`
"""
if not enemy:
print("No enemy to attack")
else:
injured_unit = weighted_random_selection(self, enemy)
injury = random.randint(10, 15)
injured_unit.health_meter = max(injured_unit.health_meter - injury, 0)
print("ATTACK! ", end='')
self.show_health(end=' ')
enemy.show_health(end=' ')
def heal(self, heal_by=2, full_healing=True):
"""Heal the unit replenishing all the hit points
This method is called when you (the player) enters a friendly hut.
        :param heal_by: `health_meter` will be updated by this amount if full
                        healing is not requested.
        :param full_healing: Fully heal this unit by resetting the `health_meter` to
                             the maximum limit.
        .. seealso:: :py:meth:`Knight.acquire_hut`
"""
if self.health_meter == self.max_hp:
return
if full_healing:
self.health_meter = self.max_hp
else:
self.health_meter += heal_by
if self.health_meter > self.max_hp:
raise GameUnitError("health_meter > max_hp!")
print_bold("You are HEALED!", end=' ')
self.show_health(bold=True)
def reset_health_meter(self):
"""Reset the `health_meter` (assing default hit points)"""
self.health_meter = self.max_hp
def show_health(self, bold=False, end='\n'):
"""Print info on the current health reading of this game unit
The arguments to this method are mainly to customize the message display style.
:param bold: Flag to indicate whether information should be printed in bold
style or normal style.
        :param end: Specify how the message should end i.e. whether a new line
                    character should be appended at the end, or you want to add
                    a space or a tab (for message continuation)
"""
msg = "Health: {0:s} {1:d}".format(self.name, self.health_meter)
if bold:
print_bold(msg, end=end)
else:
print(msg, end=end)
```
#### File: wargameRepo/wargame/attackoftheorcs.py
```python
import sys
import random
from hut import Hut
from knight import Knight
from orcrider import OrcRider
from gameutils import print_bold
if sys.version_info < (3, 0):
print("This code requires Python 3.x and is tested with version 3.7.x")
print("Looks like you are trying to run this using Python version: {0}.{1}"
.format(sys.version_info[0], sys_version_info[1]))
print("Exiting...")
sys.exit(1)
class AttackOfTheOrcs():
"""Main class with the high level logic to play Attack of The Orcs game
:ivar huts: List object to hold instances of `Hut` class.
:ivar player: Represents the player playing this game. This is an instance
of class `Knight` in current implementation.
.. seealso:: :py:meth:`self.play` where the main action happens.
"""
def __init__(self):
self.huts = []
self.player = None
def get_occupants(self):
"""Return a list of occupant types for all huts.
This is mainly used for printing information on current status of the
        hut (whether unoccupied or acquired).
If the occupant is not `None` the occupant type will be 'enemy' or
        'friend'. But if there is no occupant, or the hut is already 'ACQUIRED',
        the occupant_type will display that information instead.
See `Hut.get_occupant_type()` for more details.
Return a list that collects this information from all the huts.
This is a list comprehension example. More on the list comprehension
in a chapter on Performance.
:return: A list containing occupant types (string)
.. seealso: :py:meth:`Hut.get_occupant_type`
"""
return [x.get_occupant_type() for x in self.huts]
def show_game_mission(self):
"""Print the game mission in the console"""
print_bold("Mission:")
print(" 1. Fight with the enemy.")
print(" 2. Bring all the huts in the village under your control")
print("---------------------------------------------------------\n")
def _process_user_choice(self):
"""Process the user input for choice of hut to enter
Returns the hut number to enter based on the user input. This method
makes sure that the hut number user has entered is valid. If not, it
prompts the user to re-enter this information.
:return: hut index to enter.
"""
verifying_choice = True
idx = 0
print("Current occupants: {0:s}".format(', '.join(self.get_occupants())))
while verifying_choice:
user_choice = input("Choose a hut number to enter (1-5): ")
# Handling Exceptions block
try:
idx = int(user_choice)
assert idx > 0
except ValueError as err:
print("Invalid input, args: {0:s}".format(', '.join(err.args)))
continue
            except AssertionError as err:
                print("Number should be in the range 1-5. Try again")
                continue
try:
if self.huts[idx-1].is_acquired:
print("You have already acquired this hut. Try another one.\n"
"<INFO: You can NOT get healed in already acquired hut.>")
else:
verifying_choice = False
except IndexError as err:
print("Number should be in the range 1-5. Try again")
return idx
def _occupy_huts(self):
"""Randomly occupy the huts with one of the options (friend, enemy or 'None')
.. todo::
Here we assume there are exactly 5 huts. As an exercise, make it a user
input. Note that after such change, the unit test is expected to fail!
"""
choice_lst = ['friend', 'enemy', None]
for i in range(5):
computer_choice = random.choice(choice_lst)
if computer_choice == 'enemy':
name = 'enemy-' + str(i+1)
self.huts.append(Hut(i+1, OrcRider(name)))
elif computer_choice == 'friend':
name = 'knight-' + str(i+1)
self.huts.append(Hut(i+1, Knight(name)))
else:
self.huts.append(Hut(i+1, computer_choice))
def setup_game_scenario(self):
"""Create player and huts and then randomly pre-occupy huts.
        The huts might be left empty as well. This method also prints the game
        mission, which could be refactored out of this method as an exercise.
.. seealso:: :py:meth: `self.play`,
:py:meth: `self._occupy_huts`
"""
self.player = Knight()
self._occupy_huts()
self.show_game_mission()
self.player.show_health(bold=True)
def play(self):
"""
Workhorse method to play the game.
        Controls the high level logic to play the game. This is called from
the main program to begin the game execution.
In summary, this method has the high level logic that does the following
by calling appropriate functionality:
* Set up instance variables for the game
* Accept the user input for hut number to enter
* Attempt to acquire the hut (:py:meth:`Knight.acquire_hut`)
* Determine if the player wins or loses.
.. seealso:: :py:meth: `setup_game_scenario`,
:py:meth:`Knight.acquire_hut`
"""
# Create a Knight instance, create huts and preoccupy them with a game
# character instance (or leave empty)
self.setup_game_scenario()
# Initial setup is done, now the main play logic
acquired_hut_counter = 0
while acquired_hut_counter < 5:
idx = self._process_user_choice()
self.player.acquire_hut(self.huts[idx-1])
if self.player.health_meter <= 0:
print_bold("YOU LOSE :( Better luck next time")
break
if self.huts[idx-1].is_acquired:
acquired_hut_counter += 1
if acquired_hut_counter == 5:
print_bold("Congratulations! YOU WIN!!!")
if __name__ == '__main__':
game = AttackOfTheOrcs()
game.play()
```
#### File: wargame/designpatterns/pythonic_adapter_foreignunitadapter.py
```python
class ForeignUnitAdapter:
"""Generalized adapter class for 'fixing' incompatible interfaces.
:arg adaptee: An instance of the 'adaptee' class. For example, WoodElf
is an adaptee as it has a method 'leap' when we expect
'jump'.
    :arg adaptee_method: The method you want to adapt. For example, when
client calls 'jump' method on the adapter instance, it is
delegated to 'leap' method of the adaptee.
:ivar foreign_unit: The instance of the adaptee class
:ivar jump: Instance variable jump is assigned as the adaptee_method
(e.g. 'leap')
"""
def __init__(self, adaptee, adaptee_method):
self.foreign_unit = adaptee
self.jump = adaptee_method
def __getattr__(self, item):
"""Handle all the undefined attributes the client code expects.
:param item: name of the attribute.
:return: Returns the corresponding attribute of the adaptee instance
(self.foreign_unit).
"""
return getattr(self.foreign_unit, item)
```
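A minimal usage sketch for the adapter above. The `WoodElf` class here is hypothetical (it is mentioned in the docstring but not defined in this repository), and the import assumes the module is importable under its file name:
```python
from pythonic_adapter_foreignunitadapter import ForeignUnitAdapter

class WoodElf:
    """Hypothetical adaptee: exposes 'leap' where clients expect 'jump'."""
    def leap(self):
        print("Leaping over a fallen tree!")

    def info(self):
        print("I am a wood elf.")

elf = WoodElf()
adapter = ForeignUnitAdapter(elf, elf.leap)
adapter.jump()   # delegated to WoodElf.leap via the 'jump' instance attribute
adapter.info()   # not defined on the adapter; __getattr__ forwards to the adaptee
```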
#### File: wargame/designpatterns/strategypattern_pythonic_jumpstrategies.py
```python
def can_not_jump():
"""A demo function representing a jump algorithm.
    .. note:: No actual algorithm is implemented, it just prints some information.
"""
print("--> CanNotJump.jump: I can not jump")
def power_jump():
"""A demo function representing a jump algorithm.
.. note:: No actual algorithm is implemented, it just prints some information.
"""
print("--> PowerJump.jump: I can jump 100 feet from the ground!")
def horse_jump():
"""A demo function representing a jump algorithm.
.. note:: No actual algorithm is implemented, it just prints some information.
"""
print("--> HorseJump.jump: Jumping my horse.")
```
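Since these strategies are plain functions, swapping a unit's jump behaviour is just an attribute reassignment. A short illustrative sketch (the `Dragon` class is hypothetical, not part of this repository):
```python
from strategypattern_pythonic_jumpstrategies import can_not_jump, power_jump

class Dragon:
    """Hypothetical game unit whose jump behaviour is a plain function."""
    def __init__(self, jump_strategy=can_not_jump):
        self.jump = jump_strategy

drago = Dragon()
drago.jump()              # --> CanNotJump.jump: I can not jump
drago.jump = power_jump   # swap the algorithm at runtime
drago.jump()              # --> PowerJump.jump: I can jump 100 feet ...
```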
#### File: wargame/designpatterns/traditional_abstractgameunit.py
```python
from abc import ABC, abstractmethod
from strategypattern_traditional_jumpstrategies import JumpStrategy
class AbstractGameUnit(ABC):
"""Base class for all the game characters.
:arg string name: Name of the game character
:arg JumpStrategy jump_object: Could be an instance of JumpStrategy or
its subclasses. Default to None.
:ivar jump_strategy: Choose the algorithm for jumping.
"""
def __init__(self, name='stranger', jump_object=None):
"""Initializes the AbstractGameUnit object."""
self.jump_strategy = None
self.name = name
self.set_jump_strategy(jump_object)
def set_jump_strategy(self, jump_object=None):
"""Set up the object that defines the jump strategy.
Choose an algorithm that defines the jump behaviour. The algorithm is
represented by a 'strategy object'.
:arg JumpStrategy jump_object: Instance of the class that should handle
how this game unit 'jumps'. Could be an instance of
JumpStrategy or its subclasses. Defaults to None.
"""
if isinstance(jump_object, JumpStrategy):
self.jump_strategy = jump_object
else:
self.jump_strategy = JumpStrategy()
def jump(self):
"""Perform the jump operation (delegated)"""
try:
self.jump_strategy.jump()
except AttributeError as err:
print("Error: AbstractGameUnit.jump: self.jump_strategy: {0}"
"\nError details: {1}".format(self.jump_strategy, err.args))
@abstractmethod
def info(self):
"""Print information aboute this game unit."""
```
#### File: wargame/designpatterns/traditional_dwarffighter.py
```python
from traditional_abstractgameunit import AbstractGameUnit
class DwarfFighter(AbstractGameUnit):
"""Create a DwarfFighter object instance"""
def info(self):
"""Print info about this unit, overrides superclass method."""
print("I am a great dwarf of the eastern foo mountain!")
```
#### File: wargame/designpatterns/traditional_elflord.py
```python
from traditional_abstractgameunit import AbstractGameUnit
class ElfLord(AbstractGameUnit):
"""Create a ElfLord object instance"""
def info(self):
"""Print info about this unit, overrides superclass method."""
print("I am a Elf Lord from far away!")
```
#### File: wargame/designpatterns/traditional_unitfactory_kingdom.py
```python
class Kingdom:
"""Class that uses a 'factory' to get an instance of a game character.
:arg UnitFactory factory: A factory instance used to create new units.
:ivar UnitFactory factory: Represents a factory instance used to create a
new game unit.
.. seealso:: class `UnitFactory`
"""
def __init__(self, factory):
self.factory = factory
def recruit(self, unit_type):
"""Recruit a new game unit, creating it first using a factory.
This method recruits a new unit for the 'kingdom'. First it 'orders' a
unit from the factory instance, then pays the price and updates some
record. The pay_gold and update_record methods are dummy, they just print
some information.
:arg string unit_type: The type (name) of unit requested.
:return: A game unit instance returned by the factory.
"""
unit = self.factory.create_unit(unit_type)
self.pay_gold(unit)
self.update_records(unit)
return unit
def pay_gold(self, something):
"""Pay gold for the recruited unit (dummy method)."""
print("GOLD PAID")
def update_records(self, something):
"""Update some record to reflect new recruit (dummy method)."""
print("Some logic (not shown) to update database of units")
```
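A hedged usage sketch for `Kingdom`. The `SimpleUnitFactory` below stands in for the `UnitFactory` class referenced in the docstring, which is not shown in this file:
```python
from traditional_unitfactory_kingdom import Kingdom

class ElfLord:
    """Hypothetical game unit returned by the factory."""
    def info(self):
        print("I am an Elf Lord from far away!")

class SimpleUnitFactory:
    """Hypothetical factory mapping unit type names to unit classes."""
    def __init__(self, units):
        self.units = units

    def create_unit(self, unit_type):
        return self.units[unit_type]()

kingdom = Kingdom(SimpleUnitFactory({'elflord': ElfLord}))
recruit = kingdom.recruit('elflord')  # prints "GOLD PAID" and the record update
recruit.info()
```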
#### File: wargame/GUI/hutgame.py
```python
import sys
import random
if sys.version_info < (3, 0):
from Tkinter import Tk, Label, Radiobutton, PhotoImage, IntVar
import tkMessageBox as messagebox
else:
from tkinter import Tk, Label, Radiobutton, PhotoImage, IntVar
from tkinter import messagebox
class HutGame:
def __init__(self, parent):
"""A game where the player selects a hut to rest.
        The program initially puts an 'enemy' or a 'friend' inside each hut. Some
huts could also be left 'unoccupied'. You are asked to select a hut.
You win if the hut occupant is either a 'friend' or if the hut is not
occupied.
:param parent: the parent tkinter widget
:ivar list huts: list to store occupant types (as strings)
:ivar int hut_width: the width of the application window in pixels
:ivar int hut_height: the height of the application window in pixels
:ivar PhotoImage village_image: background image for the app
:ivar PhotoImage hut_image: the hut image for the radio buttons
:ivar Tk container: the main widget serving as a parent for others. In
this example it is just the main Tk instance.
:ivar str result: the string to declare the result via a messagebox.
"""
self.village_image = PhotoImage(file="jungle_small.gif")
self.hut_image = PhotoImage(file="hut_small.gif")
self.hut_width = 40
self.hut_height = 56
self.container = parent
self.huts = []
self.result = ""
self.occupy_huts()
self.setup()
def occupy_huts(self):
"""Randomly occupy the huts: enemy or friend or keep unoccupied"""
occupants = ['enemy', 'friend', 'unoccupied']
while len(self.huts) < 5:
self.huts = [random.choice(occupants) for _ in range(5)]
print("Hut occupants are: {0}".format(', '.join(self.huts)))
def enter_hut(self, hut_number):
"""Enter the selected hut and determine the winner
This method checks the hut occupant stored in self.huts for the
given hut_number. Depending on the occupant the winner is 'announced'.
:param hut_number: the number assigned to the selected hut
.. seealso:: :py:meth: `occupy_huts`
        .. seealso:: the equivalent method in file hutgame_mvc.py
"""
print("Entering hut #: {0}".format(hut_number))
hut_occupant = self.huts[hut_number-1]
print("Hut occupant is: {0}".format(hut_occupant))
if hut_occupant == 'enemy':
self.result = "Enemy sighted in Hut # {0}\n\n".format(hut_number)
self.result += "YOU LOSE :( Luck next time!"
elif hut_occupant == 'unoccupied':
self.result = "Hut # {0} is unoccupied!\n\n".format(hut_number)
self.result += "Congratulations! YOU WIN!!!"
else:
self.result = "Friend sighted in Hut # {0}\n\n".format(hut_number)
self.result += "Congratulations! YOU WIN!!!"
# Announce the winner!
self.announce_winner(self.result)
def create_widgets(self):
"""Create various widges in the tkinter main window."""
self.var = IntVar()
self.background_label = Label(self.container, image=self.village_image)
txt = "Select a hut to enter. You win if:\n"
        txt += "The hut is unoccupied or the occupant is a friend!"
self.info_label = Label(self.container, text=txt, bg='yellow')
# Create a dictionary for radio button config options.
r_btn_config = {'variable': self.var,
'bg': '#A8884C',
'activebackground': 'yellow',
'image': self.hut_image,
'height': self.hut_height,
'width': self.hut_width,
'command': self.radio_btn_pressed}
self.r1 = Radiobutton(self.container, r_btn_config, value=1)
self.r2 = Radiobutton(self.container, r_btn_config, value=2)
self.r3 = Radiobutton(self.container, r_btn_config, value=3)
self.r4 = Radiobutton(self.container, r_btn_config, value=4)
self.r5 = Radiobutton(self.container, r_btn_config, value=5)
def setup(self):
"""Calls methods to setup the user interface."""
self.create_widgets()
self.setup_layout()
def setup_layout(self):
"""Use the grid geometry manager to place widgets."""
self.container.grid_rowconfigure(1, weight=1)
self.container.grid_columnconfigure(0, weight=1)
self.container.grid_columnconfigure(4, weight=1)
self.background_label.place(x=0, y=0, relwidth=1, relheight=1)
self.info_label.grid(row=0, column=0, columnspan=5, sticky='nsew')
self.r1.grid(row=1, column=0)
self.r2.grid(row=1, column=4)
self.r3.grid(row=2, column=3)
self.r4.grid(row=3, column=0)
self.r5.grid(row=4, column=4)
def announce_winner(self, data):
"""Declare the winner by displaying a tkinter messagebox.
:param data: the data to be 'published'. This could be any object.
"""
messagebox.showinfo("Winner Announcement", message=data)
# Handle Events
def radio_btn_pressed(self):
"""Command callback when radio button is pressed.
.. seealso:: :py:meth: `create_widgets`
"""
self.enter_hut(self.var.get())
if __name__ == '__main__':
# Create Tk instance. This is popularly called 'root'. But let's
# call it mainwin (the 'main window' of the application.)
mainwin = Tk()
WIDTH = 494
HEIGHT = 307
mainwin.geometry("{0}x{1}".format(WIDTH, HEIGHT))
mainwin.resizable(0, 0)
mainwin.title("Attack of the Orcs Game")
game_app = HutGame(mainwin)
mainwin.mainloop()
```
#### File: wargameRepo/wargame/hut.py
```python
from gameutils import print_bold
class Hut():
"""Class to create hut object(s) in the game Attack of the Orcs
:arg int number: Hut number to be assigned
:arg AbstractGameUnit occupant: The new occupant of the Hut
:ivar int number: A number assigned to this hut
:ivar AbstractGameUnit occupant: The occupant of this hut.
Needs to be an instance of the subclass of
`AbstractGameUnit`.
:ivar boolean is_acquired: A boolean flag to indicate if the
hut is acquired. In the current implementation
this is viewed from the player's perspective.
.. seealso:: where it is used --
:py:meth: `attackoftheorcs.AttackOfTheOrcs.setup_game_scenario`
"""
def __init__(self, number, occupant):
self.number = number
self.occupant = occupant
self.is_acquired = False
def acquire(self, new_occupant):
"""Update the occupant of this hut and set is_acquired flag.
Update the occupant instance variable with the parameter new_occupant
and set the is_acquired flag to True.
:arg new_occupant: self.occupant will be updated with this parameter
.. todo:: In the current implementation this is supposed to be called
                  only by the `Knight` instance (everything from the player
context). A generalization is to allow anyone to 'acquire'
the hut! In that case, the client code should properly
interpret meaning of `is_acquired` flag!
Otherwise it will be a bug! As an exercise, write a unit
test to catch this and/or make the calling code robust.
"""
self.occupant = new_occupant
self.is_acquired = True
print_bold("GOOD JOB! Hut {0:d} acquired".format(self.number))
def get_occupant_type(self):
"""Return a string giving info on the hut occupant type.
        Used only for printing information on who is present in the
hut. The information it will return depends on the occupant and
can be one of these strings: 'enemy', 'friend', 'ACQUIRED',
'unoccupied'
The logic is as follows: If the hut.occupant is one of the game
characters, it will simply retrieve this info from that instance.
Otherwise determine whether it is acquired or unoccupied.
:return: A string representing the occupant type
.. seealso: :py:meth: `attackoftheorcs.AttackOfTheOrcs.get_occupants`
"""
if self.is_acquired:
occupant_type = 'ACQUIRED'
elif self.occupant is None:
occupant_type = 'unoccupied'
else:
occupant_type = self.occupant.unit_type
return occupant_type
```
|
{
"source": "jeantardelli/web-dev-flask",
"score": 3
}
|
#### File: jeantardelli/web-dev-flask/db-setup.py
```python
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
# Define the MySQL engines using MySQL Connector/Python. Note that the '@'
# inside the password must be percent-encoded as %40 in a SQLAlchemy URL.
engine = sqlalchemy.create_engine(
    'mysql+mysqlconnector://pyuser:Py%40pp4Demo@localhost:3306/sqlalchemy',
    echo=True)
engine_dev = sqlalchemy.create_engine(
    'mysql+mysqlconnector://pyuser:Py%40pp4Demo@localhost:3306/sqlalchemy_dev',
    echo=True)
# Define and create the table
Base = declarative_base()
class Subscriber(Base):
__tablename__ = 'subscribers'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
email = sqlalchemy.Column(sqlalchemy.String(64), unique=True)
def __repr__(self):
return "<Subscriber(email='{0}')>".format(self.email)
class Letter(Base):
__tablename__ = 'letters'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
firstname = sqlalchemy.Column(sqlalchemy.String(64))
lastname = sqlalchemy.Column(sqlalchemy.String(64))
email = sqlalchemy.Column(sqlalchemy.String(64))
subject = sqlalchemy.Column(sqlalchemy.String(64))
body = sqlalchemy.Column(sqlalchemy.Text)
def __repr__(self):
return "<Letter(firstname='{0}', lastname='{1}', email='{2}', "\
"subject='{3}', body='{4}')>".format(self.firstname, self.lastname,
self.email, self.subject, self.body)
Base.metadata.create_all(engine)
Base.metadata.create_all(engine_dev)
```
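Once the tables exist, rows can be added through an ORM session. A minimal sketch, assuming it runs in the same module as the `engine` and model classes defined above:
```python
from sqlalchemy.orm import sessionmaker

# Bind a session factory to the engine created above.
Session = sessionmaker(bind=engine)
session = Session()

# Insert a subscriber and read it back.
session.add(Subscriber(email='alice@example.com'))
session.commit()
for sub in session.query(Subscriber).all():
    print(sub)  # <Subscriber(email='alice@example.com')>
session.close()
```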
#### File: web-dev-flask/tests/test_subscriber_model.py
```python
import unittest
from app import create_app, db
from app.models import Subscriber, Letter
class SubscriberModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_subscribe_registration(self):
s = Subscriber(email='<EMAIL>')
self.assertTrue(s.email is not None)
def test_subscribe_sameuser(self):
s1 = Subscriber(email='<EMAIL>')
s2 = Subscriber(email='<EMAIL>')
db.session.add(s1)
db.session.add(s2)
with self.assertRaises(Exception):
db.session.commit()
def test_subscriber_and_letter_repr(self):
s = Subscriber(email='<EMAIL>')
l = Letter(firstname='example', lastname='example',
subject='Outro', email='<EMAIL>',
body='A new message!')
print("")
print('This is the subscriber __repr__: {0}'.format(s))
print('This is the letter __repr__: {0}'.format(l))
```
|
{
"source": "JeanTheodoro/LibPython",
"score": 3
}
|
#### File: libpython/spam/db.py
```python
from time import sleep
class Session:
    """Fake in-memory database session used for testing.

    Note: `contador` and `users` are class-level attributes, so the id
    counter and the user list are shared by all Session instances.
    """
    contador = 0
    users = []
def save(self, user):
Session.contador += 1
user.id = Session.contador
self.users.append(user)
def list(self):
print(self.users)
return self.users
def rool_back(self):
self.users.clear()
def close(self):
pass
class Conection:
    """Fake database connection; the sleep simulates connection latency."""
    def __init__(self):
        sleep(2)
def create_session(self):
return Session()
def close(self):
pass
```
#### File: libpython/spam/main.py
```python
class SendOfSpam:
    """Send an e-mail (spam) to every user stored in the given session."""
def __init__(self, session, send):
self.session = session
self.send = send
def send_emails(self, remetente, assunto, corpo):
for user in self.session.list():
self.send.enviar(
remetente,
user.email,
assunto,
corpo
)
```
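A small end-to-end sketch wiring the two modules together. The `User` and `ConsoleSender` classes are hypothetical stand-ins (the real user model and sender are not shown in this repository), and the import paths assume the package layout implied by the file names:
```python
from libpython.spam.db import Conection
from libpython.spam.main import SendOfSpam

class User:
    """Hypothetical user record with the attributes Session.save expects."""
    def __init__(self, email):
        self.email = email
        self.id = None

class ConsoleSender:
    """Hypothetical sender that prints instead of sending real e-mail."""
    def enviar(self, remetente, destinatario, assunto, corpo):
        print('{0} -> {1}: {2}'.format(remetente, destinatario, assunto))

conection = Conection()                 # takes ~2s (simulated latency)
session = conection.create_session()
session.save(User('foo@bar.com'))
SendOfSpam(session, ConsoleSender()).send_emails('me@spam.com', 'Hi', 'body')
session.close()
conection.close()
```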
#### File: tests/test_spam/test_github_api.py
```python
from unittest.mock import Mock
import pytest
from libpython import github_api
def test_search_avatar(avatar_url):
url = github_api.search_avatar('JeanTheodoro')
assert avatar_url == url
@pytest.fixture
def avatar_url(mocker):
resp_mock = Mock()
url = 'https://avatars.githubusercontent.com/u/69826260?v=4'
resp_mock.json.return_value = {
'login': 'JeanTheodoro', 'id': 69826260,
'avatar_url': url,
}
get_mock = mocker.patch('libpython.github_api.requests.get')
get_mock.return_value = resp_mock
return url
def test_search_avatar_integration():
url = github_api.search_avatar('JeanTheodoro')
assert 'https://avatars.githubusercontent.com/u/69826260?v=4' == url
```
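The tests above imply that `search_avatar` fetches the GitHub users endpoint and returns the `avatar_url` field of the JSON response. A hedged sketch of such a function (the actual `libpython/github_api.py` is not shown here):
```python
import requests

def search_avatar(username):
    """Return the avatar URL of a GitHub user (illustrative sketch)."""
    url = 'https://api.github.com/users/{0}'.format(username)
    resp = requests.get(url)
    return resp.json()['avatar_url']
```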
|
{
"source": "jeanthom/nmigen",
"score": 2
}
|
#### File: nmigen/build/res.py
```python
from collections import OrderedDict
from ..hdl.ast import *
from ..hdl.rec import *
from ..lib.io import *
from .dsl import *
__all__ = ["ResourceError", "ResourceManager"]
class ResourceError(Exception):
pass
class ResourceManager:
def __init__(self, resources, connectors):
self.resources = OrderedDict()
self._requested = OrderedDict()
self._phys_reqd = OrderedDict()
self.connectors = OrderedDict()
self._conn_pins = OrderedDict()
# Constraint lists
self._ports = []
self._clocks = SignalDict()
self.add_resources(resources)
self.add_connectors(connectors)
def add_resources(self, resources):
for res in resources:
if not isinstance(res, Resource):
raise TypeError("Object {!r} is not a Resource".format(res))
if (res.name, res.number) in self.resources:
raise NameError("Trying to add {!r}, but {!r} has the same name and number"
.format(res, self.resources[res.name, res.number]))
self.resources[res.name, res.number] = res
def add_connectors(self, connectors):
for conn in connectors:
if not isinstance(conn, Connector):
raise TypeError("Object {!r} is not a Connector".format(conn))
if (conn.name, conn.number) in self.connectors:
raise NameError("Trying to add {!r}, but {!r} has the same name and number"
.format(conn, self.connectors[conn.name, conn.number]))
self.connectors[conn.name, conn.number] = conn
for conn_pin, plat_pin in conn:
assert conn_pin not in self._conn_pins
self._conn_pins[conn_pin] = plat_pin
def lookup(self, name, number=0):
if (name, number) not in self.resources:
raise ResourceError("Resource {}#{} does not exist"
.format(name, number))
return self.resources[name, number]
def request(self, name, number=0, *, dir=None, xdr=None):
resource = self.lookup(name, number)
if (resource.name, resource.number) in self._requested:
raise ResourceError("Resource {}#{} has already been requested"
.format(name, number))
def merge_options(subsignal, dir, xdr):
if isinstance(subsignal.ios[0], Subsignal):
if dir is None:
dir = dict()
if xdr is None:
xdr = dict()
if not isinstance(dir, dict):
raise TypeError("Directions must be a dict, not {!r}, because {!r} "
"has subsignals"
.format(dir, subsignal))
if not isinstance(xdr, dict):
raise TypeError("Data rate must be a dict, not {!r}, because {!r} "
"has subsignals"
.format(xdr, subsignal))
for sub in subsignal.ios:
sub_dir = dir.get(sub.name, None)
sub_xdr = xdr.get(sub.name, None)
dir[sub.name], xdr[sub.name] = merge_options(sub, sub_dir, sub_xdr)
else:
if dir is None:
dir = subsignal.ios[0].dir
if xdr is None:
xdr = 0
if dir not in ("i", "o", "oe", "io", "-"):
raise TypeError("Direction must be one of \"i\", \"o\", \"oe\", \"io\", "
"or \"-\", not {!r}"
.format(dir))
if dir != subsignal.ios[0].dir and \
not (subsignal.ios[0].dir == "io" or dir == "-"):
raise ValueError("Direction of {!r} cannot be changed from \"{}\" to \"{}\"; "
"direction can be changed from \"io\" to \"i\", \"o\", or "
"\"oe\", or from anything to \"-\""
.format(subsignal.ios[0], subsignal.ios[0].dir, dir))
if not isinstance(xdr, int) or xdr < 0:
raise ValueError("Data rate of {!r} must be a non-negative integer, not {!r}"
.format(subsignal.ios[0], xdr))
return dir, xdr
def resolve(resource, dir, xdr, name, attrs):
for attr_key, attr_value in attrs.items():
if hasattr(attr_value, "__call__"):
attr_value = attr_value(self)
assert attr_value is None or isinstance(attr_value, str)
if attr_value is None:
del attrs[attr_key]
else:
attrs[attr_key] = attr_value
if isinstance(resource.ios[0], Subsignal):
fields = OrderedDict()
for sub in resource.ios:
fields[sub.name] = resolve(sub, dir[sub.name], xdr[sub.name],
name="{}__{}".format(name, sub.name),
attrs={**attrs, **sub.attrs})
return Record([
(f_name, f.layout) for (f_name, f) in fields.items()
], fields=fields, name=name)
elif isinstance(resource.ios[0], (Pins, DiffPairs)):
phys = resource.ios[0]
if isinstance(phys, Pins):
phys_names = phys.names
port = Record([("io", len(phys))], name=name)
if isinstance(phys, DiffPairs):
phys_names = phys.p.names + phys.n.names
port = Record([("p", len(phys)),
("n", len(phys))], name=name)
if dir == "-":
pin = None
else:
pin = Pin(len(phys), dir, xdr=xdr, name=name)
for phys_name in phys_names:
if phys_name in self._phys_reqd:
raise ResourceError("Resource component {} uses physical pin {}, but it "
"is already used by resource component {} that was "
"requested earlier"
.format(name, phys_name, self._phys_reqd[phys_name]))
self._phys_reqd[phys_name] = name
self._ports.append((resource, pin, port, attrs))
if pin is not None and resource.clock is not None:
self.add_clock_constraint(pin.i, resource.clock.frequency)
return pin if pin is not None else port
else:
assert False # :nocov:
value = resolve(resource,
*merge_options(resource, dir, xdr),
name="{}_{}".format(resource.name, resource.number),
attrs=resource.attrs)
self._requested[resource.name, resource.number] = value
return value
def iter_single_ended_pins(self):
for res, pin, port, attrs in self._ports:
if pin is None:
continue
if isinstance(res.ios[0], Pins):
yield pin, port.io, attrs, res.ios[0].invert
def iter_differential_pins(self):
for res, pin, port, attrs in self._ports:
if pin is None:
continue
if isinstance(res.ios[0], DiffPairs):
yield pin, port.p, port.n, attrs, res.ios[0].invert
def should_skip_port_component(self, port, attrs, component):
return False
def iter_ports(self):
for res, pin, port, attrs in self._ports:
if isinstance(res.ios[0], Pins):
if not self.should_skip_port_component(port, attrs, "io"):
yield port.io
elif isinstance(res.ios[0], DiffPairs):
if not self.should_skip_port_component(port, attrs, "p"):
yield port.p
if not self.should_skip_port_component(port, attrs, "n"):
yield port.n
else:
assert False
def iter_port_constraints(self):
for res, pin, port, attrs in self._ports:
if isinstance(res.ios[0], Pins):
if not self.should_skip_port_component(port, attrs, "io"):
yield port.io.name, res.ios[0].map_names(self._conn_pins, res), attrs
elif isinstance(res.ios[0], DiffPairs):
if not self.should_skip_port_component(port, attrs, "p"):
yield port.p.name, res.ios[0].p.map_names(self._conn_pins, res), attrs
if not self.should_skip_port_component(port, attrs, "n"):
yield port.n.name, res.ios[0].n.map_names(self._conn_pins, res), attrs
else:
assert False
def iter_port_constraints_bits(self):
for port_name, pin_names, attrs in self.iter_port_constraints():
if len(pin_names) == 1:
yield port_name, pin_names[0], attrs
else:
for bit, pin_name in enumerate(pin_names):
yield "{}[{}]".format(port_name, bit), pin_name, attrs
def add_clock_constraint(self, clock, frequency):
if not isinstance(clock, Signal):
raise TypeError("Object {!r} is not a Signal".format(clock))
if not isinstance(frequency, (int, float)):
raise TypeError("Frequency must be a number, not {!r}".format(frequency))
if clock in self._clocks:
raise ValueError("Cannot add clock constraint on {!r}, which is already constrained "
"to {} Hz"
.format(clock, self._clocks[clock]))
else:
self._clocks[clock] = float(frequency)
def iter_clock_constraints(self):
# Back-propagate constraints through the input buffer. For clock constraints on pins
# (the majority of cases), toolchains work better if the constraint is defined on the pin
# and not on the buffered internal net; and if the toolchain is advanced enough that
# it considers clock phase and delay of the input buffer, it is *necessary* to define
# the constraint on the pin to match the designer's expectation of phase being referenced
# to the pin.
#
# Constraints on nets with no corresponding input pin (e.g. PLL or SERDES outputs) are not
# affected.
pin_i_to_port = SignalDict()
for res, pin, port, attrs in self._ports:
if hasattr(pin, "i"):
if isinstance(res.ios[0], Pins):
pin_i_to_port[pin.i] = port.io
elif isinstance(res.ios[0], DiffPairs):
pin_i_to_port[pin.i] = port.p
else:
assert False
for net_signal, frequency in self._clocks.items():
port_signal = pin_i_to_port.get(net_signal)
yield net_signal, port_signal, frequency
```
#### File: nmigen/sim/_core.py
```python
__all__ = ["Process", "Timeline"]
class Process:
def __init__(self, *, is_comb):
self.is_comb = is_comb
self.reset()
def reset(self):
self.runnable = self.is_comb
self.passive = True
def run(self):
raise NotImplementedError
class Timeline:
def __init__(self):
self.now = 0.0
self.deadlines = dict()
def reset(self):
self.now = 0.0
self.deadlines.clear()
def at(self, run_at, process):
assert process not in self.deadlines
self.deadlines[process] = run_at
def delay(self, delay_by, process):
if delay_by is None:
run_at = self.now
else:
run_at = self.now + delay_by
self.at(run_at, process)
def advance(self):
nearest_processes = set()
nearest_deadline = None
for process, deadline in self.deadlines.items():
if deadline is None:
if nearest_deadline is not None:
nearest_processes.clear()
nearest_processes.add(process)
nearest_deadline = self.now
break
elif nearest_deadline is None or deadline <= nearest_deadline:
assert deadline >= self.now
if nearest_deadline is not None and deadline < nearest_deadline:
nearest_processes.clear()
nearest_processes.add(process)
nearest_deadline = deadline
if not nearest_processes:
return False
for process in nearest_processes:
process.runnable = True
del self.deadlines[process]
self.now = nearest_deadline
return True
```
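An illustrative sketch of how `Timeline` schedules processes, using the classes defined above (the `TickProcess` subclass exists only for this example):
```python
class TickProcess(Process):
    """Hypothetical process used only to demonstrate Timeline scheduling."""
    def run(self):
        pass

p1 = TickProcess(is_comb=False)
p2 = TickProcess(is_comb=False)

tl = Timeline()
tl.delay(2e-6, p1)   # run p1 at t = 2 us
tl.delay(1e-6, p2)   # run p2 at t = 1 us

tl.advance()                 # advances to the nearest deadline first
print(tl.now, p2.runnable)   # 1e-06 True
tl.advance()
print(tl.now, p1.runnable)   # 2e-06 True
print(tl.advance())          # False: no deadlines left
```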
#### File: nmigen/sim/_pyrtl.py
```python
import os
import tempfile
from contextlib import contextmanager
from ..hdl import *
from ..hdl.ast import SignalSet
from ..hdl.xfrm import ValueVisitor, StatementVisitor, LHSGroupFilter
from ._core import *
__all__ = ["PyRTLProcess"]
class PyRTLProcess(Process):
pass
class _PythonEmitter:
def __init__(self):
self._buffer = []
self._suffix = 0
self._level = 0
def append(self, code):
self._buffer.append(" " * self._level)
self._buffer.append(code)
self._buffer.append("\n")
@contextmanager
def indent(self):
self._level += 1
yield
self._level -= 1
def flush(self, indent=""):
code = "".join(self._buffer)
self._buffer.clear()
return code
def gen_var(self, prefix):
name = f"{prefix}_{self._suffix}"
self._suffix += 1
return name
def def_var(self, prefix, value):
name = self.gen_var(prefix)
self.append(f"{name} = {value}")
return name
class _Compiler:
def __init__(self, state, emitter):
self.state = state
self.emitter = emitter
class _ValueCompiler(ValueVisitor, _Compiler):
helpers = {
"sign": lambda value, sign: value | sign if value & sign else value,
"zdiv": lambda lhs, rhs: 0 if rhs == 0 else lhs // rhs,
"zmod": lambda lhs, rhs: 0 if rhs == 0 else lhs % rhs,
}
def on_ClockSignal(self, value):
raise NotImplementedError # :nocov:
def on_ResetSignal(self, value):
raise NotImplementedError # :nocov:
def on_AnyConst(self, value):
raise NotImplementedError # :nocov:
def on_AnySeq(self, value):
raise NotImplementedError # :nocov:
def on_Sample(self, value):
raise NotImplementedError # :nocov:
def on_Initial(self, value):
raise NotImplementedError # :nocov:
class _RHSValueCompiler(_ValueCompiler):
def __init__(self, state, emitter, *, mode, inputs=None):
super().__init__(state, emitter)
assert mode in ("curr", "next")
self.mode = mode
# If not None, `inputs` gets populated with RHS signals.
self.inputs = inputs
def on_Const(self, value):
return f"{value.value}"
def on_Signal(self, value):
if self.inputs is not None:
self.inputs.add(value)
if self.mode == "curr":
return f"slots[{self.state.get_signal(value)}].{self.mode}"
else:
return f"next_{self.state.get_signal(value)}"
def on_Operator(self, value):
def mask(value):
value_mask = (1 << len(value)) - 1
return f"({self(value)} & {value_mask})"
def sign(value):
if value.shape().signed:
return f"sign({mask(value)}, {-1 << (len(value) - 1)})"
else: # unsigned
return mask(value)
if len(value.operands) == 1:
arg, = value.operands
if value.operator == "~":
return f"(~{self(arg)})"
if value.operator == "-":
return f"(-{self(arg)})"
if value.operator == "b":
return f"bool({mask(arg)})"
if value.operator == "r|":
return f"({mask(arg)} != 0)"
if value.operator == "r&":
return f"({mask(arg)} == {(1 << len(arg)) - 1})"
if value.operator == "r^":
# Believe it or not, this is the fastest way to compute a sideways XOR in Python.
return f"(format({mask(arg)}, 'b').count('1') % 2)"
if value.operator in ("u", "s"):
# These operators don't change the bit pattern, only its interpretation.
return self(arg)
elif len(value.operands) == 2:
lhs, rhs = value.operands
lhs_mask = (1 << len(lhs)) - 1
rhs_mask = (1 << len(rhs)) - 1
if value.operator == "+":
return f"({sign(lhs)} + {sign(rhs)})"
if value.operator == "-":
return f"({sign(lhs)} - {sign(rhs)})"
if value.operator == "*":
return f"({sign(lhs)} * {sign(rhs)})"
if value.operator == "//":
return f"zdiv({sign(lhs)}, {sign(rhs)})"
if value.operator == "%":
return f"zmod({sign(lhs)}, {sign(rhs)})"
if value.operator == "&":
return f"({self(lhs)} & {self(rhs)})"
if value.operator == "|":
return f"({self(lhs)} | {self(rhs)})"
if value.operator == "^":
return f"({self(lhs)} ^ {self(rhs)})"
if value.operator == "<<":
return f"({sign(lhs)} << {sign(rhs)})"
if value.operator == ">>":
return f"({sign(lhs)} >> {sign(rhs)})"
if value.operator == "==":
return f"({sign(lhs)} == {sign(rhs)})"
if value.operator == "!=":
return f"({sign(lhs)} != {sign(rhs)})"
if value.operator == "<":
return f"({sign(lhs)} < {sign(rhs)})"
if value.operator == "<=":
return f"({sign(lhs)} <= {sign(rhs)})"
if value.operator == ">":
return f"({sign(lhs)} > {sign(rhs)})"
if value.operator == ">=":
return f"({sign(lhs)} >= {sign(rhs)})"
elif len(value.operands) == 3:
if value.operator == "m":
sel, val1, val0 = value.operands
return f"({self(val1)} if {self(sel)} else {self(val0)})"
raise NotImplementedError("Operator '{}' not implemented".format(value.operator)) # :nocov:
def on_Slice(self, value):
return f"(({self(value.value)} >> {value.start}) & {(1 << len(value)) - 1})"
def on_Part(self, value):
offset_mask = (1 << len(value.offset)) - 1
offset = f"(({self(value.offset)} & {offset_mask}) * {value.stride})"
return f"({self(value.value)} >> {offset} & " \
f"{(1 << value.width) - 1})"
def on_Cat(self, value):
gen_parts = []
offset = 0
for part in value.parts:
part_mask = (1 << len(part)) - 1
gen_parts.append(f"(({self(part)} & {part_mask}) << {offset})")
offset += len(part)
if gen_parts:
return f"({' | '.join(gen_parts)})"
return f"0"
def on_Repl(self, value):
part_mask = (1 << len(value.value)) - 1
gen_part = self.emitter.def_var("repl", f"{self(value.value)} & {part_mask}")
gen_parts = []
offset = 0
for _ in range(value.count):
gen_parts.append(f"({gen_part} << {offset})")
offset += len(value.value)
if gen_parts:
return f"({' | '.join(gen_parts)})"
return f"0"
def on_ArrayProxy(self, value):
index_mask = (1 << len(value.index)) - 1
gen_index = self.emitter.def_var("rhs_index", f"{self(value.index)} & {index_mask}")
gen_value = self.emitter.gen_var("rhs_proxy")
if value.elems:
gen_elems = []
for index, elem in enumerate(value.elems):
if index == 0:
self.emitter.append(f"if {gen_index} == {index}:")
else:
self.emitter.append(f"elif {gen_index} == {index}:")
with self.emitter.indent():
self.emitter.append(f"{gen_value} = {self(elem)}")
self.emitter.append(f"else:")
with self.emitter.indent():
self.emitter.append(f"{gen_value} = {self(value.elems[-1])}")
return gen_value
else:
return f"0"
@classmethod
def compile(cls, state, value, *, mode):
emitter = _PythonEmitter()
compiler = cls(state, emitter, mode=mode)
emitter.append(f"result = {compiler(value)}")
return emitter.flush()
class _LHSValueCompiler(_ValueCompiler):
def __init__(self, state, emitter, *, rhs, outputs=None):
super().__init__(state, emitter)
# `rrhs` is used to translate rvalues that are syntactically a part of an lvalue, e.g.
# the offset of a Part.
self.rrhs = rhs
# `lrhs` is used to translate the read part of a read-modify-write cycle during partial
# update of an lvalue.
self.lrhs = _RHSValueCompiler(state, emitter, mode="next", inputs=None)
# If not None, `outputs` gets populated with signals on LHS.
self.outputs = outputs
def on_Const(self, value):
raise TypeError # :nocov:
def on_Signal(self, value):
if self.outputs is not None:
self.outputs.add(value)
def gen(arg):
value_mask = (1 << len(value)) - 1
if value.shape().signed:
value_sign = f"sign({arg} & {value_mask}, {-1 << (len(value) - 1)})"
else: # unsigned
value_sign = f"{arg} & {value_mask}"
self.emitter.append(f"next_{self.state.get_signal(value)} = {value_sign}")
return gen
def on_Operator(self, value):
raise TypeError # :nocov:
def on_Slice(self, value):
def gen(arg):
width_mask = (1 << (value.stop - value.start)) - 1
self(value.value)(f"({self.lrhs(value.value)} & " \
f"{~(width_mask << value.start)} | " \
f"(({arg} & {width_mask}) << {value.start}))")
return gen
def on_Part(self, value):
def gen(arg):
width_mask = (1 << value.width) - 1
offset_mask = (1 << len(value.offset)) - 1
offset = f"(({self.rrhs(value.offset)} & {offset_mask}) * {value.stride})"
self(value.value)(f"({self.lrhs(value.value)} & " \
f"~({width_mask} << {offset}) | " \
f"(({arg} & {width_mask}) << {offset}))")
return gen
def on_Cat(self, value):
def gen(arg):
gen_arg = self.emitter.def_var("cat", arg)
gen_parts = []
offset = 0
for part in value.parts:
part_mask = (1 << len(part)) - 1
self(part)(f"(({gen_arg} >> {offset}) & {part_mask})")
offset += len(part)
return gen
def on_Repl(self, value):
raise TypeError # :nocov:
def on_ArrayProxy(self, value):
def gen(arg):
index_mask = (1 << len(value.index)) - 1
gen_index = self.emitter.def_var("index", f"{self.rrhs(value.index)} & {index_mask}")
if value.elems:
gen_elems = []
for index, elem in enumerate(value.elems):
if index == 0:
self.emitter.append(f"if {gen_index} == {index}:")
else:
self.emitter.append(f"elif {gen_index} == {index}:")
with self.emitter.indent():
self(elem)(arg)
self.emitter.append(f"else:")
with self.emitter.indent():
self(value.elems[-1])(arg)
else:
self.emitter.append(f"pass")
return gen
class _StatementCompiler(StatementVisitor, _Compiler):
def __init__(self, state, emitter, *, inputs=None, outputs=None):
super().__init__(state, emitter)
self.rhs = _RHSValueCompiler(state, emitter, mode="curr", inputs=inputs)
self.lhs = _LHSValueCompiler(state, emitter, rhs=self.rhs, outputs=outputs)
def on_statements(self, stmts):
for stmt in stmts:
self(stmt)
if not stmts:
self.emitter.append("pass")
def on_Assign(self, stmt):
return self.lhs(stmt.lhs)(self.rhs(stmt.rhs))
def on_Switch(self, stmt):
gen_test = self.emitter.def_var("test",
f"{self.rhs(stmt.test)} & {(1 << len(stmt.test)) - 1}")
for index, (patterns, stmts) in enumerate(stmt.cases.items()):
gen_checks = []
if not patterns:
gen_checks.append(f"True")
else:
for pattern in patterns:
if "-" in pattern:
mask = int("".join("0" if b == "-" else "1" for b in pattern), 2)
value = int("".join("0" if b == "-" else b for b in pattern), 2)
gen_checks.append(f"({gen_test} & {mask}) == {value}")
else:
value = int(pattern, 2)
gen_checks.append(f"{gen_test} == {value}")
if index == 0:
self.emitter.append(f"if {' or '.join(gen_checks)}:")
else:
self.emitter.append(f"elif {' or '.join(gen_checks)}:")
with self.emitter.indent():
self(stmts)
def on_Assert(self, stmt):
raise NotImplementedError # :nocov:
def on_Assume(self, stmt):
raise NotImplementedError # :nocov:
def on_Cover(self, stmt):
raise NotImplementedError # :nocov:
@classmethod
def compile(cls, state, stmt):
output_indexes = [state.get_signal(signal) for signal in stmt._lhs_signals()]
emitter = _PythonEmitter()
for signal_index in output_indexes:
emitter.append(f"next_{signal_index} = slots[{signal_index}].next")
compiler = cls(state, emitter)
compiler(stmt)
for signal_index in output_indexes:
emitter.append(f"slots[{signal_index}].set(next_{signal_index})")
return emitter.flush()
class _FragmentCompiler:
def __init__(self, state):
self.state = state
def __call__(self, fragment):
processes = set()
for domain_name, domain_signals in fragment.drivers.items():
domain_stmts = LHSGroupFilter(domain_signals)(fragment.statements)
domain_process = PyRTLProcess(is_comb=domain_name is None)
emitter = _PythonEmitter()
emitter.append(f"def run():")
emitter._level += 1
if domain_name is None:
for signal in domain_signals:
signal_index = self.state.get_signal(signal)
emitter.append(f"next_{signal_index} = {signal.reset}")
inputs = SignalSet()
_StatementCompiler(self.state, emitter, inputs=inputs)(domain_stmts)
for input in inputs:
self.state.add_trigger(domain_process, input)
else:
domain = fragment.domains[domain_name]
clk_trigger = 1 if domain.clk_edge == "pos" else 0
self.state.add_trigger(domain_process, domain.clk, trigger=clk_trigger)
if domain.rst is not None and domain.async_reset:
rst_trigger = 1
self.state.add_trigger(domain_process, domain.rst, trigger=rst_trigger)
for signal in domain_signals:
signal_index = self.state.get_signal(signal)
emitter.append(f"next_{signal_index} = slots[{signal_index}].next")
_StatementCompiler(self.state, emitter)(domain_stmts)
for signal in domain_signals:
signal_index = self.state.get_signal(signal)
emitter.append(f"slots[{signal_index}].set(next_{signal_index})")
# There shouldn't be any exceptions raised by the generated code, but if there are
# (almost certainly due to a bug in the code generator), use this environment variable
# to make backtraces useful.
code = emitter.flush()
if os.getenv("NMIGEN_pysim_dump"):
file = tempfile.NamedTemporaryFile("w", prefix="nmigen_pysim_", delete=False)
file.write(code)
filename = file.name
else:
filename = "<string>"
exec_locals = {"slots": self.state.slots, **_ValueCompiler.helpers}
exec(compile(code, filename, "exec"), exec_locals)
domain_process.run = exec_locals["run"]
processes.add(domain_process)
for subfragment_index, (subfragment, subfragment_name) in enumerate(fragment.subfragments):
if subfragment_name is None:
subfragment_name = "U${}".format(subfragment_index)
processes.update(self(subfragment))
return processes
```
#### File: test/compat/test_coding.py
```python
import unittest
from ...compat import *
from ...compat.genlib.coding import *
from .support import SimCase
class EncCase(SimCase, unittest.TestCase):
class TestBench(Module):
def __init__(self):
self.submodules.dut = Encoder(8)
def test_sizes(self):
self.assertEqual(len(self.tb.dut.i), 8)
self.assertEqual(len(self.tb.dut.o), 3)
self.assertEqual(len(self.tb.dut.n), 1)
def test_run_sequence(self):
seq = list(range(1<<8))
def gen():
for _ in range(256):
if seq:
yield self.tb.dut.i.eq(seq.pop(0))
yield
if (yield self.tb.dut.n):
self.assertNotIn((yield self.tb.dut.i), [1<<i for i in range(8)])
else:
self.assertEqual((yield self.tb.dut.i), 1<<(yield self.tb.dut.o))
self.run_with(gen())
class PrioEncCase(SimCase, unittest.TestCase):
class TestBench(Module):
def __init__(self):
self.submodules.dut = PriorityEncoder(8)
def test_sizes(self):
self.assertEqual(len(self.tb.dut.i), 8)
self.assertEqual(len(self.tb.dut.o), 3)
self.assertEqual(len(self.tb.dut.n), 1)
def test_run_sequence(self):
seq = list(range(1<<8))
def gen():
for _ in range(256):
if seq:
yield self.tb.dut.i.eq(seq.pop(0))
yield
i = yield self.tb.dut.i
if (yield self.tb.dut.n):
self.assertEqual(i, 0)
else:
o = yield self.tb.dut.o
if o > 0:
self.assertEqual(i & 1<<(o - 1), 0)
self.assertGreaterEqual(i, 1<<o)
self.run_with(gen())
class DecCase(SimCase, unittest.TestCase):
class TestBench(Module):
def __init__(self):
self.submodules.dut = Decoder(8)
def test_sizes(self):
self.assertEqual(len(self.tb.dut.i), 3)
self.assertEqual(len(self.tb.dut.o), 8)
self.assertEqual(len(self.tb.dut.n), 1)
def test_run_sequence(self):
seq = list(range(8*2))
def gen():
for _ in range(256):
if seq:
i = seq.pop()
yield self.tb.dut.i.eq(i//2)
yield self.tb.dut.n.eq(i%2)
yield
i = yield self.tb.dut.i
o = yield self.tb.dut.o
if (yield self.tb.dut.n):
self.assertEqual(o, 0)
else:
self.assertEqual(o, 1<<i)
self.run_with(gen())
class SmallPrioEncCase(SimCase, unittest.TestCase):
class TestBench(Module):
def __init__(self):
self.submodules.dut = PriorityEncoder(1)
def test_sizes(self):
self.assertEqual(len(self.tb.dut.i), 1)
self.assertEqual(len(self.tb.dut.o), 1)
self.assertEqual(len(self.tb.dut.n), 1)
def test_run_sequence(self):
seq = list(range(1))
def gen():
for _ in range(5):
if seq:
yield self.tb.dut.i.eq(seq.pop(0))
yield
i = yield self.tb.dut.i
if (yield self.tb.dut.n):
self.assertEqual(i, 0)
else:
o = yield self.tb.dut.o
if o > 0:
self.assertEqual(i & 1<<(o - 1), 0)
self.assertGreaterEqual(i, 1<<o)
self.run_with(gen())
```
#### File: nmigen/test/test_build_res.py
```python
from .. import *
from ..hdl.rec import *
from ..lib.io import *
from ..build.dsl import *
from ..build.res import *
from .utils import *
class ResourceManagerTestCase(FHDLTestCase):
def setUp(self):
self.resources = [
Resource("clk100", 0, DiffPairs("H1", "H2", dir="i"), Clock(100e6)),
Resource("clk50", 0, Pins("K1"), Clock(50e6)),
Resource("user_led", 0, Pins("A0", dir="o")),
Resource("i2c", 0,
Subsignal("scl", Pins("N10", dir="o")),
Subsignal("sda", Pins("N11"))
)
]
self.connectors = [
Connector("pmod", 0, "B0 B1 B2 B3 - -"),
]
self.cm = ResourceManager(self.resources, self.connectors)
def test_basic(self):
self.cm = ResourceManager(self.resources, self.connectors)
self.assertEqual(self.cm.resources, {
("clk100", 0): self.resources[0],
("clk50", 0): self.resources[1],
("user_led", 0): self.resources[2],
("i2c", 0): self.resources[3]
})
self.assertEqual(self.cm.connectors, {
("pmod", 0): self.connectors[0],
})
def test_add_resources(self):
new_resources = [
Resource("user_led", 1, Pins("A1", dir="o"))
]
self.cm.add_resources(new_resources)
self.assertEqual(self.cm.resources, {
("clk100", 0): self.resources[0],
("clk50", 0): self.resources[1],
("user_led", 0): self.resources[2],
("i2c", 0): self.resources[3],
("user_led", 1): new_resources[0]
})
def test_lookup(self):
r = self.cm.lookup("user_led", 0)
self.assertIs(r, self.cm.resources["user_led", 0])
def test_request_basic(self):
r = self.cm.lookup("user_led", 0)
user_led = self.cm.request("user_led", 0)
self.assertIsInstance(user_led, Pin)
self.assertEqual(user_led.name, "user_led_0")
self.assertEqual(user_led.width, 1)
self.assertEqual(user_led.dir, "o")
ports = list(self.cm.iter_ports())
self.assertEqual(len(ports), 1)
self.assertEqual(list(self.cm.iter_port_constraints()), [
("user_led_0__io", ["A0"], {})
])
def test_request_with_dir(self):
i2c = self.cm.request("i2c", 0, dir={"sda": "o"})
self.assertIsInstance(i2c, Record)
self.assertIsInstance(i2c.sda, Pin)
self.assertEqual(i2c.sda.dir, "o")
def test_request_tristate(self):
i2c = self.cm.request("i2c", 0)
self.assertEqual(i2c.sda.dir, "io")
ports = list(self.cm.iter_ports())
self.assertEqual(len(ports), 2)
scl, sda = ports
self.assertEqual(ports[1].name, "i2c_0__sda__io")
self.assertEqual(ports[1].width, 1)
self.assertEqual(list(self.cm.iter_single_ended_pins()), [
(i2c.scl, scl, {}, False),
(i2c.sda, sda, {}, False),
])
self.assertEqual(list(self.cm.iter_port_constraints()), [
("i2c_0__scl__io", ["N10"], {}),
("i2c_0__sda__io", ["N11"], {})
])
def test_request_diffpairs(self):
clk100 = self.cm.request("clk100", 0)
self.assertIsInstance(clk100, Pin)
self.assertEqual(clk100.dir, "i")
self.assertEqual(clk100.width, 1)
ports = list(self.cm.iter_ports())
self.assertEqual(len(ports), 2)
p, n = ports
self.assertEqual(p.name, "clk100_0__p")
self.assertEqual(p.width, clk100.width)
self.assertEqual(n.name, "clk100_0__n")
self.assertEqual(n.width, clk100.width)
self.assertEqual(list(self.cm.iter_differential_pins()), [
(clk100, p, n, {}, False),
])
self.assertEqual(list(self.cm.iter_port_constraints()), [
("clk100_0__p", ["H1"], {}),
("clk100_0__n", ["H2"], {}),
])
def test_request_inverted(self):
new_resources = [
Resource("cs", 0, PinsN("X0")),
Resource("clk", 0, DiffPairsN("Y0", "Y1")),
]
self.cm.add_resources(new_resources)
sig_cs = self.cm.request("cs")
sig_clk = self.cm.request("clk")
port_cs, port_clk_p, port_clk_n = self.cm.iter_ports()
self.assertEqual(list(self.cm.iter_single_ended_pins()), [
(sig_cs, port_cs, {}, True),
])
self.assertEqual(list(self.cm.iter_differential_pins()), [
(sig_clk, port_clk_p, port_clk_n, {}, True),
])
def test_request_raw(self):
clk50 = self.cm.request("clk50", 0, dir="-")
self.assertIsInstance(clk50, Record)
self.assertIsInstance(clk50.io, Signal)
ports = list(self.cm.iter_ports())
self.assertEqual(len(ports), 1)
self.assertIs(ports[0], clk50.io)
def test_request_raw_diffpairs(self):
clk100 = self.cm.request("clk100", 0, dir="-")
self.assertIsInstance(clk100, Record)
self.assertIsInstance(clk100.p, Signal)
self.assertIsInstance(clk100.n, Signal)
ports = list(self.cm.iter_ports())
self.assertEqual(len(ports), 2)
self.assertIs(ports[0], clk100.p)
self.assertIs(ports[1], clk100.n)
def test_request_via_connector(self):
self.cm.add_resources([
Resource("spi", 0,
Subsignal("ss", Pins("1", conn=("pmod", 0))),
Subsignal("clk", Pins("2", conn=("pmod", 0))),
Subsignal("miso", Pins("3", conn=("pmod", 0))),
Subsignal("mosi", Pins("4", conn=("pmod", 0))),
)
])
spi0 = self.cm.request("spi", 0)
self.assertEqual(list(self.cm.iter_port_constraints()), [
("spi_0__ss__io", ["B0"], {}),
("spi_0__clk__io", ["B1"], {}),
("spi_0__miso__io", ["B2"], {}),
("spi_0__mosi__io", ["B3"], {}),
])
def test_request_via_nested_connector(self):
new_connectors = [
Connector("pmod_extension", 0, "1 2 3 4 - -", conn=("pmod", 0)),
]
self.cm.add_connectors(new_connectors)
self.cm.add_resources([
Resource("spi", 0,
Subsignal("ss", Pins("1", conn=("pmod_extension", 0))),
Subsignal("clk", Pins("2", conn=("pmod_extension", 0))),
Subsignal("miso", Pins("3", conn=("pmod_extension", 0))),
Subsignal("mosi", Pins("4", conn=("pmod_extension", 0))),
)
])
spi0 = self.cm.request("spi", 0)
self.assertEqual(list(self.cm.iter_port_constraints()), [
("spi_0__ss__io", ["B0"], {}),
("spi_0__clk__io", ["B1"], {}),
("spi_0__miso__io", ["B2"], {}),
("spi_0__mosi__io", ["B3"], {}),
])
def test_request_clock(self):
clk100 = self.cm.request("clk100", 0)
clk50 = self.cm.request("clk50", 0, dir="i")
clk100_port_p, clk100_port_n, clk50_port = self.cm.iter_ports()
self.assertEqual(list(self.cm.iter_clock_constraints()), [
(clk100.i, clk100_port_p, 100e6),
(clk50.i, clk50_port, 50e6)
])
def test_add_clock(self):
i2c = self.cm.request("i2c")
self.cm.add_clock_constraint(i2c.scl.o, 100e3)
self.assertEqual(list(self.cm.iter_clock_constraints()), [
(i2c.scl.o, None, 100e3)
])
def test_wrong_resources(self):
with self.assertRaises(TypeError, msg="Object 'wrong' is not a Resource"):
self.cm.add_resources(['wrong'])
def test_wrong_resources_duplicate(self):
with self.assertRaises(NameError,
msg="Trying to add (resource user_led 0 (pins o A1)), but "
"(resource user_led 0 (pins o A0)) has the same name and number"):
self.cm.add_resources([Resource("user_led", 0, Pins("A1", dir="o"))])
def test_wrong_connectors(self):
with self.assertRaises(TypeError, msg="Object 'wrong' is not a Connector"):
self.cm.add_connectors(['wrong'])
def test_wrong_connectors_duplicate(self):
with self.assertRaises(NameError,
msg="Trying to add (connector pmod 0 1=>1 2=>2), but "
"(connector pmod 0 1=>B0 2=>B1 3=>B2 4=>B3) has the same name and number"):
self.cm.add_connectors([Connector("pmod", 0, "1 2")])
def test_wrong_lookup(self):
with self.assertRaises(ResourceError,
msg="Resource user_led#1 does not exist"):
r = self.cm.lookup("user_led", 1)
def test_wrong_clock_signal(self):
with self.assertRaises(TypeError,
msg="Object None is not a Signal"):
self.cm.add_clock_constraint(None, 10e6)
def test_wrong_clock_frequency(self):
with self.assertRaises(TypeError,
msg="Frequency must be a number, not None"):
self.cm.add_clock_constraint(Signal(), None)
def test_wrong_request_duplicate(self):
with self.assertRaises(ResourceError,
msg="Resource user_led#0 has already been requested"):
self.cm.request("user_led", 0)
self.cm.request("user_led", 0)
def test_wrong_request_duplicate_physical(self):
self.cm.add_resources([
Resource("clk20", 0, Pins("H1", dir="i")),
])
self.cm.request("clk100", 0)
with self.assertRaises(ResourceError,
msg="Resource component clk20_0 uses physical pin H1, but it is already "
"used by resource component clk100_0 that was requested earlier"):
self.cm.request("clk20", 0)
def test_wrong_request_with_dir(self):
with self.assertRaises(TypeError,
msg="Direction must be one of \"i\", \"o\", \"oe\", \"io\", or \"-\", "
"not 'wrong'"):
user_led = self.cm.request("user_led", 0, dir="wrong")
def test_wrong_request_with_dir_io(self):
with self.assertRaises(ValueError,
msg="Direction of (pins o A0) cannot be changed from \"o\" to \"i\"; direction "
"can be changed from \"io\" to \"i\", \"o\", or \"oe\", or from anything "
"to \"-\""):
user_led = self.cm.request("user_led", 0, dir="i")
def test_wrong_request_with_dir_dict(self):
with self.assertRaises(TypeError,
msg="Directions must be a dict, not 'i', because (resource i2c 0 (subsignal scl "
"(pins o N10)) (subsignal sda (pins io N11))) "
"has subsignals"):
i2c = self.cm.request("i2c", 0, dir="i")
def test_wrong_request_with_wrong_xdr(self):
with self.assertRaises(ValueError,
msg="Data rate of (pins o A0) must be a non-negative integer, not -1"):
user_led = self.cm.request("user_led", 0, xdr=-1)
def test_wrong_request_with_xdr_dict(self):
with self.assertRaises(TypeError,
msg="Data rate must be a dict, not 2, because (resource i2c 0 (subsignal scl "
"(pins o N10)) (subsignal sda (pins io N11))) "
"has subsignals"):
i2c = self.cm.request("i2c", 0, xdr=2)
def test_wrong_clock_constraint_twice(self):
clk100 = self.cm.request("clk100")
with self.assertRaises(ValueError,
msg="Cannot add clock constraint on (sig clk100_0__i), which is already "
"constrained to 100000000.0 Hz"):
self.cm.add_clock_constraint(clk100.i, 1e6)
```
#### File: nmigen/vendor/xilinx_spartan_3_6.py
```python
from abc import abstractproperty
from ..hdl import *
from ..lib.cdc import ResetSynchronizer
from ..build import *
__all__ = ["XilinxSpartan3APlatform", "XilinxSpartan6Platform"]
# The interfaces to Spartan 3 and Spartan 6 are substantially the same. Handle
# the differences internally using one class and expose user-facing aliases for
# convenience.
class XilinxSpartan3Or6Platform(TemplatedPlatform):
"""
Required tools:
* ISE toolchain:
* ``xst``
* ``ngdbuild``
* ``map``
* ``par``
* ``bitgen``
The environment is populated by running the script specified in the environment variable
``NMIGEN_ENV_ISE``, if present.
Available overrides:
* ``script_after_run``: inserts commands after ``run`` in XST script.
* ``add_constraints``: inserts commands in UCF file.
* ``xst_opts``: adds extra options for ``xst``.
* ``ngdbuild_opts``: adds extra options for ``ngdbuild``.
* ``map_opts``: adds extra options for ``map``.
* ``par_opts``: adds extra options for ``par``.
* ``bitgen_opts``: adds extra and overrides default options for ``bitgen``;
default options: ``-g Compress``.
Build products:
* ``{{name}}.srp``: synthesis report.
* ``{{name}}.ngc``: synthesized RTL.
* ``{{name}}.bld``: NGDBuild log.
* ``{{name}}.ngd``: design database.
* ``{{name}}_map.map``: MAP log.
* ``{{name}}_map.mrp``: mapping report.
* ``{{name}}_map.ncd``: mapped netlist.
* ``{{name}}.pcf``: physical constraints.
* ``{{name}}_par.par``: PAR log.
* ``{{name}}_par_pad.txt``: I/O usage report.
* ``{{name}}_par.ncd``: place and routed netlist.
* ``{{name}}.drc``: DRC report.
* ``{{name}}.bgn``: BitGen log.
* ``{{name}}.bit``: binary bitstream with metadata.
* ``{{name}}.bin``: raw binary bitstream.
"""
toolchain = "ISE"
device = abstractproperty()
package = abstractproperty()
speed = abstractproperty()
required_tools = [
"xst",
"ngdbuild",
"map",
"par",
"bitgen",
]
@property
def family(self):
device = self.device.upper()
if device.startswith("XC3S"):
if device.endswith("A"):
return "3A"
elif device.endswith("E"):
raise NotImplementedError("""Spartan 3E family is not supported
as a nMigen platform.""")
else:
raise NotImplementedError("""Spartan 3 family is not supported
as a nMigen platform.""")
elif device.startswith("XC6S"):
return "6"
else:
assert False
file_templates = {
**TemplatedPlatform.build_script_templates,
"build_{{name}}.sh": r"""
# {{autogenerated}}
set -e{{verbose("x")}}
if [ -z "$BASH" ] ; then exec /bin/bash "$0" "$@"; fi
[ -n "${{platform._toolchain_env_var}}" ] && . "${{platform._toolchain_env_var}}"
{{emit_commands("sh")}}
""",
"{{name}}.v": r"""
/* {{autogenerated}} */
{{emit_verilog()}}
""",
"{{name}}.debug.v": r"""
/* {{autogenerated}} */
{{emit_debug_verilog()}}
""",
"{{name}}.prj": r"""
# {{autogenerated}}
{% for file in platform.iter_extra_files(".vhd", ".vhdl") -%}
vhdl work {{file}}
{% endfor %}
{% for file in platform.iter_extra_files(".v") -%}
verilog work {{file}}
{% endfor %}
verilog work {{name}}.v
""",
"{{name}}.xst": r"""
# {{autogenerated}}
run
-ifn {{name}}.prj
-ofn {{name}}.ngc
-top {{name}}
{% if platform.family in ["3", "3E", "3A"] %}
-use_new_parser yes
{% endif %}
-p {{platform.device}}{{platform.package}}-{{platform.speed}}
{{get_override("script_after_run")|default("# (script_after_run placeholder)")}}
""",
"{{name}}.ucf": r"""
# {{autogenerated}}
{% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%}
{% set port_name = port_name|replace("[", "<")|replace("]", ">") -%}
NET "{{port_name}}" LOC={{pin_name}};
{% for attr_name, attr_value in attrs.items() -%}
NET "{{port_name}}" {{attr_name}}={{attr_value}};
{% endfor %}
{% endfor %}
{% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%}
NET "{{net_signal|hierarchy("/")}}" TNM_NET="PRD{{net_signal|hierarchy("/")}}";
TIMESPEC "TS{{net_signal|hierarchy("/")}}"=PERIOD "PRD{{net_signal|hierarchy("/")}}" {{1000000000/frequency}} ns HIGH 50%;
{% endfor %}
{{get_override("add_constraints")|default("# (add_constraints placeholder)")}}
"""
}
command_templates = [
r"""
{{invoke_tool("xst")}}
{{get_override("xst_opts")|options}}
-ifn {{name}}.xst
""",
r"""
{{invoke_tool("ngdbuild")}}
{{quiet("-quiet")}}
{{verbose("-verbose")}}
{{get_override("ngdbuild_opts")|options}}
-uc {{name}}.ucf
{{name}}.ngc
""",
r"""
{{invoke_tool("map")}}
{{verbose("-detail")}}
{{get_override("map_opts")|default([])|options}}
-w
-o {{name}}_map.ncd
{{name}}.ngd
{{name}}.pcf
""",
r"""
{{invoke_tool("par")}}
{{get_override("par_opts")|default([])|options}}
-w
{{name}}_map.ncd
{{name}}_par.ncd
{{name}}.pcf
""",
r"""
{{invoke_tool("bitgen")}}
{{get_override("bitgen_opts")|default(["-g Compress"])|options}}
-w
-g Binary:Yes
{{name}}_par.ncd
{{name}}.bit
"""
]
def create_missing_domain(self, name):
        # Xilinx devices have a global write enable (GWE) signal that is asserted during configuration
        # and deasserted once it ends. Because it is an asynchronous signal (GWE is driven by logic
        # synchronous to the configuration clock, which is not used by most designs), even though it is
# a low-skew global network, its deassertion may violate a setup/hold constraint with
# relation to a user clock. The recommended solution is to use a BUFGCE driven by the EOS
# signal (if available). For details, see:
# * https://www.xilinx.com/support/answers/44174.html
# * https://www.xilinx.com/support/documentation/white_papers/wp272.pdf
if self.family != "6":
# Spartan 3 lacks a STARTUP primitive with EOS output; use a simple ResetSynchronizer
# in that case, as is the default.
return super().create_missing_domain(name)
if name == "sync" and self.default_clk is not None:
clk_i = self.request(self.default_clk).i
if self.default_rst is not None:
rst_i = self.request(self.default_rst).i
m = Module()
eos = Signal()
m.submodules += Instance("STARTUP_SPARTAN6", o_EOS=eos)
m.domains += ClockDomain("sync", reset_less=self.default_rst is None)
m.submodules += Instance("BUFGCE", i_CE=eos, i_I=clk_i, o_O=ClockSignal("sync"))
if self.default_rst is not None:
m.submodules.reset_sync = ResetSynchronizer(rst_i, domain="sync")
return m
def _get_xdr_buffer(self, m, pin, *, i_invert=False, o_invert=False):
def get_dff(clk, d, q):
# SDR I/O is performed by packing a flip-flop into the pad IOB.
for bit in range(len(q)):
m.submodules += Instance("FDCE",
a_IOB="TRUE",
i_C=clk,
i_CE=Const(1),
i_CLR=Const(0),
i_D=d[bit],
o_Q=q[bit]
)
def get_iddr(clk, d, q0, q1):
for bit in range(len(q0)):
m.submodules += Instance("IDDR2",
p_DDR_ALIGNMENT="C0",
p_SRTYPE="ASYNC",
p_INIT_Q0=0, p_INIT_Q1=0,
i_C0=clk, i_C1=~clk,
i_CE=Const(1),
i_S=Const(0), i_R=Const(0),
i_D=d[bit],
o_Q0=q0[bit], o_Q1=q1[bit]
)
def get_oddr(clk, d0, d1, q):
for bit in range(len(q)):
m.submodules += Instance("ODDR2",
p_DDR_ALIGNMENT="C0",
p_SRTYPE="ASYNC",
p_INIT=0,
i_C0=clk, i_C1=~clk,
i_CE=Const(1),
i_S=Const(0), i_R=Const(0),
i_D0=d0[bit], i_D1=d1[bit],
o_Q=q[bit]
)
def get_ineg(y, invert):
if invert:
a = Signal.like(y, name_suffix="_n")
m.d.comb += y.eq(~a)
return a
else:
return y
def get_oneg(a, invert):
if invert:
y = Signal.like(a, name_suffix="_n")
m.d.comb += y.eq(~a)
return y
else:
return a
if "i" in pin.dir:
if pin.xdr < 2:
pin_i = get_ineg(pin.i, i_invert)
elif pin.xdr == 2:
pin_i0 = get_ineg(pin.i0, i_invert)
pin_i1 = get_ineg(pin.i1, i_invert)
if "o" in pin.dir:
if pin.xdr < 2:
pin_o = get_oneg(pin.o, o_invert)
elif pin.xdr == 2:
pin_o0 = get_oneg(pin.o0, o_invert)
pin_o1 = get_oneg(pin.o1, o_invert)
i = o = t = None
if "i" in pin.dir:
i = Signal(pin.width, name="{}_xdr_i".format(pin.name))
if "o" in pin.dir:
o = Signal(pin.width, name="{}_xdr_o".format(pin.name))
if pin.dir in ("oe", "io"):
t = Signal(1, name="{}_xdr_t".format(pin.name))
if pin.xdr == 0:
if "i" in pin.dir:
i = pin_i
if "o" in pin.dir:
o = pin_o
if pin.dir in ("oe", "io"):
t = ~pin.oe
elif pin.xdr == 1:
if "i" in pin.dir:
get_dff(pin.i_clk, i, pin_i)
if "o" in pin.dir:
get_dff(pin.o_clk, pin_o, o)
if pin.dir in ("oe", "io"):
get_dff(pin.o_clk, ~pin.oe, t)
elif pin.xdr == 2:
if "i" in pin.dir:
# Re-register first input before it enters fabric. This allows both inputs to
# enter fabric on the same clock edge, and adds one cycle of latency.
i0_ff = Signal.like(pin_i0, name_suffix="_ff")
get_dff(pin.i_clk, i0_ff, pin_i0)
get_iddr(pin.i_clk, i, i0_ff, pin_i1)
if "o" in pin.dir:
get_oddr(pin.o_clk, pin_o0, pin_o1, o)
if pin.dir in ("oe", "io"):
get_dff(pin.o_clk, ~pin.oe, t)
else:
assert False
return (i, o, t)
def get_input(self, pin, port, attrs, invert):
self._check_feature("single-ended input", pin, attrs,
valid_xdrs=(0, 1, 2), valid_attrs=True)
m = Module()
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert)
for bit in range(len(port)):
m.submodules["{}_{}".format(pin.name, bit)] = Instance("IBUF",
i_I=port[bit],
o_O=i[bit]
)
return m
def get_output(self, pin, port, attrs, invert):
self._check_feature("single-ended output", pin, attrs,
valid_xdrs=(0, 1, 2), valid_attrs=True)
m = Module()
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
for bit in range(len(port)):
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUF",
i_I=o[bit],
o_O=port[bit]
)
return m
def get_tristate(self, pin, port, attrs, invert):
self._check_feature("single-ended tristate", pin, attrs,
valid_xdrs=(0, 1, 2), valid_attrs=True)
m = Module()
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
for bit in range(len(port)):
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUFT",
i_T=t,
i_I=o[bit],
o_O=port[bit]
)
return m
def get_input_output(self, pin, port, attrs, invert):
self._check_feature("single-ended input/output", pin, attrs,
valid_xdrs=(0, 1, 2), valid_attrs=True)
m = Module()
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert)
for bit in range(len(port)):
m.submodules["{}_{}".format(pin.name, bit)] = Instance("IOBUF",
i_T=t,
i_I=o[bit],
o_O=i[bit],
io_IO=port[bit]
)
return m
def get_diff_input(self, pin, p_port, n_port, attrs, invert):
self._check_feature("differential input", pin, attrs,
valid_xdrs=(0, 1, 2), valid_attrs=True)
m = Module()
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert)
for bit in range(len(p_port)):
m.submodules["{}_{}".format(pin.name, bit)] = Instance("IBUFDS",
i_I=p_port[bit], i_IB=n_port[bit],
o_O=i[bit]
)
return m
def get_diff_output(self, pin, p_port, n_port, attrs, invert):
self._check_feature("differential output", pin, attrs,
valid_xdrs=(0, 1, 2), valid_attrs=True)
m = Module()
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
for bit in range(len(p_port)):
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUFDS",
i_I=o[bit],
o_O=p_port[bit], o_OB=n_port[bit]
)
return m
def get_diff_tristate(self, pin, p_port, n_port, attrs, invert):
self._check_feature("differential tristate", pin, attrs,
valid_xdrs=(0, 1, 2), valid_attrs=True)
m = Module()
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
for bit in range(len(p_port)):
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUFTDS",
i_T=t,
i_I=o[bit],
o_O=p_port[bit], o_OB=n_port[bit]
)
return m
def get_diff_input_output(self, pin, p_port, n_port, attrs, invert):
self._check_feature("differential input/output", pin, attrs,
valid_xdrs=(0, 1, 2), valid_attrs=True)
m = Module()
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert)
for bit in range(len(p_port)):
m.submodules["{}_{}".format(pin.name, bit)] = Instance("IOBUFDS",
i_T=t,
i_I=o[bit],
o_O=i[bit],
io_IO=p_port[bit], io_IOB=n_port[bit]
)
return m
# The synchronizer implementations below apply the ASYNC_REG attribute. This attribute
    # prevents inference of shift registers from synchronizer FFs, and constrains the FFs
# to be placed as close as possible, ideally in one CLB. This attribute only affects
# the synchronizer FFs themselves.
def get_ff_sync(self, ff_sync):
if ff_sync._max_input_delay is not None:
raise NotImplementedError("Platform '{}' does not support constraining input delay "
"for FFSynchronizer"
.format(type(self).__name__))
m = Module()
flops = [Signal(ff_sync.i.shape(), name="stage{}".format(index),
reset=ff_sync._reset, reset_less=ff_sync._reset_less,
attrs={"ASYNC_REG": "TRUE"})
for index in range(ff_sync._stages)]
for i, o in zip((ff_sync.i, *flops), flops):
m.d[ff_sync._o_domain] += o.eq(i)
m.d.comb += ff_sync.o.eq(flops[-1])
return m
def get_async_ff_sync(self, async_ff_sync):
        if async_ff_sync._max_input_delay is not None:
raise NotImplementedError("Platform '{}' does not support constraining input delay "
"for AsyncFFSynchronizer"
.format(type(self).__name__))
m = Module()
m.domains += ClockDomain("async_ff", async_reset=True, local=True)
flops = [Signal(1, name="stage{}".format(index), reset=1,
attrs={"ASYNC_REG": "TRUE"})
for index in range(async_ff_sync._stages)]
for i, o in zip((0, *flops), flops):
m.d.async_ff += o.eq(i)
if async_ff_sync._edge == "pos":
m.d.comb += ResetSignal("async_ff").eq(async_ff_sync.i)
else:
m.d.comb += ResetSignal("async_ff").eq(~async_ff_sync.i)
m.d.comb += [
ClockSignal("async_ff").eq(ClockSignal(async_ff_sync._domain)),
async_ff_sync.o.eq(flops[-1])
]
return m
XilinxSpartan3APlatform = XilinxSpartan3Or6Platform
XilinxSpartan6Platform = XilinxSpartan3Or6Platform
```
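As a quick illustration of how the class above is meant to be consumed, here is a minimal, untested sketch of a board definition; the part number, package, speed grade, and the empty `resources`/`connectors` lists are hypothetical placeholders rather than a tested configuration.
```python
# Hedged sketch: a hypothetical Spartan-6 board built on the platform above.
class ExampleSpartan6Board(XilinxSpartan6Platform):
    device  = "xc6slx9"   # assumed example part
    package = "tqg144"    # assumed example package
    speed   = "2"         # assumed example speed grade
    resources  = []       # pin and clock definitions would go here
    connectors = []
```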
|
{
"source": "jeantristanb/phase_genomic",
"score": 3
}
|
#### File: phase_genomic/bin/format_vcf_forphasing.py
```python
import argparse
import re
import os
import sys
import glob
def GetHeaderVcf(File):
VcfRead=open(File)
Head=[]
for line in VcfRead:
if line[0]=="#" :
Head.append(line.lower().replace("\n",""))
else :
VcfRead.close()
return Head
return Head
def parseArguments():
    parser = argparse.ArgumentParser(description='convert a VCF file into PHASE input format')
parser.add_argument('--vcf',type=str,required=True)
parser.add_argument('--out',type=str,required=True)
args = parser.parse_args()
return args
args = parseArguments()
headervcf=GetHeaderVcf(args.vcf)
headervcf=headervcf[-1].split()
Nind=len(headervcf)-9
readvcf=open(args.vcf)
list_pos=[]
list_typepos=[]
listinfoind=[]
for cmtind in range(Nind):
listinfoind.append([[],[]])
rind=range(Nind)
nbpos=0
for line in readvcf :
if line[0]!="#":
splitl=line.split()
chro=splitl[0]
        if splitl[4]==".":
            # Skip sites without an ALT allele; appending the position first would
            # desynchronize the "P" position line from the type line below.
            continue
        list_pos.append(splitl[1])
        alt=splitl[4].split(",")
if len(alt)==1 :
list_typepos.append("S")
error="?"
else :
list_typepos.append("M")
error="-1"
for cmtind in rind:
infoind=splitl[cmtind+9]
if infoind[0]=='.' :
geno=[error,error]
else :
geno=infoind.split(':')[0].split('/')
listinfoind[cmtind][0].append(geno[0])
listinfoind[cmtind][1].append(geno[1])
nbpos+=1
finalphase=str(Nind)+"\n"+str(nbpos)+"\nP "+" ".join(list_pos)+"\n"+" ".join(list_typepos)+"\n"
for cmtind in range(Nind) :
finalphase+="#"+headervcf[cmtind+9]+"\n"
finalphase+=" ".join(listinfoind[cmtind][0])+"\n"
finalphase+=" ".join(listinfoind[cmtind][1])+"\n"
##
writePhase=open(args.out+'_PHASE.inp', 'w')
writePhase.write(finalphase)
writePhase.close()
```
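A rough usage sketch for the converter above, assuming it is saved as `format_vcf_forphasing.py`; the toy VCF content and file names are hypothetical, and the closing comment summarizes the PHASE input layout the script writes.
```python
# Hedged sketch: run the converter on a toy two-sample VCF.
import subprocess

toy_vcf = (
    "##fileformat=VCFv4.2\n"
    "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS1\tS2\n"
    "1\t1000\t.\tA\tG\t.\t.\t.\tGT\t0/1\t1/1\n"    # biallelic site -> type "S"
    "1\t2000\t.\tC\tT,G\t.\t.\t.\tGT\t0/2\t./.\n"  # multiallelic site -> type "M"
)
with open("toy.vcf", "w") as f:
    f.write(toy_vcf)
subprocess.run(["python", "format_vcf_forphasing.py",
                "--vcf", "toy.vcf", "--out", "toy"], check=True)
# toy_PHASE.inp holds: number of individuals, number of sites, a "P"-prefixed
# position line, a type line (S/M), then two haplotype rows per sample.
```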
|
{
"source": "jeantsai/backbone-exercises",
"score": 2
}
|
#### File: jeantsai/backbone-exercises/backend.py
```python
import sys
import logging
from flask import Flask, jsonify, abort, make_response, request, url_for, g
from flask_cors import CORS
from flask_httpauth import HTTPBasicAuth
from flask_sqlalchemy import SQLAlchemy
from passlib.apps import custom_app_context as pwd_context
from flask_redis import FlaskRedis
from dns_resolver import get_redis_address
from kafka import KafkaProducer
# Setup logging facility
console = logging.StreamHandler(sys.stdout)
console.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s'))
log = logging.getLogger('')
log.addHandler(console)
log.setLevel(logging.INFO)
logger = logging.getLogger(__name__)
# Fetch the address of service redis from Consul
(redis_ip, redis_port) = get_redis_address()
redis_url = "redis://%s:%s/0" % (redis_ip, redis_port)
logger.info('Got the address of service redis as: %s' % redis_url)
app = Flask(__name__)
CORS(app)
auth = HTTPBasicAuth()
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['REDIS_URL'] = redis_url
redis_store = FlaskRedis(app)
API_USAGE_KEY = 'visit:api:total'
courses = [
{
'id': 1,
'title': u'Software Architecture',
'description': u'Software architecture refers to the high level structures of a software system, the discipline of creating such structures, and the documentation of these structures.',
'done': False
},
{
'id': 2,
'title': u'Software Management',
'description': u'Software project management is an art and science of planning and leading software projects.',
'done': False
}
]
producer = KafkaProducer(bootstrap_servers='kafka:29092')
topic = 'api-visits'
# Record API visit
@app.before_request
def before_request():
producer.send(topic, bytes(request.path, encoding='utf-8'))
# redis_store.incr(API_USAGE_KEY)
# Get API usage
@app.route('/ce/api/v1.0/usages', methods=['GET'])
def get_usage():
count = redis_store.get(API_USAGE_KEY)
# count = 12345
return jsonify({"count": str(count, encoding='utf-8')})
# GET one specific course
@app.route('/ce/api/v1.0/courses/<int:course_id>', methods=['GET'])
# @auth.login_required
def get_course(course_id):
course = list(filter(lambda t: t['id'] == course_id, courses))
if len(course) == 0:
abort(404)
return jsonify( course[0] )
# transfer error page into JSON format
# @app.errorhandler(404)
# def not_found(error):
# return make_response(jsonify({'error': 'Not found'}), 404)
# POST a new course
@app.route('/ce/api/v1.0/courses', methods=['POST'])
# @auth.login_required
def create_course():
if not request.json or not 'title' in request.json:
abort(400)
course = {
'id': courses[-1]['id'] + 1,
'title': request.json['title'],
'description': request.json.get('description', ""),
'done': False
}
courses.append(course)
return jsonify( course ), 201
# PUT an update
@app.route('/ce/api/v1.0/courses/<int:course_id>', methods=['PUT'])
# @auth.login_required
def update_course(course_id):
course = list(filter(lambda t: t['id'] == course_id, courses))
if len(course) == 0:
abort(404)
if not request.json:
abort(400)
# if 'title' in request.json and type(request.json['title']) != unicode:
# abort(400)
# if 'description' in request.json and type(request.json['description']) is not unicode:
# abort(400)
if 'done' in request.json and type(request.json['done']) is not bool:
abort(400)
course[0]['title'] = request.json.get('title', course[0]['title'])
course[0]['description'] = request.json.get('description', course[0]['description'])
course[0]['done'] = request.json.get('done', course[0]['done'])
return jsonify( course[0] )
# DELETE a course
@app.route('/ce/api/v1.0/courses/<int:course_id>', methods=['DELETE'])
# @auth.login_required
def delete_course(course_id):
course = list(filter(lambda t: t['id'] == course_id, courses))
if len(course) == 0:
abort(404)
courses.remove(course[0])
return jsonify({'result': True})
# optimize web service interface: change id into url
def make_client_course(course):
new_course = {}
for field in course:
if field == 'id':
new_course['url'] = url_for('get_course', course_id=course['id'],_external=True)
else:
new_course[field] = course[field]
return new_course
# @app.route('/ce/api/v1.0/courses',methods=['GET'])
# def get_courses():
# return jsonify({'courses': list(map(make_client_course, courses))})
# strengthen security: login session
# @auth.get_password
# def get_password(username):
# if username == 'miguel':
# return 'python'
# return None
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized access'}), 403)
# @app.route('/ce/api/v1.0/courses', methods=['GET'])
# # @auth.login_required
# def get_courses():
# return jsonify({'courses': list(map(make_client_course, courses))})
db = SQLAlchemy(app)
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(32), index = True)
password_hash = db.Column(db.String(128))
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
# must run at import time so the database tables exist before requests are served
db.create_all()
# POST a new user
@app.route('/api/users', methods=['POST'])
def new_user():
username = request.json.get('username')
    password = request.json.get('password')
if username is None or password is None:
abort(400) # missing arguments
if User.query.filter_by(username = username).first() is not None:
abort(400) # existing user
user = User(username = username)
user.hash_password(password)
db.session.add(user)
db.session.commit()
return jsonify({ 'username': user.username }), 201, {'Location': url_for('get_user', id = user.id, _external = True)}
@app.route('/api/users/<int:id>')
def get_user(id):
user = User.query.get(id)
if not user:
abort(400)
return jsonify({'username': user.username})
@app.route('/ce/api/v1.0/courses')
# @auth.login_required
# def get_resource():
# return jsonify({'data': 'Hello, %s!' % g.user.username})
def get_courses():
return jsonify({'courses': courses})
@auth.verify_password
def verify_password(username, password):
user = User.query.filter_by(username = username).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
if __name__ == '__main__':
# Run the main function of this module
app.run(host='0.0.0.0', port=5000, debug=True)
```
#### File: jeantsai/backbone-exercises/dns_resolver.py
```python
import sys
import logging
from dns import resolver
import socket
import time
def lookup_consul_ip():
logger = logging.getLogger(__name__)
logger.info("Finding IP of Service Registry (Consul) ...")
consul_server_ip = socket.gethostbyname('consului')
logger.info("Found IP of Service Registry (Consul): %s" % consul_server_ip)
return consul_server_ip
def lookup_redis_address(consul_server_ip):
consul_resolver = resolver.Resolver()
consul_resolver.port = 8600
consul_resolver.nameservers = [consul_server_ip]
logger = logging.getLogger(__name__)
logger.info("Finding service address of Redis ...")
dns_answer = consul_resolver.query("redis.service.consul", 'A')
ip = str(dns_answer[0])
dns_answer_srv = consul_resolver.query("redis.service.consul", 'SRV')
port = int(str(dns_answer_srv[0]).split()[2])
logger.info("Found service address of Redis: ip=%s port=%d" % (ip, port))
return ip, port
def get_redis_address():
consul_ip = lookup_consul_ip()
count = 18
while count > 0:
try:
time.sleep(10)
(ip, port) = lookup_redis_address(consul_ip)
return ip, port
        except Exception:  # keep retrying on resolver/network errors
count -= 1
if count == 0:
raise
if __name__ == '__main__':
# Setup logging facility
console = logging.StreamHandler(sys.stdout)
console.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s'))
logger = logging.getLogger('')
logger.addHandler(console)
logger.setLevel(logging.INFO)
# Run the main function of this module
    logger.info('This module resolves the address of '
                'a service (Redis) through the Consul DNS SRV service.')
get_redis_address()
```
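For clarity, the SRV lookup that `lookup_redis_address` performs can be exercised in isolation; the sketch below assumes a Consul agent answering DNS on 127.0.0.1:8600, which is an assumption rather than something this repository configures.
```python
# Hedged sketch: the same Consul DNS SRV query, standalone.
from dns import resolver

r = resolver.Resolver()
r.port = 8600                   # Consul's default DNS port (assumed)
r.nameservers = ["127.0.0.1"]   # assumed local Consul agent
srv = r.query("redis.service.consul", "SRV")
# an SRV answer reads "priority weight port target.", so the port is field 3
port = int(str(srv[0]).split()[2])
ip = str(r.query("redis.service.consul", "A")[0])
print(ip, port)
```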
|
{
"source": "JeanVassalo/iControl",
"score": 3
}
|
#### File: iControl/RastreioDoOlhar/rastreio_do_olhar.py
```python
from __future__ import division
import os
import cv2
import dlib
from .olho import Olho
from .calibracao import Calibracao
class InformacoesDaImagem(object):
"""
    This class tracks the user's gaze.
    It provides useful information such as the position of the eyes
    and pupils, and makes it possible to tell whether the eyes are open or closed.
"""
def __init__(self):
self.imagem = None
self.olhoEsquerdo = None
self.olhoDireito = None
self.calibracao = Calibracao()
        # face detector
self.detectorDeFace = dlib.get_frontal_face_detector()
        # predictor used to mark the facial landmarks
cwd = os.path.abspath(os.path.dirname(__file__))
enderecoDoModelo = os.path.abspath(os.path.join(cwd, "modelo_treinado/shape_predictor_68_face_landmarks.dat"))
self.preditor = dlib.shape_predictor(enderecoDoModelo)
@property
def coordenadasDasPupilas(self):
"""Verifica se as pupilas foram localizadas"""
try:
int(self.olhoEsquerdo.pupil.x)
int(self.olhoEsquerdo.pupil.y)
int(self.olhoDireito.pupil.x)
int(self.olhoDireito.pupil.y)
return True
except Exception:
return False
def analisar(self):
"""detecta a face e instancia o objeto olho"""
imagemConvertida = cv2.cvtColor(self.imagem, cv2.COLOR_BGR2GRAY)
faces = self.detectorDeFace(imagemConvertida)
try:
landmarks = self.preditor(imagemConvertida, faces[0])
self.olhoEsquerdo = Olho(imagemConvertida, landmarks, 0, self.calibracao)
self.olhoDireito = Olho(imagemConvertida, landmarks, 1, self.calibracao)
except IndexError:
self.olhoEsquerdo = None
self.olhoDireito = None
def proximaImagem(self, frame):
"""Atualiza a imagem e a analisa.
Arguments:
frame (numpy.ndarray): a imagem em analise
"""
self.imagem = frame
self.analisar()
def coordenadaDaPupilaEsquerda(self):
"""retorna as coordenadas da pupila esquerda"""
if self.coordenadasDasPupilas:
x = self.olhoEsquerdo.origin[0] + self.olhoEsquerdo.pupil.x
y = self.olhoEsquerdo.origin[1] + self.olhoEsquerdo.pupil.y
return (x, y)
def coordenadaDaPupilaDireita(self):
"""Retorna as coordenadas da pupila direita"""
if self.coordenadasDasPupilas:
x = self.olhoDireito.origin[0] + self.olhoDireito.pupil.x
y = self.olhoDireito.origin[1] + self.olhoDireito.pupil.y
return (x, y)
def horizontal_ratio(self):
"""Retorna um número entre 0,0 e 1,0 que indica a
direção horizontal do olhar. A extrema direita é 0,0,
o centro é 0,5 e a extrema esquerda é 1,0
"""
if self.coordenadasDasPupilas:
pupil_left = self.olhoEsquerdo.pupil.x / (self.olhoEsquerdo.center[0] * 2 - 10)
pupil_right = self.olhoDireito.pupil.x / (self.olhoDireito.center[0] * 2 - 10)
return (pupil_left + pupil_right) / 2
def vertical_ratio(self):
"""Retorna um número entre 0,0 e 1,0 que indica o
direção vertical do olhar. O topo extremo é 0,0,
o centro é 0,5 e o fundo extremo é 1,0
"""
if self.coordenadasDasPupilas:
pupil_left = self.olhoEsquerdo.pupil.y / (self.olhoEsquerdo.center[1] * 2 - 10)
pupil_right = self.olhoDireito.pupil.y / (self.olhoDireito.center[1] * 2 - 10)
return (pupil_left + pupil_right) / 2
def olhandoParaDireita(self):
"""Retorna verdadeiro se o usuário estiver olhando para a direita"""
if self.coordenadasDasPupilas:
return self.horizontal_ratio() <= 0.35
def olhandoParaEsquerda(self):
"""Retorna verdadeiro se o usuário estiver olhando para a esquerda """
if self.coordenadasDasPupilas:
return self.horizontal_ratio() >= 0.65
def olhandoParaCentro(self):
"""Retorna verdadeiro se o usuário estiver olhando para o centro"""
if self.coordenadasDasPupilas:
return self.olhandoParaDireita() is not True and self.olhandoParaEsquerda() is not True
def piscando(self):
"""Retorna verdadeiro se o usuário estiver piscando """
if self.coordenadasDasPupilas:
blinking_ratio = (self.olhoEsquerdo.blinking + self.olhoDireito.blinking) / 2
return blinking_ratio > 3.8
def pupilasLocalizadas(self):
"""Retorna a imagem principal as pupilas destacadas"""
frame = self.imagem.copy()
if self.coordenadasDasPupilas:
color = (0, 255, 0)
x_left, y_left = self.coordenadaDaPupilaEsquerda()
x_right, y_right = self.coordenadaDaPupilaDireita()
cv2.line(frame, (x_left - 5, y_left), (x_left + 5, y_left), color)
cv2.line(frame, (x_left, y_left - 5), (x_left, y_left + 5), color)
cv2.line(frame, (x_right - 5, y_right), (x_right + 5, y_right), color)
cv2.line(frame, (x_right, y_right - 5), (x_right, y_right + 5), color)
return frame
```
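A minimal, hypothetical driver loop for the class above; the import path assumes the package layout implied by the relative imports, and the webcam index and key handling are arbitrary choices.
```python
# Hedged sketch: feed webcam frames to the gaze tracker defined above.
import cv2
from RastreioDoOlhar.rastreio_do_olhar import InformacoesDaImagem  # assumed path

gaze = InformacoesDaImagem()
cam = cv2.VideoCapture(0)
while True:
    ok, frame = cam.read()
    if not ok:
        break
    gaze.proximaImagem(frame)
    if gaze.olhandoParaEsquerda():
        print("looking left")
    elif gaze.olhandoParaDireita():
        print("looking right")
    cv2.imshow("iControl", gaze.pupilasLocalizadas())
    if cv2.waitKey(1) == 27:  # Esc quits
        break
cam.release()
```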
|
{
"source": "jeanveau/rasa_core",
"score": 2
}
|
#### File: rasa/cli/data.py
```python
import argparse
from typing import List
from rasa import data
from rasa.cli.default_arguments import add_nlu_data_param
from rasa.cli.utils import get_validated_path
from rasa.constants import DEFAULT_DATA_PATH
# noinspection PyProtectedMember
def add_subparser(subparsers: argparse._SubParsersAction,
parents: List[argparse.ArgumentParser]):
import rasa_nlu.convert as convert
data_parser = subparsers.add_parser(
"data",
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Utils for the Rasa training files")
data_parser.set_defaults(func=lambda _: data_parser.print_help(None))
data_subparsers = data_parser.add_subparsers()
convert_parser = data_subparsers.add_parser(
"convert",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Convert Rasa data between different formats")
convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None))
convert_subparsers = convert_parser.add_subparsers()
convert_nlu_parser = convert_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Convert NLU training data between markdown and json")
convert.add_arguments(convert_nlu_parser)
convert_nlu_parser.set_defaults(func=convert.main)
split_parser = data_subparsers.add_parser(
"split",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Split Rasa data in training and test data")
split_parser.set_defaults(func=lambda _: split_parser.print_help(None))
split_subparsers = split_parser.add_subparsers()
nlu_split_parser = split_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Perform a split of your NLU data according to the specified "
"percentages")
nlu_split_parser.set_defaults(func=split_nlu_data)
_add_split_args(nlu_split_parser)
def _add_split_args(parser: argparse.ArgumentParser) -> None:
add_nlu_data_param(parser)
parser.add_argument("--training_fraction", type=float, default=0.8,
help="Percentage of the data which should be the "
"training data")
parser.add_argument("-o", "--out", type=str, default="train_test_split",
help="Directory where the split files should be "
"stored")
def split_nlu_data(args):
from rasa_nlu.training_data.loading import load_data
data_path = get_validated_path(args.nlu, "nlu", DEFAULT_DATA_PATH)
data_path = data.get_nlu_directory(data_path)
nlu_data = load_data(data_path)
train, test = nlu_data.train_test_split(args.training_fraction)
train.persist(args.out, filename="training_data.json")
test.persist(args.out, filename="test_data.json")
```
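Stripped of the argparse plumbing, the split boils down to the same four calls used in `split_nlu_data`; the data path below is a hypothetical example.
```python
# Hedged sketch of what `rasa data split nlu` does, using the rasa_nlu API above.
from rasa_nlu.training_data.loading import load_data

nlu_data = load_data("data/nlu.md")          # hypothetical path
train, test = nlu_data.train_test_split(0.8)
train.persist("train_test_split", filename="training_data.json")
test.persist("train_test_split", filename="test_data.json")
```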
#### File: rasa/cli/show.py
```python
import argparse
import asyncio
import os
from typing import List
from rasa import data
from rasa.cli.default_arguments import (
add_config_param, add_domain_param,
add_stories_param)
from rasa.constants import DEFAULT_DATA_PATH
# noinspection PyProtectedMember
def add_subparser(subparsers: argparse._SubParsersAction,
parents: List[argparse.ArgumentParser]):
show_parser = subparsers.add_parser(
"show",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Visualize Rasa Stack data")
show_subparsers = show_parser.add_subparsers()
show_stories_subparser = show_subparsers.add_parser(
"stories",
conflict_handler='resolve',
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Show Rasa Core stories")
add_core_visualization_params(show_stories_subparser)
add_config_param(show_stories_subparser)
show_stories_subparser.set_defaults(func=show_stories)
show_parser.set_defaults(func=lambda _: show_parser.print_help(None))
def add_core_visualization_params(parser: argparse.ArgumentParser):
from rasa.core.cli.visualization import add_visualization_arguments
add_visualization_arguments(parser)
add_domain_param(parser)
add_stories_param(parser)
def show_stories(args: argparse.Namespace):
import rasa.core.visualize
loop = asyncio.get_event_loop()
args.url = None
args.stories = data.get_core_directory(args.stories)
if os.path.exists(DEFAULT_DATA_PATH):
args.nlu_data = data.get_nlu_directory(DEFAULT_DATA_PATH)
loop.run_until_complete(
rasa.core.visualize(args.config, args.domain,
args.stories, args.nlu_data,
args.output, args.max_history))
```
#### File: rasa/core/run.py
```python
import asyncio
from functools import partial
import argparse
import logging
from sanic import Sanic
from sanic_cors import CORS
from typing import List, Optional, Text
import rasa.core.cli.arguments
import rasa.utils
import rasa.core
from rasa.core import constants, utils, cli
from rasa.core.channels import (BUILTIN_CHANNELS, InputChannel, console)
from rasa.core.interpreter import NaturalLanguageInterpreter
from rasa.core.tracker_store import TrackerStore
from rasa.core.utils import AvailableEndpoints, read_yaml_file
logger = logging.getLogger() # get the root logger
def create_argument_parser():
"""Parse all the command line arguments for the run script."""
parser = argparse.ArgumentParser(
description='starts the bot')
parser.add_argument(
'-d', '--core',
required=True,
type=str,
help="core model to run")
parser.add_argument(
'-u', '--nlu',
type=str,
help="nlu model to run")
cli.arguments.add_logging_option_arguments(parser)
cli.run.add_run_arguments(parser)
return parser
def create_http_input_channels(
channel: Optional[Text],
credentials_file: Optional[Text]
) -> List['InputChannel']:
"""Instantiate the chosen input channel."""
if credentials_file:
all_credentials = read_yaml_file(credentials_file)
else:
all_credentials = {}
if channel:
return [_create_single_channel(channel, all_credentials.get(channel))]
else:
return [_create_single_channel(c, k)
for c, k in all_credentials.items()]
def _create_single_channel(channel, credentials):
from rasa.core.channels import BUILTIN_CHANNELS
if channel in BUILTIN_CHANNELS:
return BUILTIN_CHANNELS[channel].from_credentials(credentials)
else:
# try to load channel based on class name
try:
input_channel_class = utils.class_from_module_path(channel)
return input_channel_class.from_credentials(credentials)
except (AttributeError, ImportError):
raise Exception(
"Failed to find input channel class for '{}'. Unknown "
"input channel. Check your credentials configuration to "
"make sure the mentioned channel is not misspelled. "
"If you are creating your own channel, make sure it "
"is a proper name of a class in a module.".format(channel))
def configure_app(input_channels=None,
cors=None,
auth_token=None,
enable_api=True,
jwt_secret=None,
jwt_method=None,
route="/webhooks/",
port=None):
"""Run the agent."""
from rasa.core import server
if enable_api:
app = server.create_app(cors_origins=cors,
auth_token=auth_token,
jwt_secret=jwt_secret,
jwt_method=jwt_method)
else:
app = Sanic(__name__)
CORS(app,
resources={r"/*": {"origins": cors or ""}},
automatic_options=True)
if input_channels:
rasa.core.channels.channel.register(input_channels,
app,
route=route)
else:
input_channels = []
if logger.isEnabledFor(logging.DEBUG):
utils.list_routes(app)
# configure async loop logging
async def configure_logging():
if logger.isEnabledFor(logging.DEBUG):
utils.enable_async_loop_debugging(asyncio.get_event_loop())
app.add_task(configure_logging)
if "cmdline" in {c.name() for c in input_channels}:
async def run_cmdline_io(running_app: Sanic):
"""Small wrapper to shut down the server once cmd io is done."""
await asyncio.sleep(1) # allow server to start
await console.record_messages(
server_url=constants.DEFAULT_SERVER_FORMAT.format(port))
logger.info("Killing Sanic server now.")
        running_app.stop()  # kill the sanic server
app.add_task(run_cmdline_io)
return app
def serve_application(core_model=None,
nlu_model=None,
channel=None,
port=constants.DEFAULT_SERVER_PORT,
credentials_file=None,
cors=None,
auth_token=None,
enable_api=True,
jwt_secret=None,
jwt_method=None,
endpoints=None
):
if not channel and not credentials_file:
channel = "cmdline"
input_channels = create_http_input_channels(channel, credentials_file)
app = configure_app(input_channels, cors, auth_token, enable_api,
jwt_secret, jwt_method, port=port)
logger.info("Starting Rasa Core server on "
"{}".format(constants.DEFAULT_SERVER_FORMAT.format(port)))
app.register_listener(
partial(load_agent_on_start, core_model, endpoints, nlu_model),
'before_server_start')
app.run(host='0.0.0.0', port=port,
access_log=logger.isEnabledFor(logging.DEBUG))
# noinspection PyUnusedLocal
async def load_agent_on_start(core_model, endpoints, nlu_model, app, loop):
"""Load an agent.
Used to be scheduled on server start
(hence the `app` and `loop` arguments)."""
from rasa.core import broker
from rasa.core.agent import Agent
_interpreter = NaturalLanguageInterpreter.create(nlu_model,
endpoints.nlu)
_broker = broker.from_endpoint_config(endpoints.event_broker)
_tracker_store = TrackerStore.find_tracker_store(
None, endpoints.tracker_store, _broker)
if endpoints and endpoints.model:
from rasa.core import agent
app.agent = Agent(interpreter=_interpreter,
generator=endpoints.nlg,
tracker_store=_tracker_store,
action_endpoint=endpoints.action)
await agent.load_from_server(app.agent,
model_server=endpoints.model)
else:
app.agent = Agent.load(core_model,
interpreter=_interpreter,
generator=endpoints.nlg,
tracker_store=_tracker_store,
action_endpoint=endpoints.action)
return app.agent
if __name__ == '__main__':
# Running as standalone python application
arg_parser = create_argument_parser()
cmdline_args = arg_parser.parse_args()
logging.getLogger('werkzeug').setLevel(logging.WARN)
logging.getLogger('engineio').setLevel(logging.WARN)
logging.getLogger('matplotlib').setLevel(logging.WARN)
logging.getLogger('socketio').setLevel(logging.ERROR)
rasa.utils.configure_colored_logging(cmdline_args.loglevel)
utils.configure_file_logging(cmdline_args.loglevel,
cmdline_args.log_file)
_endpoints = AvailableEndpoints.read_endpoints(cmdline_args.endpoints)
serve_application(cmdline_args.core,
cmdline_args.nlu,
cmdline_args.connector,
cmdline_args.port,
cmdline_args.credentials,
cmdline_args.cors,
cmdline_args.auth_token,
cmdline_args.enable_api,
cmdline_args.jwt_secret,
cmdline_args.jwt_method,
_endpoints)
```
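Equivalently, the `__main__` block above can be reduced to a direct call; the model paths and endpoint file here are hypothetical.
```python
# Hedged sketch: start the server programmatically with the helpers above.
from rasa.core.utils import AvailableEndpoints

endpoints = AvailableEndpoints.read_endpoints("endpoints.yml")  # assumed file
serve_application(core_model="models/dialogue",   # hypothetical paths
                  nlu_model="models/nlu",
                  channel="cmdline",
                  port=5005,
                  endpoints=endpoints)
```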
#### File: tests/core/test_utils.py
```python
import os
import pytest
from aioresponses import aioresponses
from rasa.core import utils
from rasa.core.utils import EndpointConfig
from tests.core.utilities import latest_request, json_of_latest_request
@pytest.fixture(scope="session")
def loop():
from pytest_sanic.plugin import loop as sanic_loop
return utils.enable_async_loop_debugging(next(sanic_loop()))
def test_is_int():
assert utils.is_int(1)
assert utils.is_int(1.0)
assert not utils.is_int(None)
assert not utils.is_int(1.2)
assert not utils.is_int("test")
def test_subsample_array_read_only():
t = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
r = utils.subsample_array(t, 5,
can_modify_incoming_array=False)
assert len(r) == 5
assert set(r).issubset(t)
def test_subsample_array():
t = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# this will modify the original array and shuffle it
r = utils.subsample_array(t, 5)
assert len(r) == 5
assert set(r).issubset(t)
def test_on_hot():
r = utils.one_hot(4, 6)
assert (r[[0, 1, 2, 3, 5]] == 0).all()
assert r[4] == 1
def test_on_hot_out_of_range():
with pytest.raises(ValueError):
utils.one_hot(4, 3)
def test_list_routes(default_agent):
from rasa.core import server
app = server.create_app(default_agent, auth_token=None)
routes = utils.list_routes(app)
assert set(routes.keys()) == {'hello',
'version',
'execute_action',
'append_event',
'replace_events',
'list_trackers',
'retrieve_tracker',
'retrieve_story',
'respond',
'predict',
'parse',
'train_stack',
'evaluate_intents',
'log_message',
'load_model',
'evaluate_stories',
'get_domain',
'continue_training',
'status',
'tracker_predict'}
def test_cap_length():
assert utils.cap_length("mystring", 6) == "mys..."
def test_cap_length_without_ellipsis():
assert utils.cap_length("mystring", 3,
append_ellipsis=False) == "mys"
def test_cap_length_with_short_string():
assert utils.cap_length("my", 3) == "my"
def test_pad_list_to_size():
assert (utils.pad_list_to_size(["e1", "e2"], 4, "other") ==
["e1", "e2", "other", "other"])
def test_read_lines():
lines = utils.read_lines("data/test_stories/stories.md",
max_line_limit=2,
line_pattern=r"\*.*")
lines = list(lines)
assert len(lines) == 2
async def test_endpoint_config():
with aioresponses() as mocked:
endpoint = EndpointConfig(
"https://example.com/",
params={"A": "B"},
headers={"X-Powered-By": "Rasa"},
basic_auth={"username": "user",
"password": "<PASSWORD>"},
token="mytoken",
token_name="letoken",
type="redis",
port=6379,
db=0,
password="password",
timeout=30000
)
mocked.post('https://example.com/test?A=B&P=1&letoken=mytoken',
payload={"ok": True},
repeat=True,
status=200)
await endpoint.request("post", subpath="test",
content_type="application/text",
json={"c": "d"},
params={"P": "1"})
r = latest_request(mocked, 'post',
"https://example.com/test?A=B&P=1&letoken=mytoken")
assert r
assert json_of_latest_request(r) == {"c": "d"}
assert r[-1].kwargs.get("params", {}).get("A") == "B"
assert r[-1].kwargs.get("params", {}).get("P") == "1"
assert r[-1].kwargs.get("params", {}).get("letoken") == "mytoken"
# unfortunately, the mock library won't report any headers stored on
# the session object, so we need to verify them separately
async with endpoint.session() as s:
assert s._default_headers.get("X-Powered-By") == "Rasa"
assert s._default_auth.login == "user"
assert s._default_auth.password == "<PASSWORD>"
os.environ['USER_NAME'] = 'user'
os.environ['PASS'] = '<PASSWORD>'
def test_read_yaml_string():
config_without_env_var = """
user: user
password: <PASSWORD>
"""
r = utils.read_yaml_string(config_without_env_var)
assert r['user'] == 'user' and r['password'] == '<PASSWORD>'
def test_read_yaml_string_with_env_var():
config_with_env_var = """
user: ${USER_NAME}
password: ${<PASSWORD>}
"""
r = utils.read_yaml_string(config_with_env_var)
assert r['user'] == 'user' and r['password'] == '<PASSWORD>'
def test_read_yaml_string_with_multiple_env_vars_per_line():
config_with_env_var = """
user: ${USER_NAME} ${PASS}
password: ${<PASSWORD>}
"""
r = utils.read_yaml_string(config_with_env_var)
assert r['user'] == 'user pass' and r['password'] == '<PASSWORD>'
def test_read_yaml_string_with_env_var_prefix():
config_with_env_var_prefix = """
user: db_${USER_NAME}
password: <PASSWORD>}
"""
r = utils.read_yaml_string(config_with_env_var_prefix)
assert r['user'] == 'db_user' and r['password'] == '<PASSWORD>'
def test_read_yaml_string_with_env_var_postfix():
config_with_env_var_postfix = """
user: ${USER_NAME}_admin
password: ${<PASSWORD>
"""
r = utils.read_yaml_string(config_with_env_var_postfix)
assert r['user'] == 'user_admin' and r['password'] == '<PASSWORD>'
def test_read_yaml_string_with_env_var_infix():
config_with_env_var_infix = """
user: db_${USER_NAME}_admin
password: <PASSWORD>
"""
r = utils.read_yaml_string(config_with_env_var_infix)
assert r['user'] == 'db_user_admin' and r['password'] == '<PASSWORD>'
def test_read_yaml_string_with_env_var_not_exist():
config_with_env_var_not_exist = """
user: ${USER_NAME}
password: ${PASSWORD}
"""
with pytest.raises(ValueError):
utils.read_yaml_string(config_with_env_var_not_exist)
```
|
{
"source": "JeanWin25/jeanwin-api-coins",
"score": 3
}
|
#### File: python/batch_sell_orders/batch_sell_orders.py
```python
import csv
import hashlib
import hmac
import time
import json
import requests
import sys
import argparse
class HMACRequestSignatureHandler(object):
    def __init__(self, api_secret=''):
self.api_secret = api_secret
def get_nonce(self):
"""Return a nonce based on the current time.
        A nonce should only be used once and should always be increasing.
Using the current time is perfect for this.
"""
return int(time.time() * 1e6)
def __call__(self, url, nonce=None, body=None):
"""Return an HMAC signature based on the request."""
if nonce is None:
nonce = self.get_nonce()
if body is None:
# GET requests don't have a body, so we'll skip that for signing
message = str(nonce) + url
else:
body = json.dumps(body, separators=(',', ':'))
message = str(nonce) + url + body
return str(
hmac.new(
str(self.api_secret),
message,
hashlib.sha256
).hexdigest()
)
class SellOrderApi(object):
def __init__(self, api_key='', api_secret=''):
self.url = 'https://coins.ph/api/v2/sellorder'
self.api_key = api_key
self.api_secret = api_secret
self.hmac_handler = HMACRequestSignatureHandler(
api_secret=self.api_secret
)
def sign_request(self, nonce, body):
return self.hmac_handler(
self.url,
nonce=nonce,
body=body
)
def post(self, body):
nonce = self.hmac_handler.get_nonce()
signature = self.sign_request(nonce, body)
headers = {
'ACCESS_SIGNATURE': signature,
'ACCESS_KEY': self.api_key,
'ACCESS_NONCE': nonce,
'Content-Type': 'application/json',
'Accept': 'application/json'
}
body = json.dumps(body, separators=(',', ':'))
return requests.post(self.url, headers=headers, data=body).json()
class SellOrderCSVParser(object):
def __init__(self, filename):
self.filename = filename
def __call__(self):
parsed_orders = []
count = 0
with open(self.filename, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
header = []
for row in reader:
if count == 0:
header = row
count += 1
continue
data = {}
for field, value in zip(header, row):
data[field] = value
parsed_orders.append(data)
return parsed_orders
def process_requests(config, filename):
with open(config) as config:
config = json.loads(config.read())
parser = SellOrderCSVParser(filename=filename)
api = SellOrderApi(
api_key=config['api_key'],
api_secret=config['api_secret']
)
results = []
for body in parser():
bank_account_name = body['bank_account_name']
        print('Creating a sell order for bank account {}'.format(
            bank_account_name
        ))
        result = api.post(body)
        if 'errors' in result:
            print('Error creating sell order for bank account {}'.format(
                bank_account_name
            ))
        else:
            print('Created sell order {} for {}'.format(
                result['order']['id'],
                bank_account_name
            ))
def main():
arg_parser = argparse.ArgumentParser(
description='Batch process sell orders provided by a csv file'
)
arg_parser.add_argument('filename', nargs=1, type=str)
arg_parser.add_argument('--config', default='config.json', type=str)
args = arg_parser.parse_args()
process_requests(args.config, args.filename[0])
if __name__ == '__main__':
main()
```
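To see the signing scheme in isolation, the handler can be invoked directly (under Python 2, which the script above assumes); the secret and URL are dummies.
```python
# Hedged sketch: compute a signature for a body-less (GET-style) request.
handler = HMACRequestSignatureHandler(api_secret='dummy-secret')
nonce = handler.get_nonce()
signature = handler('https://coins.ph/api/v2/sellorder', nonce=nonce)
# signature is the hex HMAC-SHA256 of str(nonce) + url under the API secret
print('%s %s' % (nonce, signature))
```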
|
{
"source": "jean-ye/project_spring_2020",
"score": 3
}
|
#### File: project_spring_2020/tests/motion_displacement_test.py
```python
from Jeans_Package.compute_motion_displacement import *
def test_column_avg1():
sample_data = {'roll':[1, 1, 1], 'pitch':[2, 2, 2], 'yaw':[3, 3, 3], 'z':[4, 4, 4], 'x':[5, 5, 5], 'y':[6, 6, 6]}
sample_df = pd.DataFrame(data = sample_data)
results = compute_mean_for_each_column(sample_df)
assert len(results) == 6
def test_column_avg2():
sample_data = {'roll':[1, 1, 1], 'pitch':[2, 2, 2], 'yaw':[3, 3, 3], 'z':[4, 4, 4], 'x':[5, 5, 5], 'y':[6, 6, 6]}
sample_df = pd.DataFrame(data = sample_data)
results = compute_mean_for_each_column(sample_df)
output = [1,2,3,4,5,6]
assert results == output
def test_column_avg3():
"""This is to make sure the function can handle negative numbers too"""
sample_data = {'roll':[0, 1, -4], 'pitch':[2, 2, -10], 'yaw':[-6, 3, 3], 'z':[4, -8, 4], 'x':[5, 5, 5], 'y':[6, 6, 6]}
sample_df = pd.DataFrame(data = sample_data)
results = compute_mean_for_each_column(sample_df)
output = [-1,-2,0,0,5,6]
assert results == output
def test_avg_sum1():
sample_data = {'roll':[1, 1, 1], 'pitch':[2, 2, 2], 'yaw':[3, 3, 3], 'z':[4, 4, 4], 'x':[5, 5, 5], 'y':[6, 6, 6]}
sample_df = pd.DataFrame(data = sample_data)
results = compute_mean_for_each_column(sample_df)
sum_results = compute_mean_of_all_columns(results)
assert sum_results == 3.5
def test_avg_sum2():
"""This is to make sure the function can handle negative numbers too"""
sample_data = {'roll':[0, 1, -4], 'pitch':[2, 2, -10], 'yaw':[-6, 3, 3], 'z':[4, -8, 4], 'x':[3, 3, 3], 'y':[6, 6, 6]}
sample_df = pd.DataFrame(data = sample_data)
results = compute_mean_for_each_column(sample_df)
sum_results = compute_mean_of_all_columns(results)
assert sum_results == 1
```
|
{
"source": "jeanyvesb9/DataAnalysis-Lib",
"score": 3
}
|
#### File: DataAnalysis-Lib/DataAnalysisLib/dataset.py
```python
import warnings as _warnings
import typing as _typing
from scipy import optimize as _opt
import inspect as _inspect
import numpy as _np
import matplotlib.pyplot as _plt
import pandas as _pd
import global_funcs as _gf
import global_enums as _ge
DEFAULT_DATASET_NAME = 'v'
class Dataset(object):
def __init__(self, v: _typing.Any, error: _typing.Any = None, errorFn: _typing.Callable[[float], float] = None, name: str = None, units: str = None):
self.v = _np.array(v)
if self.v.ndim != 1:
_warnings.warn('Incorrect dimension of v.')
if error is not None:
if isinstance(error, _np.ndarray) or isinstance(error, list):
if errorFn is None:
if len(error) != len(self.v):
self.error = None
_warnings.warn('len(error) != len(v): Default error (None) selected.')
else:
                        self.error = _np.array(error)
else:
self.error = None
                    _warnings.warn('error overdefined: explicit and functional definitions '
                                   'of error given. Default error (None) selected.')
else:
self.error = _np.ones(len(self.v)) * error
else:
if errorFn is not None:
self.error = errorFn(self.v)
else:
self.error = None
self.name = name #None type checking in setter
self.units = units #empty and None type checking in setter
#Idiot proofing the library:
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, value: str):
self._name = value if value is not None else DEFAULT_DATASET_NAME
@property
def units(self) -> str:
return self._units
@units.setter
def units(self, value: str):
self._units = value if value is not None and value != '' else None
#End of idiot proofing.
def prettyName(self) -> str:
return self.name if self.units is None else self.name + ' (' + self.units + ')'
def cut(self, initialIndex: int = None, finalIndex: int = None):
        # type(None) is needed here: isinstance(x, (int, None)) raises a TypeError
        if (initialIndex is not None or finalIndex is not None) and \
                isinstance(initialIndex, (int, type(None))) and isinstance(finalIndex, (int, type(None))):
            if initialIndex is not None:
                if initialIndex in range(0, len(self.v)):
                    self.v = self.v[initialIndex:]
                    if self.error is not None:
                        self.error = self.error[initialIndex:]
                else:
                    _warnings.warn("initialIndex is out of range, setting default values")
            if finalIndex is not None:
                if finalIndex in range(0, len(self.v)):
                    # self.v has already been shifted by initialIndex (if one was given)
                    self.v = self.v[:finalIndex - (initialIndex or 0) + 1]
                    if self.error is not None:
                        self.error = self.error[:finalIndex - (initialIndex or 0) + 1]
                else:
                    _warnings.warn("finalIndex is out of range, setting default values")
        else:
            _warnings.warn("initialIndex/finalIndex type is not int, not executing")
def purge(self, step: int): #step >= 1
if isinstance(step, int):
if step in range(1, len(self.v)):
self.v = self.v[::step]
self.error = self.error[::step] if self.error is not None else self.error
else:
_warnings.warn("step is out of range (1 <= step <= len(v)), not executing")
else:
_warnings.warn("step type is not int, not executing")
def remove(self, index: int):
        self.v = _np.delete(self.v, index)
        if self.error is not None:
            self.error = _np.delete(self.error, index)
def indexAtValue(self, value: float, exact: bool = True) -> int:
return _np.where(self.v == value) if exact else _gf.findNearestValueIndex(self.v, value)
def getMean(self) -> float:
return _np.mean(self.v)
def getStdDev(self) -> float:
return _np.std(self.v, ddof = 1)
def getStdDevOfMean(self) -> float:
return self.getStdDev()/_np.sqrt(len(self.v))
def getWeightedMean(self) -> float:
if _np.count_nonzero(self.error) != len(self.error):
_warnings.warn('Some values of self.error are 0. Returning unweighted mean.')
return self.getMean()
weights = 1/self.error**2
return _np.sum(self.v * weights)/_np.sum(weights)
def getWeightedMeanError(self) -> float:
if _np.count_nonzero(self.error) != len(self.error):
_warnings.warn('Some values of self.error are 0. Returning 0.')
return 0
        weights = 1/self.error**2
        # standard error of the inverse-variance weighted mean is 1/sqrt(sum of weights)
        return 1/_np.sqrt(_np.sum(weights))
def quickHistogram(self, bins: int = 'auto', range: _typing.Tuple[float, float] = None, normalized: bool = False):
_plt.hist(self.v, bins, range = range, density = normalized)
_plt.xlabel(self.prettyName())
_plt.ylabel('Probability' if normalized else 'Counts')
_plt.grid(True)
_plt.show()
def dataFrame(self, rounded: bool = True, separatedError: bool = False, relativeError: bool = False, saveCSVFile: str = None, \
CSVSep: str = ',', CSVDecimal: str = '.'):
table = _gf.createSeriesPanda(self.v, error = self.error, label = self.name, units = self.units, relativeError = relativeError, \
separated = separatedError, rounded = rounded)
if saveCSVFile is not None:
table.to_csv(saveCSVFile, sep = CSVSep, decimal = CSVDecimal)
return table
```
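A toy illustration of the statistics helpers above; the numbers are arbitrary.
```python
# Hedged sketch: basic use of the Dataset class defined above.
d = Dataset([10.1, 9.9, 10.4], error=[0.1, 0.1, 0.2], name='x', units='cm')
print(d.getMean(), d.getStdDev(), d.getStdDevOfMean())
print(d.getWeightedMean(), d.getWeightedMeanError())
d.quickHistogram(bins=3)  # opens a matplotlib window
```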
|
{
"source": "jeanyvesb9/Dual-Sine-Generator",
"score": 2
}
|
#### File: jeanyvesb9/Dual-Sine-Generator/gen.py
```python
import sys
import threading
import queue
import numpy as np
import scipy as sp
import scipy.signal
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import pyaudio
import tkinter
import serial
import serial.tools.list_ports
f_sampling = 44100
duration = 10.0
sample_len = int(f_sampling * duration)
t = np.arange(sample_len)
stereo_signal = np.zeros([sample_len, 2], dtype=np.float32)
index = 0
def sound_callback(in_data, frame_count, time_info, status):
global index
cut_index = (index + frame_count) if (index + frame_count) <= sample_len else sample_len
data = stereo_signal[index:cut_index, :]
if cut_index != sample_len:
index = cut_index
else:
index = frame_count - len(data)
data = np.concatenate([np.asarray(data), np.asarray(stereo_signal[0:index, :])])
return (data, pyaudio.paContinue)
class MainWindow(tkinter.Frame):
def __init__(self, root, port):
tkinter.Frame.__init__(self, root)
self.root = root
root.title("Noche de los Museos")
root.geometry("1000x630")
root.protocol('WM_DELETE_WINDOW', self.close_fn)
self.bind('<Return>', self.updateGenerator)
self.samples_1 = np.zeros(sample_len)
self.samples_2 = np.zeros(sample_len)
self.is_playing_1 = False
self.is_playing_2 = False
self.remote = False
self.remote_port = port
self.remote_offset = 0
        self.remote_thread_running = False
self.remote_queue = queue.Queue()
self.p_audio = pyaudio.PyAudio()
self.stream = self.p_audio.open(format=pyaudio.paFloat32, channels=2, rate=f_sampling, \
output=True, stream_callback=sound_callback)
self.stream.start_stream()
vcmd = (self.register(self.onFloatValidate),'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
tkinter.Label(self, text = 'Vibración del Agua:').grid(row = 0)
self.button_toggle_1 = tkinter.Button(self, text='Activar', command=self.press_button_toggle_1)
self.button_toggle_1.grid(row=1)
tkinter.Label(self, text = 'Frecuencia (Hz):').grid(row = 1, column=1)
self.freq_1_entry_text = tkinter.StringVar()
self.freq_1_entry = tkinter.Entry(self, validate='key', validatecommand=vcmd, \
textvariable=self.freq_1_entry_text)
self.freq_1_entry.grid(row=1, column=2)
self.freq_1_entry_text.set('25')
self.freq_1_update = tkinter.Button(self, text='Aplicar', command=self.updateGenerator)
self.freq_1_update.grid(row=1, column=3)
self.freq_1_up = tkinter.Button(self, text='↑', command=self.freq_1_up_command)
self.freq_1_up.grid(row=1, column=4)
self.freq_1_down = tkinter.Button(self, text='↓', command=self.freq_1_down_command)
self.freq_1_down.grid(row=1, column=5)
tkinter.Label(self, text = 'Fase:').grid(row=1, column=6)
self.phase_1_slider = tkinter.Scale(self, from_=0, to=2*np.pi, resolution=0.01, \
orient=tkinter.HORIZONTAL, command=self.updateGenerator)
self.phase_1_slider.grid(row=1, column=7)
tkinter.Label(self, text = 'Intensidad:').grid(row = 1, column=8)
self.intensity_1_slider = tkinter.Scale(self, from_=0, to=1, resolution=0.01, \
orient=tkinter.HORIZONTAL, command=self.updateGenerator)
self.intensity_1_slider.grid(row=1, column=9)
self.intensity_1_slider.set(1)
tkinter.Label(self, text = 'Luz Estroboscópica:').grid(row = 2)
self.button_toggle_2 = tkinter.Button(self, text='Activar', command=self.press_button_toggle_2)
self.button_toggle_2.grid(row=3)
tkinter.Label(self, text = 'Frecuencia (Hz):').grid(row = 3, column=1)
self.freq_2_entry_text = tkinter.StringVar()
self.freq_2_entry = tkinter.Entry(self, validate='key', validatecommand=vcmd, \
textvariable=self.freq_2_entry_text)
self.freq_2_entry.grid(row=3, column=2)
self.freq_2_entry_text.set('25')
self.freq_1_update = tkinter.Button(self, text='Aplicar', command=self.updateGenerator)
self.freq_1_update.grid(row=3, column=3)
self.freq_2_up = tkinter.Button(self, text='↑', command=self.freq_2_up_command)
self.freq_2_up.grid(row=3, column=4)
self.freq_2_down = tkinter.Button(self, text='↓', command=self.freq_2_down_command)
self.freq_2_down.grid(row=3, column=5)
tkinter.Label(self, text = 'Fase:').grid(row=3, column=6)
self.phase_2_slider = tkinter.Scale(self, from_=0, to=2*np.pi, resolution=0.01, \
orient=tkinter.HORIZONTAL, command=self.updateGenerator)
self.phase_2_slider.grid(row=3, column=7)
tkinter.Label(self, text = 'Intensidad:').grid(row = 3, column=8)
self.intensity_2_slider = tkinter.Scale(self, from_=0, to=1, resolution=0.01, \
orient=tkinter.HORIZONTAL, command=self.updateGenerator)
self.intensity_2_slider.grid(row=3, column=9)
self.intensity_2_slider.set(1)
self.defaults_button_25 = tkinter.Button(self, text="Default 25Hz", command=self.default_config_25)
self.defaults_button_25.grid(column=10, row=0, rowspan=2)
self.defaults_button_30 = tkinter.Button(self, text="Default 30Hz", command=self.default_config_30)
self.defaults_button_30.grid(column=10, row=2, rowspan=2)
self.remote_control_button = tkinter.Button(self, text='Remoto', command=self.toggle_remote, relief="raised")
self.remote_control_button.grid(row=2, column=11, rowspan=2)
if self.remote_port is None:
self.remote_control_button.config(state='disabled')
self.remote_control_offset = tkinter.Label(self, text='25')
self.remote_control_offset.grid(row = 2, column=12, rowspan=2)
self.plot_fig = plt.Figure(figsize=(10,5), dpi=100)
self.plot_ax1 = self.plot_fig.add_subplot(311)
self.plot_samples_1 = self.plot_ax1.plot(t, self.samples_1)[0]
self.plot_ax1.set_ylim(-1.1, 1.1)
self.plot_ax1.set_xlim(0, t[-1] * 0.01)
self.plot_ax1.xaxis.set_ticklabels([])
self.plot_ax1.set_ylabel('Agua')
self.plot_ax2 = self.plot_fig.add_subplot(312)
self.plot_samples_2 = self.plot_ax2.plot(t, self.samples_2)[0]
self.plot_ax2.set_ylim(-0.1, 1.1)
self.plot_ax2.set_xlim(0, t[-1] * 0.01)
self.plot_ax2.xaxis.set_ticklabels([])
self.plot_ax2.set_ylabel('Luz')
self.plot_ax3 = self.plot_fig.add_subplot(313)
self.plot_samples_3 = self.plot_ax3.plot(t, self.samples_1 * self.samples_2)[0]
self.plot_ax3.set_ylim(-1.1, 1.1)
self.plot_ax3.set_xlim(0, t[-1] * 0.01)
self.plot_ax3.set_ylabel('Superposición')
self.plot_ax3.set_xlabel('t')
self.plot_canvas = FigureCanvasTkAgg(self.plot_fig, master=self)
self.plot_canvas.draw()
self.plot_canvas.get_tk_widget().grid(row=5, columnspan=13)
self.after(200, self.listen_for_result)
if self.remote_port is not None:
self.remote_thread = threading.Thread(target=self.read_remote_port)
self.remote_port.reset_input_buffer()
            self.remote_thread_running = True
self.remote_thread.start()
def onFloatValidate(self, d, i, P, s, S, v, V, W):
try:
if P == '':
return True
float(P)
return True
except ValueError:
self.bell()
return False
def freq_1_up_command(self):
self.freq_1_entry_text.set(str(round(float(self.freq_1_entry_text.get()) + 0.1, 2)))
self.updateGenerator()
def freq_1_down_command(self):
f = float(self.freq_1_entry_text.get())
if f >= 0.1:
self.freq_1_entry_text.set(str(f - 0.1))
else:
self.freq_1_entry_text.set(0)
self.updateGenerator()
def freq_2_up_command(self):
self.freq_2_entry_text.set(str(round(float(self.freq_2_entry_text.get()) + 0.1, 2)))
self.updateGenerator()
def freq_2_down_command(self):
f = float(self.freq_2_entry_text.get())
if f >= 0.1:
self.freq_2_entry_text.set(str(f - 0.1))
else:
self.freq_2_entry_text.set(0)
self.updateGenerator()
def updateGenerator(self, *argv):
t1 = self.freq_1_entry_text.get()
if t1 == '' or float(t1) < 0:
self.freq_1_entry_text.set('0')
t2 = self.freq_2_entry_text.get()
if t2 == '' or float(t2) < 0:
self.freq_2_entry_text.set('0')
f2 = float(self.freq_2_entry_text.get())
if self.remote:
f2 += self.remote_offset
if f2 < 0:
f2 = 0
self.remote_control_offset.config(text='%.2f' % round(f2, 2))
if self.is_playing_1:
self.samples_1 = self.create_sin(float(self.freq_1_entry_text.get()), \
self.phase_1_slider.get(), \
self.intensity_1_slider.get())
else:
self.samples_1 = np.zeros(sample_len)
if self.is_playing_2:
self.samples_2 = self.create_square(f2, \
self.phase_2_slider.get(), \
self.intensity_2_slider.get())
else:
self.samples_2 = np.zeros(sample_len)
stereo_signal[:, 0] = self.samples_1[:] #1 for right speaker, 0 for left
stereo_signal[:, 1] = self.samples_2[:] #1 for right speaker, 0 for left
self.plot_samples_1.set_ydata(self.samples_1)
self.plot_samples_2.set_ydata(self.samples_2)
self.plot_samples_3.set_ydata(self.samples_1 * self.samples_2)
self.plot_canvas.draw()
self.plot_canvas.flush_events()
def create_sin(self, f=25, phase=0, v=1):
return (np.sin(2 * np.pi * t * f / f_sampling + phase)).astype(np.float32) * v
def create_square(self, f=25, phase=0, v=1):
return (sp.signal.square(2 * np.pi * t * f / f_sampling + phase) + 1).astype(np.float32) * v/2
def press_button_toggle_1(self):
if self.is_playing_1:
self.is_playing_1 = False
self.button_toggle_1.config(text="Activar")
else:
self.is_playing_1 = True
self.button_toggle_1.config(text="Desactivar")
self.updateGenerator()
def press_button_toggle_2(self):
if self.is_playing_2:
self.is_playing_2 = False
self.button_toggle_2.config(text="Activar")
else:
self.is_playing_2 = True
self.button_toggle_2.config(text="Desactivar")
self.updateGenerator()
def default_config_25(self):
self.freq_1_entry_text.set(25)
self.freq_2_entry_text.set(25)
self.phase_1_slider.set(0)
self.phase_2_slider.set(0)
self.intensity_1_slider.set(1)
self.intensity_2_slider.set(1)
self.is_playing_1 = True
self.button_toggle_1.config(text="Desactivar")
self.is_playing_2 = True
self.button_toggle_2.config(text="Desactivar")
self.updateGenerator()
def default_config_30(self):
self.freq_1_entry_text.set(30)
self.freq_2_entry_text.set(30)
self.phase_1_slider.set(0)
self.phase_2_slider.set(0)
self.intensity_1_slider.set(1)
self.intensity_2_slider.set(1)
self.is_playing_1 = True
self.button_toggle_1.config(text="Desactivar")
self.is_playing_2 = True
self.button_toggle_2.config(text="Desactivar")
self.updateGenerator()
def toggle_remote(self):
if self.remote:
self.remote_control_button.config(relief='raised')
self.remote = False
self.freq_2_entry.config(fg='black')
else:
self.remote_control_button.config(relief='sunken')
with self.remote_queue.mutex:
self.remote_queue.queue.clear()
self.remote = True
self.freq_2_entry.config(fg='red')
self.updateGenerator()
def read_remote_port(self):
        while self.remote_thread_running:
self.remote_queue.put(float(self.remote_port.read_until())/1023 * 3 - 1.5)
def listen_for_result(self):
if self.remote:
try:
self.remote_offset = self.remote_queue.get(0)
self.after(300, self.listen_for_result)
self.updateGenerator()
except queue.Empty:
self.after(300, self.listen_for_result)
else:
self.after(300, self.listen_for_result)
def close_fn(self):
self.stream.stop_stream()
self.stream.close()
self.p_audio.terminate()
self.root.destroy()
if self.remote:
self.remote = False
if self.remote_port:
            self.remote_thread_running = False
def main():
port = None
if len(sys.argv) > 1:
if sys.argv[1] == '--list_interfaces':
for p in serial.tools.list_ports.comports():
print(p.device, '-', p.name, '-', p.description)
return
elif sys.argv[1] == '-c':
port = serial.Serial(sys.argv[2], baudrate=9600)
else:
print('Unknown command. Options:')
print('--list_interfaces')
print('-c <device_port>')
return
root = tkinter.Tk()
MainWindow(root, port).pack(fill="both", expand=True)
root.mainloop()
if __name__ == '__main__':
main()
```
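The `create_square` helper above rescales SciPy's ±1 square wave into the `[0, v]` range used for the light channel: `square()` yields values in {-1, +1}, so adding 1 and halving maps them to {0, 1} before scaling by the intensity `v`. A standalone sketch of the same trick, assuming a sample rate and time base defined locally (the original app keeps `t`, `f_sampling` and `sample_len` as module globals):
```python
import numpy as np
import scipy.signal

f_sampling = 44100            # assumed sample rate
t = np.arange(f_sampling)     # one second worth of sample indices

def create_square(f=25, phase=0, v=1):
    # square() returns -1/+1; (wave + 1) * v / 2 rescales to [0, v]
    wave = scipy.signal.square(2 * np.pi * t * f / f_sampling + phase)
    return (wave + 1).astype(np.float32) * v / 2

print(create_square(25)[:4])
```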
|
{
"source": "Jeanyvesbourdoncle/Traffic-Sign-Recognition-Classifier",
"score": 3
}
|
#### File: Jeanyvesbourdoncle/Traffic-Sign-Recognition-Classifier/CNN_fonctions.py
```python
import tensorflow as tf
# Normalize function: rescale 8-bit pixel values to roughly [-1, 1]
def normalize(x):
return (x.astype(float) - 128) / 128
#-------------------------------------------
# Function: evaluate the model
# X_data (input data: the different features), y_data (labels)
# total_accuracy / num_examples = model accuracy
# Averages the accuracy over every batch, then computes the accuracy of the whole model.
# NB: BATCH_SIZE, accuracy_operation, x and y are expected as module-level globals.
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
#--------------------------------------------
```
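The `evaluate` helper above depends on four names (`BATCH_SIZE`, `accuracy_operation`, `x`, `y`) that live elsewhere in the original notebook. A minimal TF 1.x sketch of how they might be wired up before calling it; the placeholder shapes, the 43-class assumption (German traffic-sign benchmark), and the one-layer stand-in network are assumptions, not the repo's actual model:
```python
import numpy as np
import tensorflow as tf

BATCH_SIZE = 128
NUM_CLASSES = 43  # assumed: German traffic-sign benchmark

x = tf.placeholder(tf.float32, (None, 32, 32, 3))   # assumed input shape
y = tf.placeholder(tf.int32, (None,))
one_hot_y = tf.one_hot(y, NUM_CLASSES)

# Trivial stand-in for the real CNN, just to make the graph complete.
logits = tf.layers.dense(tf.layers.flatten(x), NUM_CLASSES)

correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Dummy validation data, only so the sketch runs end to end.
X_valid = np.random.rand(256, 32, 32, 3).astype(np.float32)
y_valid = np.random.randint(0, NUM_CLASSES, size=256)

with tf.Session() as sess:  # the with-block installs sess as the default session
    sess.run(tf.global_variables_initializer())
    print("Validation accuracy = {:.3f}".format(evaluate(X_valid, y_valid)))
```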
|
{
"source": "jeanyves-yang/automatic_tractography",
"score": 2
}
|
#### File: automatic_tractography/rundir/script.py
```python
__author__ = 'jeanyves'
import os
import sys
import logging
import signal
import subprocess
import errno
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
dtiprocess = "/tools/bin_linux64/dtiprocess"
DTIReg = "/tools/bin_linux64/DTI-Reg"
fiberprocess = "/tools/bin_linux64/fiberprocess"
ResampleDTIVolume = "/tools/Slicer4/Slicer-4.3.1-linux-amd64//lib/Slicer-4.3/cli-modules/ResampleDTIVolume"
ImageMath = "/tools/bin_linux64/ImageMath"
TractographyLabelMapSeeding = "/tools/Slicer4/Slicer-4.3.1-linux-amd64" \
"/lib/Slicer-4.3/cli-modules/TractographyLabelMapSeeding"
FiberPostProcess = "/tools/bin_linux64/FiberPostProcess"
#FiberPostProcess = "/NIRAL/work/jeanyves/FiberPostProcess-build/bin/FiberPostProcess"
polydatatransform = "/tools/bin_linux64/polydatatransform"
PolyDataCompression = "/tools/bin_linux64/PolyDataCompression"
unu = "/tools/Slicer4/Slicer-4.3.1-linux-amd64//bin/unu"
MDT = "/NIRAL/work/jeanyves/MDT-build/bin/MaurerDistanceTransform"
DTItarget = "/NIRAL/work/jeanyves/PycharmProjects/automatic_tractography/Data/PediatricAtlas_071714FinalAtlasDTI.nrrd"
DTIsource = "/NIRAL/work/jeanyves/PycharmProjects/automatic_tractography/Data/FinalAtlasDTI.nrrd"
displacementField = "/NIRAL/work/jeanyves/PycharmProjects/automatic_tractography/displacementField.nrrd"
inputdir = "/NIRAL/work/jeanyves/PycharmProjects/automatic_tractography/Data/Fibers_Jan132015/"
workdir = "/NIRAL/work/jeanyves/PycharmProjects/automatic_tractography/"
fibersMappedDir = workdir + "fibers_mapped/"
dilationRadius = "2"
seedspacing = "1" # 0.5, or anything less than 1 -> more tracts (slower to load, but maybe better results)
clthreshold = "0.15" #0
minimumlength = "10"
#maximum length 800 (default)
stoppingvalue = "0.12" #0.08
stoppingcurvature = "0.3" #0.5 (reduce it if tracts display high curv)
integrationsteplength = "0.4" #0.5
nbThresholds = "3"
nbHistogramBins = "128"
labelOffset = "0"
otsuPara = nbThresholds + "," + labelOffset + "," + nbHistogramBins
upsampledImage = workdir + "/upsampledImage.nrrd"
step1 = 1
step2 = 1
step3 = 1
step4 = 1
step5 = 0
step5a = 1
step5b = 0
step5b1 = 1
step5b2 = 1
step5b3 = 0
step5b4 = 0
# 1/ co-register DTI atlases
if(step1 == 0):
print "Step: Co-registering atlases & creation of the displacement field ..."
    subprocess.call([DTIReg, "--movingVolume", DTItarget, "--fixedVolume", DTIsource, "--method", "useScalar-ANTS",
                     "--ANTSRegistrationType", "GreedyDiffeo", "--ANTSSimilarityMetric",
                     "CC", "--ANTSSimilarityParameter", "4", "--outputDisplacementField", displacementField])
print "Step: Co-registration DONE"
# 2/ mapping
if(step2 == 0):
print "Step: Mapping reference tracts ..."
make_sure_path_exists(workdir + "fibers_mapped")
for file in os.listdir(inputdir):
fiberMapped = fibersMappedDir + os.path.splitext(file)[0] + "_t.vtk"
subprocess.call([polydatatransform, "--fiber_file", inputdir + file,
"-o", fiberMapped, "-D",
displacementField, "--inverty", "--invertx"] )
print "Step: Mapping DONE"
#3/ voxelize all tracts, dilate by 2 voxels and apply transform to label maps (NN) => ROIs in new atlas - OK (need ResampleVolume2?)
if(step3 == 0):
print "Step: Dilation & voxelization of mapped reference tracts ..."
make_sure_path_exists(workdir + "dilated_images")
for file in os.listdir(fibersMappedDir):
if(file.endswith("_t.vtk")):
print file
labelmap = os.path.splitext(file)[0] + ".nrrd"
subprocess.call([fiberprocess, "--voxelize", workdir + "dilated_images/" + labelmap,
"--fiber_file", fibersMappedDir + file, "-T", DTIsource])
dilatedImage = os.path.splitext(file)[0] + "_dil.nrrd"
subprocess.call([ImageMath, workdir + "dilated_images/" + labelmap, "-dilate", str(dilationRadius) + ",1", "-outfile",
workdir + "dilated_images/" + dilatedImage])
print "Step: Dilation & voxelization DONE"
#4/ Use whole tract as ROI for labelmap seeding
if(step4 == 0):
print "Step: TractographyLabelMapSeeding ... "
for file in os.listdir(workdir + "dilated_images"):
if(file.endswith("_dil.nrrd")):
print file
make_sure_path_exists(workdir + "fibers_processed")
fiber = workdir + "fibers_processed/" + file[:-9] + "_1ss.vtp"
subprocess.check_call([TractographyLabelMapSeeding, DTIsource, fiber, "-a", workdir + "dilated_images/" + file,
"-s", seedspacing,
"--clthreshold", clthreshold,
"--minimumlength", minimumlength,
"--stoppingvalue", stoppingvalue,
"--stoppingcurvature", stoppingcurvature,
"--integrationsteplength", integrationsteplength])
print "Step: Tractography using label map seeding DONE"
#5/ post processing: cut ends with FA or WM roi
if(step5 == 0):
print "Step: Processing tracts ..."
FAImage = workdir + "FinalAtlasDTI_FA.nrrd"
MDImage = workdir + "FinalAtlasDTI_MD.nrrd"
WMmask = workdir + "WMmask.nrrd"
MDmask = workdir + "MDmask.nrrd"
# a/ create WM mask and MD mask
if( step5a == 0 ):
print "Creation of WM mask ..."
#for now need to create MD mask or provide it (could create it automatically with the atlas provided + dtiprocess)
subprocess.call([ImageMath, FAImage, "-outfile", WMmask, "-dilate", "10,10"])
subprocess.call([ImageMath, WMmask, "-otsu", "-outfile", WMmask])
print "DONE"
print "creation of CSF mask ..."
subprocess.call([dtiprocess, "--dti_image", DTIsource, "-m", MDImage])
subprocess.call([ImageMath, MDImage, "-outfile", MDmask, "-otsuMultipleThresholds", "-otsuPara", otsuPara])
subprocess.call([ImageMath, MDmask, "-outfile", MDmask, "-erode", "2,1"])
print "DONE"
# b/ process
if( step5b == 0 ):
if(step5b1 == 0):
print "creation of upsampled image ..."
subprocess.call([unu, "resample", "-i", FAImage, "-o", upsampledImage, "-s", "x2", "x2", "x2" ])
subprocess.call([ResampleDTIVolume, DTIsource, upsampledImage, "-R", upsampledImage ])
print "DONE"
        if(step5b2 == 0):
print "Step: Cropping reference tracts ..."
for file in os.listdir(fibersMappedDir):
if(file.endswith("_t.vtk")):
print file
fiberCropped = fibersMappedDir + os.path.splitext(file)[0] + "_cleanEnds.vtk"
subprocess.call([FiberPostProcess, "-i", fibersMappedDir + file,
"-o", fiberCropped, "--crop", "-m", WMmask,
"--thresholdMode", "above"])
print "Step: Cropping reference tracts DONE"
for file in os.listdir(workdir + "fibers_processed/" ):
if(file.endswith("_t_1ss.vtp")):
print file
dilatedImage = workdir + "dilated_images/" + os.path.splitext(file)[0] + "_dil.nrrd"
outputCrop = workdir + "fibers_processed/" + os.path.splitext(file)[0] + "_cleanEnds.vtp"
outputMaskCSF = workdir + "fibers_processed/" + os.path.splitext(file)[0] + "_maskCSF.vtp"
outputMaskTract = workdir + "fibers_processed/" + os.path.splitext(file)[0] + "_maskTract.vtp"
outputLengthMatch = workdir + "fibers_processed/" + os.path.splitext(file)[0] + "_lengthMatch.vtp"
outputFiber5 = workdir + "fibers_processed/" + os.path.splitext(file)[0] + "_threshold5.vtp"
outputFiber3 = workdir + "fibers_processed/" + os.path.splitext(file)[0] + "_threshold3.vtp"
outputFiber2 = workdir + "fibers_processed/" + os.path.splitext(file)[0] + "_threshold2.vtp"
lengthMatchFiber = fibersMappedDir + os.path.splitext(file)[0] + "_cleanEnds.vtk"
if(step5b3 == 0):
print "cropping using WM mask..."
subprocess.call([FiberPostProcess, "-i", workdir + "fibers_processed/" + file, "-o", outputCrop, "--crop", "-m", WMmask,
"--thresholdMode", "above"])
print "DONE"
print "masking with CSF mask ..."
subprocess.call([FiberPostProcess, "-i", outputCrop, "-o", outputMaskCSF, "--mask", "--clean", "-m", MDmask,
"--thresholdMode", "above", "-t", "0.001"])
print "DONE"
print "masking with dilated reference image ..."
subprocess.call([FiberPostProcess, "-i", outputMaskCSF, "-o", outputMaskTract, "--mask", "-m",
dilatedImage, "--thresholdMode", "below", "-t", "0.6", "--clean"
])
print "DONE"
print "matching length with reference tract ..."
subprocess.call([FiberPostProcess, "-i", outputMaskTract, "--lengthMatch", lengthMatchFiber,
"-o", outputLengthMatch])
print "DONE"
if(step5b4 == 0):
make_sure_path_exists(workdir + "ref_fibers_voxelized")
voxelizedImage = workdir + "ref_fibers_voxelized/" + file[:-4] + "_voxelized.nrrd"
print "voxelization of the tract ..."
subprocess.call([fiberprocess, "--voxelize", voxelizedImage, "--fiber_file", lengthMatchFiber, "-T", upsampledImage])
distanceMap = inputdir + "/" + file[:-8] + "_distanceMap.nrrd"
print "DONE"
print "creation of the distance map of the reference tract ..."
subprocess.call([MDT, voxelizedImage, distanceMap ])
print "DONE"
print "matching tract with the distance map ..."
subprocess.call([FiberPostProcess, "-i", outputLengthMatch, "-o", outputFiber5, "-m",
distanceMap, "--threshold", "5", "--mask", "--clean", "--thresholdMode", "above" ])
subprocess.call([FiberPostProcess, "-i", outputLengthMatch, "-o", outputFiber3, "-m",
distanceMap, "--threshold", "3", "--mask", "--clean", "--thresholdMode", "above" ])
subprocess.call([FiberPostProcess, "-i", outputLengthMatch, "-o", outputFiber2, "-m",
distanceMap, "--threshold", "2", "--mask", "--clean", "--thresholdMode", "above" ])
print "DONE"
print "Step: Processing tracts DONE"
```
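The `make_sure_path_exists` helper at the top of this script is the classic Python 2 EAFP idiom; note also that the step flags above use an inverted convention (a step runs when its flag equals 0). On Python 3 the directory helper collapses to one line, sketched here:
```python
import os

def make_sure_path_exists(path):
    # Python 3 equivalent of the try/except-errno.EEXIST idiom above
    os.makedirs(path, exist_ok=True)

make_sure_path_exists("/tmp/fibers_mapped")  # hypothetical path
```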
|
{
"source": "jean/ZopeSkel",
"score": 2
}
|
#### File: ZopeSkel/zopeskel/interfaces.py
```python
class IVar:
"""Variables in a ZopeSkel template.
"""
# actual variable name, eg "description"
name = ""
# human-facing variable name, eg "Product Description"
title = ""
# Short, 1-sentence description
# e.g., "Short description of this product."
description = ""
# Longer, potentially multi-paragraph help for users
# to explain this option
#
# e.g., "Products in Plone have a description that is used for ..."
help = ""
# Default value
default = None
# Should Echo # wtf? is this used?
should_echo = True
# Modes that question should appear in
# 'easy', 'intermediate', 'advanced'
modes = ()
# Widget hint?
# XXX Todo
# strawman: ('text','multitext','tf','int')
def full_description():
"""Returns variable name and description."""
def print_vars():
""" wtf? """
def validate(value):
"""Check validity of entered value; exception on error.
Check validity of entered data and raise exception if
value is invalid.
If this value is valid, this method will return a
normalized version of it (eg, "yes" -> True, for boolean
questions).
"""
```
#### File: zopeskel/localcommands/archetype.py
```python
import os
from zopeskel.base import var
from zopeskel.localcommands import ZopeSkelLocalTemplate
from Cheetah.Template import Template as cheetah_template
class ArchetypeSubTemplate(ZopeSkelLocalTemplate):
use_cheetah = True
parent_templates = ['archetype']
class ContentType(ArchetypeSubTemplate):
"""
A Content Type skeleton
"""
_template_dir = 'templates/archetype/contenttype'
summary = "A content type skeleton"
vars = [
var('contenttype_name', 'Content type name ', default='Example Type'),
var('contenttype_description', 'Content type description ',
default='Description of the Example Type'),
var('folderish', 'True/False: Content type is Folderish ',
default=False),
var('global_allow', 'True/False: Globally addable ',
default=True),
var('allow_discussion', 'True/False: Allow discussion ',
default=False),
]
def pre(self, command, output_dir, vars):
vars['contenttype_classname'] = vars['contenttype_name'].replace(" ", "")
vars['schema_name'] = vars['contenttype_classname'] + "Schema"
vars['content_class_filename'] = vars['contenttype_classname'].lower()
vars['types_xml_filename'] = vars['contenttype_name'].replace(" ", "_")
vars['interface_name'] = "I" + vars['contenttype_name'].replace(" ", "")
vars['add_permission_name'] = vars['package_dotted_name'] + ': Add ' + vars['contenttype_name']
class ATSchemaField(ArchetypeSubTemplate):
"""
A handy AT schema builder
"""
_template_dir = 'templates/archetype/atschema'
summary = "A handy AT schema builder"
marker_name = "Your Archetypes field definitions here ..."
# mapping of ATSchema types to zope.schema types
typemap = {'boolean': 'Bool',
'computed': 'TextLine',
'cmfobject': 'TextLine',
'datetime': 'Date',
'file': 'Bytes',
'fixedpoint': 'Float',
'float': 'Float',
'image': 'Bytes',
'integer': 'Int',
'lines': 'List',
'reference': 'Object',
'string': 'TextLine',
'text': 'Text',
'unknown': 'TextLine'}
# fieldtypes-map to (widget, validator)
fieldtypes = {
'boolean': ('boolean', None),
'computed': ('computed', None),
'cmfobject': ('file', None),
'datetime': ('calendar', 'isValidDate'),
'file': ('file', 'isNonEmptyFile'),
'fixedpoint': ('decimal', 'isDecimal'),
'float': ('decimal', 'isDecimal'),
'image': ('image', 'isNonEmptyFile'),
'integer': ('integer', 'isInt'),
'lines': ('lines', None),
'reference': ('reference', None),
'string': ('string', None),
'text': ('textarea', None),
}
vars = [
var('content_class_filename',
'What is the module (file)name of your content class?',
default='exampletype'),
var('field_name',
'What would you like to name this field?',
default='newfield'),
var('field_type',
'What kind of field should I make for you?\nSome examples: ['+','.join(fieldtypes.keys())+']\n',
default='string'),
var('widget_type',
'What kind of widget do you want to use (example: Password)?',
default='default'),
var('field_label',
'What should be the label of this field (title)?',
default='New Field'),
var('field_desc',
'What should be the description of this field (help text)?',
default='Field description'),
var('required',
'Is this field required?',
default='False'),
var('default',
"If you'd like a default type it here, otherwise leave it blank",
default=''),
var('validator',
"Enter a validator (isEmail), or None, or get a default validator for your specified field type.",
default='use default validator'),
]
def check_vars(self, *args, **kwargs):
"""
Overloading check_vars to print welcome message
"""
print "Welcome to the ATSchema Builder. Field names/widgets can be specified in lowercase or upper case."
print "NOTE: No need to add 'widget' or 'field' to the names. atschema does the work for you!"
print "See "
print " http://plone.org/documentation/manual/developer-manual/archetypes/fields/fields-reference/"
print "and "
print " http://plone.org/documentation/manual/developer-manual/archetypes/fields/widgets-reference/"
print "for field and widget details"
return super(ATSchemaField, self).check_vars(*args, **kwargs)
def run(self, command, output_dir, vars):
"""
By-passing the base run so I can do multiple inserts
with different marker names
"""
(vars['namespace_package'],
vars['namespace_package2'],
vars['package']) = command.get_parent_namespace_packages()
if vars['namespace_package2']:
vars['package_dotted_name'] = "%s.%s.%s" % \
(vars['namespace_package'],
vars['namespace_package2'],
vars['package'])
else:
vars['package_dotted_name'] = "%s.%s" % \
(vars['namespace_package'],
vars['package'])
vars['a_validator'] = ''
if vars['validator'] == 'use default validator':
## take default Validator...
val = ATSchemaField.fieldtypes[vars['field_type'].lower()][1]
if val is not None:
vars['a_validator'] = """'%s'""" % val
elif vars['validator'] != 'None': ## user providing 'aValidator'
vars['a_validator'] = """'%s'""" % vars['validator']
self.pre(command, output_dir, vars)
interface_insert_template = open(os.path.join(self.template_dir(), 'interfaces/+interface_name+.py_insert')).read()
atschema_insert_template = open(os.path.join(self.template_dir(),'content/+content_class_filename+.py_insert')).read()
bridges_insert_template = open(os.path.join(self.template_dir(),'content/schema_field_bridge.txt_insert')).read()
content_messagefactory_insert_template = open(os.path.join(self.template_dir(), 'content/messagefactory_insert.txt_insert')).read()
interface_additional_imports_template = open(os.path.join(self.template_dir(), 'interfaces/additional_imports.txt_insert')).read()
# insert_into_file really wants the inserted text to end with a newline
interface_insert = str(cheetah_template(interface_insert_template, vars))+"\n"
atschema_insert = str(cheetah_template(atschema_insert_template, vars))+"\n"
bridges_insert = str(cheetah_template(bridges_insert_template, vars))+"\n"
content_messagefactory_insert = str(cheetah_template(content_messagefactory_insert_template, vars))+"\n"
interface_additional_imports = str(cheetah_template(interface_additional_imports_template, vars))+"\n"
# self.write_files(command, output_dir, vars)
command.insert_into_file(os.path.join(command.dest_dir(), 'content', '%s.py' % (vars['content_class_filename'])), self.marker_name, atschema_insert)
command.insert_into_file(os.path.join(command.dest_dir(), 'interfaces', '%s.py' % (vars['content_class_filename'])), 'schema definition goes here', interface_insert)
command.insert_into_file(os.path.join(command.dest_dir(), 'content', '%s.py' % (vars['content_class_filename'])), 'Your ATSchema to Python Property Bridges Here ...', bridges_insert)
command.insert_into_file(os.path.join(command.dest_dir(), 'content', '%s.py' % (vars['content_class_filename'])), 'Message Factory Imported Here', content_messagefactory_insert)
command.insert_into_file(os.path.join(command.dest_dir(), 'interfaces', '%s.py' % (vars['content_class_filename'])), 'Additional Imports Here', interface_additional_imports)
self.post(command, output_dir, vars)
def pre(self, command, output_dir, vars):
file = vars['content_class_filename']
if file.endswith('.py'):
file = os.path.splitext(file)[0]
vars['field_type'] = vars['field_type'].capitalize()
if vars['widget_type'].lower() == 'default':
vars['widget_type'] = self.fieldtypes[vars['field_type'].lower()][0]
vars['widget_type'] = vars['widget_type'].capitalize()
# camelcase multiword names
if vars['field_type'].lower() == 'fixedpoint':
vars['field_type'] = 'FixedPoint'
if vars['field_type'].lower() == 'datetime':
vars['field_type'] = 'DateTime'
if vars['field_type'].lower() == 'date':
vars['field_type'] = 'DateTime'
if vars['widget_type'].lower() == 'inandout':
vars['widget_type'] = 'InAndOut'
if vars['widget_type'].lower() == 'multiselection':
vars['widget_type'] = 'MultiSelection'
if vars['widget_type'].lower() == 'picklist':
vars['widget_type'] = 'PickList'
if vars['widget_type'].lower() == 'referencebrowser':
vars['widget_type'] = 'ReferenceBrowser'
if vars['widget_type'].lower() == 'textarea':
vars['widget_type'] = 'TextArea'
# try to get the zope.schema type, but default to TextLine if no dice
try:
vars['zopeschema_type'] = self.typemap[vars['field_type'].lower()]
        except KeyError:
vars['zopeschema_type'] = self.typemap['unknown']
# if the widget is the RichWidget, set the type to 'SourceText'
if vars['widget_type'].lower() == 'rich':
vars['zopeschema_type'] = 'SourceText'
# if not vars['i18n_domain']:
# vars['i18n_domain'] = vars['package_dotted_name']
vars['content_class_filename'] = file
```
#### File: zopeskel/localcommands/plone.py
```python
from zopeskel.base import var
from zopeskel.localcommands import ZopeSkelLocalTemplate
class PloneSubTemplate(ZopeSkelLocalTemplate):
use_cheetah = True
parent_templates = ['plone', 'archetype']
class Portlet(PloneSubTemplate):
"""
A plone 3 portlet skeleton
"""
_template_dir = 'templates/plone/portlet'
summary = "A Plone 3 portlet"
vars = [
var('portlet_name', 'Portlet name (human readable)', default="Example portlet"),
var('portlet_type_name', 'Portlet type name (should not contain spaces)', default="ExamplePortlet"),
var('description', 'Portlet description', default=""),
]
def pre(self, command, output_dir, vars):
"""
you can use package_namespace, package_namespace2, package
and package_dotted_name of the parent package here. you get them
for free in the vars argument
"""
vars['portlet_filename'] = vars['portlet_type_name'].lower()
vars['dotted_name'] = "%s.portlets" % vars['package_dotted_name']
class View(PloneSubTemplate):
"""
A browser view skeleton
"""
_template_dir = 'templates/plone/view'
summary = "A browser view skeleton"
vars = [
var('view_name', 'Browser view name', default="Example"),
]
def pre(self, command, output_dir, vars):
"""
you can use package_namespace, package_namespace2, package
and package_dotted_name of the parent package here. you get them
for free in the vars argument
"""
vars['view_filename'] = vars['view_name'].lower().replace(' ', '')
vars['view_classname'] = vars['view_name'].replace(' ', '')
class ZCMLMetaDirective(PloneSubTemplate):
"""
A zcml meta directive skeleton
"""
_template_dir = 'templates/plone/zcmlmeta'
summary = "A ZCML meta directive skeleton"
vars = [
var('directive_name', 'The directive name', default="mydirective"),
var('directive_namespace', 'The directive namespace', default="mynamespace"),
]
def pre(self, command, output_dir, vars):
"""
you can use package_namespace, package_namespace2, package
and package_dotted_name of the parent package here. you get them
for free in the vars argument
"""
vars['directive_class_name'] = vars['directive_name'].title()
class I18nLocale(PloneSubTemplate):
"""
A skeleton for an i18n language
"""
_template_dir = 'templates/plone/i18nlocales'
summary = "An i18n locale directory structure"
vars = [
var('language_code', 'The iso-code of the language'),
]
def pre(self, command, output_dir, vars):
"""
you can use package_namespace, package_namespace2, package
and package_dotted_name of the parent package here. you get them
for free in the vars argument
"""
# There is no default for language_code, because that makes no sense
        # To accommodate testing, we introduce a default here.
language_iso_code = vars['language_code'].lower().strip()
vars['language_iso_code'] = language_iso_code and language_iso_code or 'nl'
class Form(PloneSubTemplate):
"""
A form skeleton
"""
_template_dir = 'templates/plone/form'
summary = "A form skeleton"
vars = [
var('form_name', 'Form class name', default="ExampleForm"),
var('form_label', "Form Title", default='Example Form'),
var('form_description', "Form Description", default=''),
var('form_actions', 'Comma separated list of form actions', default="Submit"),
var('form_invariants', 'Comma separated list of invariants', default=""),
]
def pre(self, command, output_dir, vars):
"""
you can use package_namespace, package_namespace2, package
and package_dotted_name of the parent package here. you get them
for free in the vars argument
"""
splitCSV = lambda in_str: [x.strip() for x in in_str.split(",")]
vars['form_filename'] = vars['form_name'].lower()
vars['form_actions'] = splitCSV(vars['form_actions'])
vars['form_invariants'] = splitCSV(vars['form_invariants'].strip())
class Z3cForm(PloneSubTemplate):
"""
A zc3 form skeleton
"""
_template_dir = 'templates/archetype/form'
summary = "A form skeleton"
vars = [
var('form_name', 'Form name', default="Example"),
]
def pre(self, command, output_dir, vars):
"""
you can use package_namespace, package_namespace2, package
and package_dotted_name of the parent package here. you get them
for free in the vars argument
"""
vars['form_filename'] = vars['form_name'].lower()
class FormField(PloneSubTemplate):
"""
A template to add a form field to a form. Essentially this
adds a field to Zope 3 schema.
"""
_template_dir = 'templates/plone/formfield'
summary = "Schema field for a form"
_supported_fields = [
        ("Bool", "Field containing a truth value."),
("Text", "Field containing unicode text."),
("TextLine", "Field containing a single line of unicode text."),
("Datetime", "Field containing a DateTime."),
("Date", "Field containing a date."),
        ("Choice", "Object from a source or vocabulary."),
("Password", "Field containing a unicode string without newlines that is a password.")
]
_field_description = "\n".join(
[" "* 25 + x[0].lower() + " : " + x[1] for x in _supported_fields]
)
vars = [
var('form_filename', "Name of the file containing the form in browser.", default="exampleform"),
var('field_name', "Name of the field (this should be a unique identifier).", default='examplefield'),
var('field_type', "Type of field. Use one of the following \n\n"+_field_description + "\n", default='textline'),
var('field_title', '', default='A short summary or label'),
var('field_description', 'A description of the field (to be displayed as a hint)', default=''),
var('field_required', 'Tells whether a field requires its value to exist (True/False)', default=False),
var('field_readonly', "If true, the field's value cannot be changed (True/False)", default=False),
var('field_default', 'The field default value may be None or a legal field value', default='None'),
var('field_missing_value', 'If a field has no assigned value, set it to this value', default=''),
var('field_constraint', 'Specify the name of a function to use for validation', default=''),
]
def pre(self, command, output_dir, vars):
"""
you can use package_namespace, package_namespace2, package
and package_dotted_name of the parent package here. you get them
for free in the vars argument
"""
# XXX this should be handled by _map_boolean in base.py
# but this template does not inherit from BaseTemplate
for var in FormField.vars:
if var.name in vars and (type(vars[var.name])==str) and var.default in [True, False, None]:
lowered = vars[var.name].lower().strip()
if lowered in ['t', 'y', 'true']:
vars[var.name] = True
elif lowered in ['f', 'n', 'false']:
vars[var.name] = False
elif lowered == 'none':
vars[var.name] = None
# make the field type case insensitive, if the field type is not in the list of enumerated types
# simple use the provided one
vars['field_type'] = dict([(x[0].lower(), x) for x in self._supported_fields]).get(vars['field_type'].lower(), (vars['field_type'],))[0]
class BrowserLayer(PloneSubTemplate):
"""
A browserlayer skeleton
"""
_template_dir = 'templates/plone/browserlayer'
summary = "A Plone browserlayer"
vars = [
var('interface_name', 'Interface name for the browserlayer', default="IMyPackageBrowserLayer"),
var('layer_name', "Browser layer name", default='MyPackage'),
]
def check_vars(self, vars, cmd):
"""
        Overloading check_vars to print welcome message and provide sensible default values
"""
print "A BrowserLayer is generally used in packages to be installed in a Plone Site."
print "If you didn't choose Register Profile option when creating this package"
print "you should probably add a <genericsetup:registerProfile /> directive in"
print "the main configure.zcml.\n"
package_dotted_name = [vars['namespace_package']]
if 'namespace_package2' in vars:
package_dotted_name.append(vars['namespace_package2'])
package_dotted_name.append(vars['package'])
layer_name = ''.join([x.capitalize() for x in package_dotted_name])
self.vars[1].default = layer_name
self.vars[0].default = 'I%sLayer' % (layer_name)
return super(BrowserLayer, self).check_vars(vars, cmd)
def pre(self, command, output_dir, vars):
"""
you can use package_namespace, package_namespace2, package
and package_dotted_name of the parent package here. you get them
for free in the vars argument
"""
vars['interface_filename'] = vars['layer_name'].lower() + 'layer'
```
#### File: ZopeSkel/zopeskel/plone_pas.py
```python
import copy
from zopeskel import abstract_zope
from zopeskel.base import get_var
class PlonePas(abstract_zope.AbstractNestedZope):
_template_dir = 'templates/plone_pas'
summary = "A project for a Plone PAS plugin"
help = """
This creates a project for developing a PAS ('pluggable authentication
system') plugin.
"""
category = "Plone Development"
required_templates = ['nested_namespace']
use_cheetah = True
use_local_commands = True
vars = copy.deepcopy(abstract_zope.AbstractNestedZope.vars)
get_var(vars, 'namespace_package2').default = 'pas'
def pre(self, command, output_dir, vars):
vars['multiplugin_name'] = vars['package'].title()
super(PlonePas, self).pre(command, output_dir, vars)
```
#### File: zopeskel/tests/test_zopeskel_script.py
```python
import unittest
import sys
import StringIO
from zopeskel.zopeskel_script import checkdots, process_args, run, DESCRIPTION
from zopeskel.ui import list_sorted_templates
def capture_stdout(function):
def _capture_stdout(*args, **kw):
newout = StringIO.StringIO()
oldout = sys.stdout
sys.stdout = newout
try:
function(*args, **kw)
finally:
sys.stdout = oldout
newout.seek(0)
return newout.read()
return _capture_stdout
run = capture_stdout(run)
class test_zopeskel(unittest.TestCase):
"""Tests for ZopeSkel script.
"""
def test_checkdots_none(self):
"""Verify that checkdots works with templates without ndots hint."""
class FauxTemplate: pass
t = FauxTemplate()
checkdots(t, "anything is legal; not a package")
def test_checkdots_two(self):
"""Verify that checkdots validates templates with ndots hint."""
class FauxTemplate: pass
t = FauxTemplate()
t.ndots = 2
self.assertRaises(ValueError, checkdots, t, "nodots")
self.assertRaises(ValueError, checkdots, t, "one.dot")
self.assertRaises(ValueError, checkdots, t, "three.dots.in.this")
self.assertRaises(ValueError, checkdots, t, "two.dots.but not legal")
checkdots(t, "two.dots.legal")
def test_process_args(self):
"""Ensure that process_args correctly processes command-line arguments"""
oldargv = sys.argv
        sys.argv = ['zopeskel']
self.assertRaises(SyntaxError, process_args)
sys.argv.append('archetype')
processed = process_args()
self.failUnlessEqual(processed[0], 'archetype')
self.failIf(processed[1])
self.failIf(processed[2])
sys.argv.append('my.project')
processed = process_args()
self.failUnlessEqual(processed[0], 'archetype')
self.failUnlessEqual(processed[1], 'my.project')
self.failIf(processed[2])
sys.argv.append('--bob=kate')
processed = process_args()
self.failUnlessEqual(processed[0], 'archetype')
self.failUnlessEqual(processed[1], 'my.project')
self.failUnlessEqual(processed[2]['--bob'], 'kate')
# process_args will allow us to skip the project name argument
sys.argv.pop(2)
processed = process_args()
self.failUnlessEqual(processed[0], 'archetype')
self.failIf(processed[1])
self.failUnlessEqual(processed[2]['--bob'], 'kate')
# providing arguments in '-name val' form is _not_ allowed
sys.argv = ['zopeskel', 'archetype', 'my.project', '-bob', 'kate']
self.assertRaises(SyntaxError, process_args)
# the --svn-repository argument is _not_ allowed in any form
sys.argv = sys.argv[:3] + ['--svn-repository=svn://svn.junk.org/svn/blah']
self.assertRaises(SyntaxError, process_args)
sys.argv[3] = 'svn-repository=svn://svn.junk.org/svn/blah'
self.assertRaises(SyntaxError, process_args)
        # extra positional arguments (not in 'key=value' form) are not allowed
sys.argv = sys.argv[:3] + ['bob', 'kate']
self.assertRaises(SyntaxError, process_args)
sys.argv = oldargv
def test_script_errors(self):
"""Verify that the run method catches errors correctly"""
oldargv = sys.argv
# non-existent templates are not caught until in 'run'
sys.argv = ['zopeskel', 'no-template', 'my.package']
output = run()
self.failUnless('ERROR: No such template' in output)
# calling the script with no arguments at all prints usage
sys.argv = sys.argv[:1]
output = run()
self.failUnless('Usage:' in output)
sys.argv = oldargv
def test_script_features(self):
"""Verify that the help features of the script function correctly"""
oldargv = sys.argv
# --help produces the DESCRIPTION string
sys.argv = ['zopeskel', '--help']
output = run()
self.failUnless(DESCRIPTION in output, '--help produces incorrect output: %s' % output)
# --list produces a verbose list of all templates by category
sys.argv = ['zopeskel', '--list']
output = run()
cats = list_sorted_templates()
catnames = cats.keys()
templates = sum(cats.values(), [])
tempnames = [t['name'] for t in templates]
tempsums = [t['summary'] for t in templates]
for cat in catnames:
self.failUnless(cat in output, '%s not in --list output' % cat)
for tname in tempnames:
self.failUnless(tname in output, '%s not in --list output' % tname)
for summary in tempsums:
self.failUnless(summary in output, '%s not in --list output' % summary)
# --make-config-file produces a config file with headings for each template
sys.argv = ['zopeskel', '--make-config-file']
output = run()
for theading in ['[' + name + ']' for name in tempnames]:
self.failUnless(theading in output, '%s does not appear in .zopeskel' % theading)
# --version should output a version number. make sure it finds something
sys.argv = ['zopeskel', '--version']
output = run()
self.failIf('unable' in output)
sys.argv = oldargv
def test_suite():
suite = unittest.TestSuite([
unittest.makeSuite(test_zopeskel)])
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
```
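The hand-rolled `capture_stdout` decorator above is the usual Python 2 approach of swapping `sys.stdout` inside a try/finally. For reference, on Python 3 the same capture can be written with `contextlib.redirect_stdout`; a minimal sketch:
```python
import io
from contextlib import redirect_stdout

def capture_stdout(function):
    def _capture_stdout(*args, **kw):
        buf = io.StringIO()
        with redirect_stdout(buf):  # restores sys.stdout on exit, even on error
            function(*args, **kw)
        return buf.getvalue()
    return _capture_stdout

@capture_stdout
def greet():
    print("hello")

assert greet() == "hello\n"
```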
#### File: ZopeSkel/zopeskel/zopeskel_script.py
```python
import sys
import pkg_resources
from cStringIO import StringIO
from textwrap import TextWrapper
from zopeskel.base import wrap_help_paras
from paste.script.command import get_commands
from zopeskel.ui import list_sorted_templates
USAGE = """
Usage:
zopeskel <template> <output-name> [var1=value] ... [varN=value]
zopeskel --help Full help
zopeskel --list List templates verbosely, with details
zopeskel --make-config-file Output .zopeskel prefs file
zopeskel --version Print installed version
%s
Warning: use of the --svn-repository argument is not allowed with this script
For further help information, please invoke this script with the
option "--help".
"""
DESCRIPTION = """
This script allows you to create basic skeletons for plone and zope
products and buildouts based on best-practice templates.
It is a wrapper around PasteScript ("paster"), providing an easier
syntax for invoking and better help.
Invoking this script
--------------------
Basic usage::
zopeskel <template>
(To get a list of the templates, run the script without any arguments;
for a verbose list with full descriptions, run the ``zopeskel --list``)
For example::
zopeskel archetypes
To create an Archetypes-based product for Plone. This will prompt you
for the name of your product, and for other information about it.
If you want to specify your output name (resulting product, egg, or buildout,
depending on the template being used), you can also do so::
zopeskel <template> <output-name>
For example::
zopeskel archetypes Products.Example
In addition, you can pass variables to this that would be requested
by that template, and these will then be used. This is an advanced
feature mostly useful for scripted use of this::
zopeskel archetypes Products.Example author_email=<EMAIL>
(You can specify as many of these as you want, in name=value pairs.
To get the list of variables that a template expects, you can ask for
this with ``paster create -t <template-name> --list-variables``).
Interactive Help
----------------
While being prompted on each question, you can enter with a single
question mark to receive interactive help for that question.
For example::
Description (One-line description of the project) ['']: ?
| This should be a single-line description of your project. It will
| be used in the egg's setup.py, and, for Zope/Plone projects, may be
| used in the GenericSetup profile description.
Providing defaults
------------------
It is also possible to set up default values to be used for any template by
creating a file called ``.zopeskel`` in your home directory. This file
should be in INI format.
For example, our ``$HOME/.zopeskel`` could contain::
[DEFAULT]
author_email = <EMAIL>
license_name = GPL
master_keywords = my common keywords here
[plone3_theme]
empty_styles = False
license_name = BSD
keywords = %(master_keywords)s additional keywords
You can generate a starter .zopeskel file by running this script with
the --make-config-file option. This output can be redirected into
your ``.zopeskel`` file::
bin/zopeskel --make-config-file > /path/to/home/.zopeskel
Notes:
1) "empty_styles" applies only to themes; we can make this setting
in the template-specific section of this file. This setting will
not be used for other templates.
2) For a common setting, like our email address, we can set this in
a section called DEFAULT; settings made in this section are used
for all templates.
3) We can make a setting in DEFAULT and then override it for a
particular template. In this example, we might generally prefer the GPL,
but issue our themes under the BSD license.
4) You can refer to variables from the same section or from the
DEFAULT section using Python string formatting. In this example,
we have a common set of keywords set in DEFAULT and extend it
for the theming template by referring to the master list.
Differences from the 'paster create' command
--------------------------------------------
1) The --svn-repository argument that can be provided to 'paster create' is not
allowed when using the zopeskel script. It will raise an error. The reasons
for this are discussed at length in the zopeskel mailing list and in the
zopeskel issue tracker:
http://plone.org/products/zopeskel/issues/34
http://plone.org/products/zopeskel/issues/35
If this argument is desired, the user should revert to calling 'paster create'
directly. However, be warned that buildout templates will not work with the
argument due to assumptions in the base paster code.
Questions
---------
If you have further questions about the usage of bin/zopeskel, please feel
free to post your questions to the zopeskel mailing list or jump onto the
plone IRC channel (#plone) at irc.freenode.net.
To see the templates supported, run this script without any options.
For a verbose listing with help, use ``zopeskel --list``.
"""
DOT_HELP = {
0: """
This template expects a project name with no dots in it (a simple
Python package name, like 'foo').
""",
1: """
This template expects a project name with 1 dot in it (a 'basic
namespace', like 'foo.bar').
""",
2: """
This template expects a project name with 2 dots in it (a 'nested
namespace', like 'foo.bar.baz').
"""
}
def checkdots(template, name):
"""Check if project name appears legal, given template requirements.
    Templates can declare how many namespaces they expect (via an 'ndots'
    attribute: the number of dots in the name). This checks that the
    provided project name has the correct number of namespaces and that
    each part is a legal Python identifier.
"""
ndots = getattr(template, 'ndots', None)
if ndots is None: return # No validation possible
cdots = name.count(".")
if ndots != cdots:
raise ValueError(
"Project name expected %i dots, supplied '%s' has %i dots" % (
ndots, name, cdots))
for part in name.split("."):
# Check if Python identifier, http://code.activestate.com/recipes/413487/
try:
class test(object): __slots__ = [part]
except TypeError:
raise ValueError(
"Not a valid Python dotted name: %s ('%s' is not an identifier)" % (name, part))
def usage():
templates = list_printable_templates()
print USAGE % templates
def show_help():
print DESCRIPTION
def show_version():
try:
dist = pkg_resources.get_distribution('zopeskel')
print dist.version
except pkg_resources.DistributionNotFound:
print 'unable to identify zopeskel version'
def list_verbose():
"""List templates verbosely, with full help."""
textwrapper = TextWrapper(
initial_indent=" ", subsequent_indent=" ")
cats = list_sorted_templates()
for title, items in cats.items():
print "\n"+ title
print "-" * len(title)
for temp in items:
print "\n%s: %s\n" % (temp['name'], temp['summary'])
if temp['help']:
wrap_help_paras(textwrapper, temp['help'])
print
def list_printable_templates():
"""
Printable list of all templates, sorted into two categories.
"""
s = StringIO()
cats = list_sorted_templates()
templates = sum(cats.values(), []) # flatten into single list
max_name = max([len(x['name']) for x in templates])
for title, items in cats.items():
print >>s, "\n%s\n" % title
for entry in items:
print >>s, "| %s:%s %s\n" % (
entry['name'],
' '*(max_name-len(entry['name'])),
entry['summary']),
s.seek(0)
return s.read()
def generate_dotzopeskel():
"""Make an example .zopeskel file for user."""
cats = list_sorted_templates()
print """
# This file can contain preferences for zopeskel.
# To do so, uncomment the lines that look like:
# variable_name = Default Value
[DEFAULT]
"""
for temp in sum(cats.values(), []):
print "\n[%(name)s]\n" % temp
tempc = temp['entry'].load()
for var in tempc.vars:
if hasattr(var, 'pretty_description'):
print "# %s" % var.pretty_description()
print "# %s = %s\n" % ( var.name, var.default )
def process_args():
""" return a tuple of template_name, output_name and everything else
everything else will be returned as a dictionary of key/value pairs
"""
args = sys.argv[1:]
try:
template_name = args.pop(0)
except IndexError:
raise SyntaxError('No template name provided')
output_name = None
others = {}
for arg in args:
eq_index = arg.find('=')
if eq_index == -1 and not output_name:
output_name = arg
elif eq_index > 0:
            key, val = arg.split('=', 1)
# the --svn-repository argument to paster does some things that cause
# it to be pretty much incompatible with zopeskel. See the following
# zopeskel issues:
# http://plone.org/products/zopeskel/issues/35
# http://plone.org/products/zopeskel/issues/34
# For this reason, we are going to disallow using the --svn-repository
# argument when using the zopeskel wrapper. Those who wish to use it
# can still do so by going back to paster, with the caveat that there
# are some templates (particularly the buildout ones) for which the
# argument will always throw errors (at least until the problems are
# fixed upstream in paster itself).
if 'svn-repository' in key:
msg = 'for a number of reasons, the --svn-repository argument '
msg += 'is not allowed with the zopeskel script. '
msg += "Try --help for more information"
raise SyntaxError(msg)
others[key] = val
else:
raise SyntaxError(arg)
return template_name, output_name, others
def run():
""" """
if "--help" in sys.argv:
show_help()
return
if "--make-config-file" in sys.argv:
generate_dotzopeskel()
return
if "--list" in sys.argv:
list_verbose()
return
if "--version" in sys.argv:
show_version()
return
if len(sys.argv) == 1:
usage()
return
try:
template_name, output_name, opts = process_args()
except SyntaxError, e:
usage()
print "ERROR: There was a problem with your arguments: %s\n" % e
return
rez = pkg_resources.iter_entry_points(
'paste.paster_create_template',
template_name)
rez = list(rez)
if not rez:
usage()
print "ERROR: No such template: %s\n" % template_name
return
template = rez[0].load()
print "\n%s: %s" % (template_name, template.summary)
help = getattr(template, 'help', None)
if help:
print template.help
create = get_commands()['create'].load()
command = create('create')
if output_name:
try:
checkdots(template, output_name)
except ValueError, e:
print "ERROR: %s\n" % e
return
else:
ndots = getattr(template, 'ndots', None)
help = DOT_HELP.get(ndots)
while True:
if help: print help
try:
output_name = command.challenge("Enter project name")
checkdots(template, output_name)
except ValueError, e:
print "\nERROR: %s" % e
else:
break
print """
If at any point, you need additional help for a question, you can enter
'?' and press RETURN.
"""
optslist = [ '%s=%s' % (k,v) for k, v in opts.items() ]
if output_name is not None:
optslist.insert(0, output_name)
command.run( [ '-q', '-t', template_name ] + optslist )
```
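The identifier check inside `checkdots` leans on a CPython detail: creating a class whose `__slots__` contains a string that is not a valid identifier raises `TypeError`. A small demonstration of the trick in isolation; on Python 3, `str.isidentifier()` is the direct replacement:
```python
def is_identifier(part):
    # Same __slots__ trick as checkdots: class creation fails with
    # TypeError when `part` is not a legal identifier.
    try:
        class _Probe(object):
            __slots__ = [part]
    except TypeError:
        return False
    return True

print(is_identifier("foo"))       # True
print(is_identifier("not ok"))    # False
print("foo".isidentifier())       # Python 3 built-in doing the same job
```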
|
{
"source": "Jeapwu/Siamfc",
"score": 2
}
|
#### File: Siamfc/siamfc/tracker.py
```python
import numpy as np
import cv2
import torch
import torch.nn.functional as F
import time
import warnings
import torchvision.transforms as transforms
from torch.autograd import Variable
from .alexnet import SiameseAlexNet
from .config import config
from .custom_transforms import ToTensor
from .utils import get_exemplar_image, get_pyramid_instance_image, get_instance_image
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
torch.set_num_threads(1) # otherwise pytorch will take all cpus
class SiamFCTracker:
def __init__(self, model_path, gpu_id):
self.gpu_id = gpu_id
with torch.cuda.device(gpu_id):
self.model = SiameseAlexNet(gpu_id, train=False)
self.model.load_state_dict(torch.load(model_path))
self.model = self.model.cuda()
self.model.eval()
self.transforms = transforms.Compose([
ToTensor()
])
def _cosine_window(self, size):
"""
get the cosine window
"""
cos_window = np.hanning(int(size[0]))[:, np.newaxis].dot(np.hanning(int(size[1]))[np.newaxis, :])
cos_window = cos_window.astype(np.float32)
cos_window /= np.sum(cos_window)
return cos_window
def init(self, frame, bbox):
""" initialize siamfc tracker
Args:
frame: an RGB image
bbox: one-based bounding box [x, y, width, height]
"""
self.bbox = (bbox[0]-1, bbox[1]-1, bbox[0]-1+bbox[2], bbox[1]-1+bbox[3]) # zero based
self.pos = np.array([bbox[0]-1+(bbox[2]-1)/2, bbox[1]-1+(bbox[3]-1)/2]) # center x, center y, zero based
self.target_sz = np.array([bbox[2], bbox[3]]) # width, height
# get exemplar img
self.img_mean = tuple(map(int, frame.mean(axis=(0, 1))))
exemplar_img, scale_z, s_z = get_exemplar_image(frame, self.bbox,
config.exemplar_size, config.context_amount, self.img_mean)
# get exemplar feature
exemplar_img = self.transforms(exemplar_img)[None,:,:,:]
with torch.cuda.device(self.gpu_id):
exemplar_img_var = Variable(exemplar_img.cuda())
self.model((exemplar_img_var, None, None, None))
self.penalty = np.ones((config.num_scale)) * config.scale_penalty
self.penalty[config.num_scale//2] = 1
# create cosine window
self.interp_response_sz = config.response_up_stride * config.response_sz
self.cosine_window = self._cosine_window((self.interp_response_sz, self.interp_response_sz))
        # create scales
self.scales = config.scale_step ** np.arange(np.ceil(config.num_scale/2)-config.num_scale,
np.floor(config.num_scale/2)+1)
# create s_x
self.s_x = s_z + (config.instance_size-config.exemplar_size) / scale_z
# arbitrary scale saturation
self.min_s_x = 0.2 * self.s_x
self.max_s_x = 5 * self.s_x
def update(self, frame, idx):
"""track object based on the previous frame
Args:
frame: an RGB image
Returns:
bbox: tuple of 1-based bounding box(xmin, ymin, xmax, ymax)
"""
size_x_scales = self.s_x * self.scales
pyramid = get_pyramid_instance_image(frame, self.pos, config.instance_size, size_x_scales, self.img_mean)
instance_imgs = torch.cat([self.transforms(x)[None,:,:,:] for x in pyramid], dim=0)
with torch.cuda.device(self.gpu_id):
instance_imgs_var = Variable(instance_imgs.cuda())
response_maps = self.model((None, None, instance_imgs_var, None))
response_maps = response_maps.data.cpu().numpy().squeeze()
        response_maps_up = [cv2.resize(x, (self.interp_response_sz, self.interp_response_sz),
                                       interpolation=cv2.INTER_CUBIC)
                            for x in response_maps]
# get max score
max_score = np.array([x.max() for x in response_maps_up]) * self.penalty
# penalty scale change
scale_idx = max_score.argmax()
response_map = response_maps_up[scale_idx]
response_map -= response_map.min()
response_map /= response_map.sum()
response_map = (1 - config.window_influence) * response_map + \
config.window_influence * self.cosine_window
###############################################################################
        temp = response_map
        x = np.arange(0, 272, 1)  # 272 = interp_response_sz (response_up_stride * response_sz)
        y = np.arange(0, 272, 1)
fig, ax = plt.subplots()
ax.pcolormesh(x, y, temp)
        path = 'E:\\img\\' + str(idx) + '.jpg'
        plt.savefig(path)
        plt.close(fig)  # release the figure so they do not accumulate across frames
###############################################################################
max_r, max_c = np.unravel_index(response_map.argmax(), response_map.shape)
# displacement in interpolation response
disp_response_interp = np.array([max_c, max_r]) - (self.interp_response_sz-1) / 2.
# displacement in input
disp_response_input = disp_response_interp * config.total_stride / config.response_up_stride
# displacement in frame
scale = self.scales[scale_idx]
disp_response_frame = disp_response_input * (self.s_x * scale) / config.instance_size
# position in frame coordinates
self.pos += disp_response_frame
# scale damping and saturation
self.s_x *= ((1 - config.scale_lr) + config.scale_lr * scale)
self.s_x = max(self.min_s_x, min(self.max_s_x, self.s_x))
self.target_sz = ((1 - config.scale_lr) + config.scale_lr * scale) * self.target_sz
bbox = (self.pos[0] - self.target_sz[0]/2 + 1, # xmin convert to 1-based
self.pos[1] - self.target_sz[1]/2 + 1, # ymin
self.pos[0] + self.target_sz[0]/2 + 1, # xmax
self.pos[1] + self.target_sz[1]/2 + 1) # ymax
return bbox
```
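A sketch of how `SiamFCTracker` is typically driven over a sequence: `init` on the first frame with a 1-based `[x, y, width, height]` box, then `update` per frame. The weights path, video source, and the `siamfc.tracker` import path (inferred from this file's location) are assumptions:
```python
import cv2
from siamfc.tracker import SiamFCTracker  # path inferred from this repo layout

tracker = SiamFCTracker("models/siamfc.pth", gpu_id=0)  # hypothetical weights file

cap = cv2.VideoCapture("video.mp4")  # hypothetical input
ok, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # the tracker expects RGB
tracker.init(frame, (100, 80, 50, 60))  # 1-based (x, y, width, height)

idx = 1
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    xmin, ymin, xmax, ymax = tracker.update(frame, idx)  # 1-based corners
    idx += 1
cap.release()
```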
|
{
"source": "jearistiz/cs50x-artificial-intelligence",
"score": 4
}
|
#### File: project_0/degrees/util.py
```python
class Node():
def __init__(self, state, parent, action):
self.state = state
self.parent = parent
self.action = action
def __eq__(self, o) -> bool:
if not isinstance(o, Node):
return False
elif self.state == o.state:
return True
else:
return False
class StackFrontier():
def __init__(self):
self.frontier = []
def add(self, node):
self.frontier.append(node)
def contains_state(self, state):
return any(node.state == state for node in self.frontier)
def empty(self):
return len(self.frontier) == 0
def remove(self):
if self.empty():
raise Exception("empty frontier")
else:
node = self.frontier[-1]
self.frontier = self.frontier[:-1]
return node
class QueueFrontier(StackFrontier):
def remove(self):
if self.empty():
raise Exception("empty frontier")
else:
node = self.frontier[0]
self.frontier = self.frontier[1:]
return node
```
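A minimal sketch of the breadth-first search these classes are built for, using a toy neighbor function in place of the course's movie/actor graph:
```python
def bfs(start, goal, neighbors):
    """Return the list of actions leading from start to goal, or None."""
    frontier = QueueFrontier()
    frontier.add(Node(state=start, parent=None, action=None))
    explored = set()
    while not frontier.empty():
        node = frontier.remove()
        if node.state == goal:
            actions = []
            while node.parent is not None:   # walk back up to the root
                actions.append(node.action)
                node = node.parent
            return list(reversed(actions))
        explored.add(node.state)
        for action, state in neighbors(node.state):
            if state not in explored and not frontier.contains_state(state):
                frontier.add(Node(state=state, parent=node, action=action))
    return None

# Toy graph: states are ints, actions are edge labels.
graph = {1: [("a", 2), ("b", 3)], 2: [("c", 4)], 3: [], 4: []}
print(bfs(1, 4, lambda s: graph.get(s, [])))  # ['a', 'c']
```
Swapping `QueueFrontier` for `StackFrontier` turns the same loop into depth-first search.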
|
{
"source": "jearistiz/guane-intern-fastapi",
"score": 2
}
|
#### File: api/routers/tasks.py
```python
from typing import Any, Awaitable, Dict
from fastapi import APIRouter, Depends, HTTPException, Request, status
from celery.result import AsyncResult
from app import schemas
from app.config import sttgs
from app.crud import superuser_crud
from app.worker.celery_app import celery_app
tasks_router = APIRouter()
@tasks_router.post(
'/celery_task',
response_model=schemas.CeleryTaskResponse,
status_code=status.HTTP_201_CREATED,
)
async def celery_task(
task_complexity: int,
request: Request,
current_superuser: schemas.SuperUser = Depends(
superuser_crud.get_current_active_user
)
) -> Any:
return await run_task_post_to_uri(
task_complexity=task_complexity,
get_task_result=False,
)
@tasks_router.post(
'/celery_task_not_async',
response_model=schemas.CeleryTaskResponse,
status_code=status.HTTP_201_CREATED,
)
async def celery_task_not_async(
task_complexity: int,
request: Request,
current_superuser: schemas.SuperUser = Depends(
superuser_crud.get_current_active_user
)
) -> Any:
"""Same functionality as last endpoint but this one returns the external
server (guane's) response completely at the expense of loosing the async
property of celery because of the call to ``task_result.get()``. Keep in
mind that a request to this endpoint will take at least as many seconds as
the ``task_complexity`` query parameter.
This one is just for fun, and to test that guane's server is getting the
request and giving us an appropriate response.
Do not use a query parameter greater than 9, since the endpoint calls
internally ``task_result.get(timeout=10)`` and it would result in a server
error.
"""
return await run_task_post_to_uri(
task_complexity=task_complexity,
get_task_result=True,
get_result_timeout=10.0
)
async def run_task_post_to_uri(
task_complexity: int = 0,
*,
get_task_result: bool,
get_result_timeout: float = 10.0,
) -> Awaitable[Dict[str, Any]]:
"""If ``get_task_result`` is set to ``True``, the async nature of the
    celery task will be lost, since we make a call to ``task_result.get``.
``get_result_timeout`` only makes sense when ``get_task_result`` is set to
true. This is the maximum ammount of time the server will wait for the
task to complete.
"""
response: Dict[str, Any] = {
'task_complexity': task_complexity
}
query_uri = (
sttgs.get('GUANE_WORKER_URI') + f'?task_complexity={task_complexity}'
)
try:
task_result: AsyncResult = celery_app.send_task(
'app.worker.tasks.post_to_uri_task',
kwargs={'query_uri': query_uri}
)
# If next code block is executed, the async nature of the task will
# be lost since task_result.get waits until the task is complete.
if get_task_result:
ext_server_response = task_result.get(timeout=get_result_timeout)
if ext_server_response:
response['server_message'] = ext_server_response
except Exception:
response['success'] = False
response['status'] = 'Internal server error'
raise HTTPException(status_code=500, detail=response)
return response
```
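A hedged client-side sketch of hitting the first endpoint. The base URL, the prefix under which `tasks_router` is mounted, and the bearer-token value are all assumptions about how the app is deployed:
```python
import httpx

BASE = "http://localhost:8000/api/tasks"  # hypothetical mount point for tasks_router
headers = {"Authorization": "Bearer <token>"}  # token acquisition is app-specific

resp = httpx.post(
    f"{BASE}/celery_task",
    params={"task_complexity": 3},
    headers=headers,
)
print(resp.status_code, resp.json())  # expect 201 and {'task_complexity': 3}
```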
#### File: app/crud/web_crud.py
```python
from typing import Dict, List
from fastapi import HTTPException
from pydantic import BaseModel
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models.base_class import Base
class WebCRUDWrapper:
"""Wrapper class to avoid duplicate code in API basic crud operations.
"""
def __init__(
self,
crud: CRUDBase,
*,
enty_name: str
) -> None:
self.crud = crud
self.enty_name: str = enty_name.lower()
self.enty_name_plural = self.enty_name + 's'
def get_all_entries(self, db: Session) -> Dict[str, List[Base]]:
"""Get all db entries of entity."""
all_enties = {
self.enty_name_plural: [
self.crud.model(**entity._asdict())
for entity in self.crud.get_multi(db)
]
}
if all_enties.get(self.enty_name_plural):
return all_enties
else:
raise HTTPException(
400,
detail=f'No {self.enty_name_plural} found'
)
def get_enty_by_name(self, db: Session, name: str) -> Base:
enty_by_name = self.crud.get_by_name(db, name_in=name)
if not enty_by_name:
raise HTTPException(
400,
detail=f'{self.enty_name.title()} with name \'{name}\' '
'not found.'
)
return enty_by_name
def post_enty_by_name(
self,
db: Session,
*,
name: str,
enty_info: BaseModel
) -> Base:
try:
created_enty = self.crud.create(db, obj_in=enty_info)
except Exception:
raise HTTPException(
500,
detail=f'Error while creating {self.enty_name} \'{name}\' in '
'database.'
)
if not created_enty:
raise HTTPException(
400,
detail=f'Create query of {self.enty_name} \'{name}\' finished '
'but was not saved.'
)
return created_enty
def put_enty_by_name(
self,
db: Session,
*,
name: str,
enty_new_info: BaseModel
):
try:
updated_enty = self.crud.update_by_name(
db, name_in_db=name, obj_in=enty_new_info
)
except Exception:
raise HTTPException(
500,
f'Error while updating {self.enty_name} \'{name}\' in '
f'database. Probably the {self.enty_name} does not exist in '
'database.'
)
if not updated_enty:
raise HTTPException(
400,
f'{self.enty_name.title()} \'{name}\' was not updated.'
)
return updated_enty
def delete_enty_by_name(
self,
db: Session,
*,
name: str
):
try:
deleted_enty = self.crud.remove_one_by_name(db, name=name)
except Exception:
raise HTTPException(
500,
f'Error while deleting {self.enty_name} \'{name}\' from '
f'database. Probably the {self.enty_name} does not exist in '
'database.'
)
if not deleted_enty:
raise HTTPException(
400,
f'{self.enty_name.title()} \'{name}\' was not deleted.'
)
return deleted_enty
```
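A sketch of how a router module might consume this wrapper; ``crud.user``, ``get_db`` and the route path are hypothetical names used only for illustration.
```python
# Hypothetical router usage of WebCRUDWrapper; crud.user, get_db and the
# route path are illustrative names, not taken from the original project.
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session

user_web_crud = WebCRUDWrapper(crud.user, enty_name='user')
router = APIRouter()

@router.get('/users')
def read_users(db: Session = Depends(get_db)):
    # Returns {'users': [...]} or raises HTTPException(400) when empty.
    return user_web_crud.get_all_entries(db)
```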
#### File: app/utils/paths.py
```python
from pathlib import Path
def join_relative_path(path: Path, rel_path: str) -> Path:
for node in rel_path.split('/'):
path /= node
return path
```
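For illustration, the helper simply descends a slash-separated relative path one segment at a time (example paths are hypothetical):
```python
from pathlib import Path

# join_relative_path(Path('/srv/app'), 'static/css') -> Path('/srv/app/static/css')
assert join_relative_path(Path('/srv/app'), 'static/css') == Path('/srv/app/static/css')
```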
#### File: api/routers/test_users.py
```python
from typing import Dict
from fastapi.testclient import TestClient
from app.config import sttgs
from mock_data.db_test_data import users_mock_dicts
from tests.utils.handle_db_test import HandleDBTest
class TestUsersRouter(HandleDBTest):
users_api_prefix = sttgs.get('API_PREFIX') + sttgs.get('USERS_API_PREFIX')
def users_name_route(self, name):
return self.users_api_prefix + '/' + name
def assert_users_data(self, *, reference: dict, compare: dict):
assert compare['name'] == reference['name']
assert compare['email'] == reference['email']
assert 'create_date' in compare
assert 'id' in compare
def test_get_users(self, app_client: TestClient):
response = app_client.get(self.users_api_prefix)
assert response.status_code == 200
content = response.json()
assert isinstance(content['users'], list)
users = content['users']
user_names = [ref_user['name'] for ref_user in users_mock_dicts]
for user in users:
assert user['name'] in user_names
def test_get_users_name(self, app_client: TestClient):
data = users_mock_dicts[0]
get_users_name_route = self.users_name_route(data.get('name'))
response = app_client.get(get_users_name_route, json=data)
assert response.status_code == 200
content = response.json()
self.assert_users_data(reference=data, compare=content)
def test_post_users_name(
self, app_client: TestClient, superuser_token_headers: Dict[str, str]
) -> None:
data = users_mock_dicts[0].copy()
data.update({'name': 'Juan'})
post_users_name_route = self.users_name_route(data.get('name'))
response = app_client.post(
post_users_name_route, json=data, headers=superuser_token_headers
)
assert response.status_code == 201
content = response.json()
self.assert_users_data(reference=data, compare=content)
def test_put_users_name(
self, app_client: TestClient, superuser_token_headers: Dict[str, str]
) -> None:
data = users_mock_dicts[0].copy()
old_name = data['name']
data.update({'name': 'Juan'})
post_users_name_route = self.users_name_route(old_name)
response = app_client.put(
post_users_name_route, json=data, headers=superuser_token_headers
)
assert response.status_code == 200
content = response.json()
self.assert_users_data(reference=data, compare=content)
def test_delete_users_name(
self, app_client: TestClient, superuser_token_headers: Dict[str, str]
) -> None:
data = users_mock_dicts[0]
get_users_name_route = self.users_name_route(data.get('name'))
response = app_client.delete(
get_users_name_route, json=data, headers=superuser_token_headers
)
assert response.status_code == 200
content = response.json()
self.assert_users_data(reference=data, compare=content)
```
#### File: app/crud/test_dog_crud.py
```python
from sqlalchemy.orm import Session
from app import crud
from mock_data.db_test_data import adopted_dogs_dicts
from tests.utils.handle_db_test import HandleDBTest
from tests.utils.parse_dict import update_dict_fmt_item
class TestDogCrud(HandleDBTest):
def test_get_adopter(self, db: Session):
adopted_dogs_out = crud.dog.get_adopted(db)
for adopted_dog_out in adopted_dogs_out:
adopted_dog_dict = adopted_dog_out._asdict()
adopted_dog_dict.pop('id')
update_dict_fmt_item(adopted_dog_dict, 'create_date', str)
assert adopted_dog_dict in adopted_dogs_dicts
```
#### File: app/utils/test_http_requests.py
```python
from app.config import sttgs
from app.utils.http_request import post_to_uri
def test_post_to_uri():
task_complexity = 0
task_query_url = (
sttgs.get('GUANE_WORKER_URI') + f'?task_complexity={task_complexity}'
)
response = post_to_uri(
task_query_url,
message={'task_complexity': task_complexity}
)
assert response.status_code == 201
assert response.json()['status']
```
#### File: app/worker/test_celery_tasks.py
```python
from app.worker.tasks import post_to_uri_task
def test_task_post_to_uri():
task_data = post_to_uri_task()
assert task_data['status_code'] == 201
assert task_data['data']
```
#### File: tests/utils/handle_db_test.py
```python
from tests.mock.db_session import ( # noqa
setup_test_db,
teardown_test_db,
)
class HandleDBTest:
"""This Class assures that all tests within a subclass are done in
the same database-circumstances
"""
def setup_method(self):
# populate_test_tables
setup_test_db()
def teardown_method(self):
teardown_test_db()
@classmethod
def teardown_class(cls):
setup_test_db()
```
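A sketch of a test class built on this base; the class, test method and ``db`` fixture names below are hypothetical.
```python
# Hypothetical subclass; the `db` fixture name mirrors how other test
# classes in this suite receive a session.
class TestExample(HandleDBTest):
    def test_sees_freshly_seeded_data(self, db):
        # setup_method() has already repopulated the test tables here,
        # and teardown_method() will wipe them after this test returns.
        assert db is not None
```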
#### File: tests/utils/uri.py
```python
from app.config import sttgs
def task_uri(specific_endpoint: str, task_complexity: int = 0) -> str:
return (
sttgs.get('API_PREFIX')
+ sttgs.get('CELERY_TASKS_PREFIX')
+ specific_endpoint
+ '?task_complexity='
+ str(task_complexity)
)
```
|
{
"source": "jearl4/python-charting",
"score": 3
}
|
#### File: jearl4/python-charting/LatexGraph.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
def func(x):
return 0.5 * np.exp(x) + 1
a, b = 0.5, 1.5 #integral limits
x = np.linspace(0, 2)
y = func(x)
fig, ax = plt.subplots(figsize=(7,5))
plt.plot(x, y, 'b', linewidth=2)
plt.ylim(bottom=0)  # `ymin=` was removed from recent matplotlib releases
#illustrate the integral value, i.e. the area under the function
Ix = np.linspace(a, b)
Iy = func(Ix)
verts = [(a, 0)] + list(zip(Ix, Iy)) + [(b, 0)]
poly = Polygon(verts, facecolor = '0.7', edgecolor='0.5')
ax.add_patch(poly)
plt.text(0.5 * (a + b), 1, r"$\int_a^b f(x)\mathrm{d}x$",
horizontalalignment='center', fontsize=20)
plt.figtext(0.9, 0.075, '$x$')
plt.figtext(0.075, 0.9, '$f(x)$')
ax.set_xticks((a,b))
ax.set_xticklabels(('$a$', '$b$'))
ax.set_yticks([func(a), func(b)])
ax.set_yticklabels(('$f(a)$', '$f(b)$'))
plt.grid(True)
plt.show()  # render the figure when run as a script
```
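As a quick check on the shaded area, the integral has a closed form; a sketch of the arithmetic using the script's limits a = 0.5, b = 1.5:
```latex
\int_a^b \left(\tfrac{1}{2}e^x + 1\right) dx
  = \tfrac{1}{2}\left(e^b - e^a\right) + (b - a)
  = \tfrac{1}{2}\left(e^{1.5} - e^{0.5}\right) + 1 \approx 2.4165
```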
|
{
"source": "jearlcalkins/USPSperformance",
"score": 3
}
|
#### File: jearlcalkins/USPSperformance/usps_pdf2csv.py
```python
import re
import PyPDF2
import argparse
import sys
from datetime import datetime
Areas = ['Capital Metro', 'Eastern', 'Great Lakes', 'Pacific', 'Northeast', 'Southern', 'Western']
headings = ['Area', 'District', 'Week', 'First-Class Mail']
FCMdataset = {}
def update_FCMdataset(a_data_point):
[Area, District, Week,FirstClassMailSuccess] = a_data_point
index = Area + District
if index in FCMdataset:
FCMdataset[Area+District].append(a_data_point)
else:
FCMdataset[Area+District] = []
FCMdataset[Area+District].append(a_data_point)
def parse_a_page(result):
length = len(result)
for i in range(length):
if result[i] in Areas:
line_list = result[i:i+4:1]
stat = line_list[3].replace('%','')
stat = float(stat) / 100.0
line_list[3] = '%1.4f' % stat
line = ",".join(line_list)
#print(line)
update_FCMdataset(line_list)
parser = argparse.ArgumentParser()
parser.add_argument('-f', type=str, required=True, help="file pdf name")
args = parser.parse_args()
fn = args.f
pdfFileObj = open(fn, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
page_ct = pdfReader.numPages
for page_number in range(page_ct):
pageObj = pdfReader.getPage(page_number)
result = pageObj.extractText().splitlines()
parse_a_page(result)
pdfFileObj.close()
header = False
for areas in FCMdataset:
area_list = FCMdataset[areas]
time_series = []
buckets = []
for point in area_list:
[Area, District, Week, FirstClassMail] = point
time_series.append(FirstClassMail)
buckets.append(Week)
if header == False:
buckets_line = ",".join(buckets)
begin_string = "Area, District, "
print(begin_string, buckets_line)
header = True
time_series_line = ",".join(time_series)
begin_string = Area + "," + District + ","
print(begin_string, time_series_line)
```
|
{
"source": "jease0502/baseball_video_to_txt",
"score": 3
}
|
#### File: jease0502/baseball_video_to_txt/cutimage.py
```python
import cv2  # needed below for cropping and writing image files
import xml.dom.minidom as minidom
class Cutimage(object):
def __init__(self , xml_path, savePath):
self.xml_path = xml_path
self.savePath = savePath
def xml(self):
doc = minidom.parse(self.xml_path)
root = doc.documentElement
size = root.getElementsByTagName('object')
        # assumed initial values: `count` and `last_name` are referenced in
        # the loop but never defined in this excerpt
        count, last_name = 0, ''
        for i in range(size.length):
            name = size[i].getElementsByTagName('name')[0].childNodes[0].nodeValue
            xmin = size[i].getElementsByTagName('xmin')[0].childNodes[0].nodeValue
            ymin = size[i].getElementsByTagName('ymin')[0].childNodes[0].nodeValue
            xmax = size[i].getElementsByTagName('xmax')[0].childNodes[0].nodeValue
            ymax = size[i].getElementsByTagName('ymax')[0].childNodes[0].nodeValue
            # NOTE: `self.task_dict`, `self.img`, `xmlName`, `f` and
            # `self.Predict2Txt` are assumed to be defined elsewhere in the
            # original project; they do not appear in this excerpt.
            if name in self.task_dict.keys():
                cutImage = self.img[int(ymin) : int(ymax), int(xmin) : int(xmax)]
                count = count + 1 if last_name == name else count
                last_name = name
                cv2.imwrite(self.task_dict[name] + xmlName + str(count) + '.jpg', cutImage)
                # print(np.shape(cutImage))
                data = self.Predict2Txt.predict_txt(name,cutImage)
                print(data)
                f.write(name + " : " + str(data) + "\n")
return size
```
#### File: model_process/boardnumber/tb_net.py
```python
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras.applications import resnet
import efficientnet.tfkeras as efn
def CNN():
model = efn.EfficientNetB0(input_shape=[64, 192, 3], include_top=False)
inputs = model.input
x = model.output
x = K.layers.GlobalAveragePooling2D()(x)
x = K.layers.Dense(512, activation='relu')(x)
x = K.layers.Dropout(0.5)(x)
x = K.layers.Dense(256, activation='relu')(x)
x = K.layers.Dropout(0.5)(x)
x = K.layers.Dense(1, activation='sigmoid')(x)
return K.Model(inputs, x)
```
|
{
"source": "jeasinema/dm_control",
"score": 2
}
|
#### File: mujoco/testing/decorators.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import threading
# Internal dependencies.
import six
from six.moves import range
def run_threaded(num_threads=4, calls_per_thread=10):
"""A decorator that executes the same test repeatedly in multiple threads.
Note: `setUp` and `tearDown` methods will only be called once from the main
thread, so all thread-local setup must be done within the test method.
Args:
num_threads: Number of concurrent threads to spawn. If None then the wrapped
method will be executed in the main thread instead.
calls_per_thread: Number of times each thread should call the test method.
Returns:
Decorated test method.
"""
def decorator(test_method):
"""Decorator around the test method."""
@functools.wraps(test_method) # Needed for `named_parameters` to work.
def decorated_method(self, *args, **kwargs):
"""Actual method this factory will return."""
exceptions = []
def worker():
try:
for _ in range(calls_per_thread):
test_method(self, *args, **kwargs)
except: # pylint: disable=bare-except
# Appending to Python list is thread-safe:
# http://effbot.org/pyfaq/what-kinds-of-global-value-mutation-are-thread-safe.htm
exceptions.append(sys.exc_info())
if num_threads is not None:
threads = [threading.Thread(target=worker, name='thread_{}'.format(i))
for i in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
worker()
for exc_class, old_exc, tb in exceptions:
six.reraise(exc_class, old_exc, tb)
return decorated_method
return decorator
```
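A short usage sketch of the decorator above; the test class is hypothetical.
```python
# Illustrative use of run_threaded; the test class is not from dm_control.
import unittest

class ExampleThreadedTest(unittest.TestCase):

    @run_threaded(num_threads=8, calls_per_thread=5)
    def test_concurrent_body(self):
        # Executed 8 threads x 5 calls = 40 times; an exception raised in
        # any worker thread is re-raised after all threads join.
        self.assertEqual(sum(range(4)), 6)
```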
#### File: dm_control/suite/lqr_solver.py
```python
r"""Optimal policy for LQR levels.
LQR control problem is described in
https://en.wikipedia.org/wiki/Linear-quadratic_regulator#Infinite-horizon.2C_discrete-time_LQR
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Internal dependencies.
from absl import logging
from dm_control.mujoco import wrapper
import numpy as np
from six.moves import range
try:
import scipy.linalg as sp # pylint: disable=g-import-not-at-top
except ImportError:
sp = None
def _solve_dare(a, b, q, r):
"""Solves the Discrete-time Algebraic Riccati Equation (DARE) by iteration.
Algebraic Riccati Equation:
```none
P_{t-1} = Q + A' * P_{t} * A -
A' * P_{t} * B * (R + B' * P_{t} * B)^{-1} * B' * P_{t} * A
```
Args:
a: A 2 dimensional numpy array, transition matrix A.
b: A 2 dimensional numpy array, control matrix B.
q: A 2 dimensional numpy array, symmetric positive definite cost matrix.
r: A 2 dimensional numpy array, symmetric positive definite cost matrix
Returns:
A numpy array, a real symmetric matrix P which is the solution to DARE.
Raises:
RuntimeError: If the computed P matrix is not symmetric and
positive-definite.
"""
p = np.eye(len(a))
for _ in range(1000000):
a_p = a.T.dot(p) # A' * P_t
a_p_b = np.dot(a_p, b) # A' * P_t * B
# Algebraic Riccati Equation.
p_next = q + np.dot(a_p, a) - a_p_b.dot(
np.linalg.solve(b.T.dot(p.dot(b)) + r, a_p_b.T))
p_next += p_next.T
p_next *= .5
if np.abs(p - p_next).max() < 1e-12:
break
p = p_next
else:
        logging.warning('DARE solver did not converge')
try:
# Check that the result is symmetric and positive-definite.
np.linalg.cholesky(p_next)
except np.linalg.LinAlgError:
raise RuntimeError('ARE solver failed: P matrix is not symmetric and '
'positive-definite.')
return p_next
def solve(env):
"""Returns the optimal value and policy for LQR problem.
Args:
env: An instance of `control.EnvironmentV2` with LQR level.
Returns:
p: A numpy array, the Hessian of the optimal total cost-to-go (value
function at state x) is V(x) = .5 * x' * p * x.
k: A numpy array which gives the optimal linear policy u = k * x.
beta: The maximum eigenvalue of (a + b * k). Under optimal policy, at
timestep n the state tends to 0 like beta^n.
Raises:
RuntimeError: If the controlled system is unstable.
"""
n = env.physics.model.nq # number of DoFs
m = env.physics.model.nu # number of controls
# Compute the mass matrix.
mass = np.zeros((n, n))
wrapper.mjbindings.mjlib.mj_fullM(env.physics.model.ptr, mass,
env.physics.data.qM)
# Compute input matrices a, b, q and r to the DARE solvers.
# State transition matrix a.
stiffness = np.diag(env.physics.model.jnt_stiffness.ravel())
damping = np.diag(env.physics.model.dof_damping.ravel())
dt = env.physics.model.opt.timestep
j = np.linalg.solve(-mass, np.hstack((stiffness, damping)))
a = np.eye(2 * n) + dt * np.vstack(
(dt * j + np.hstack((np.zeros((n, n)), np.eye(n))), j))
# Control transition matrix b.
b = env.physics.data.actuator_moment.T
bc = np.linalg.solve(mass, b)
b = dt * np.vstack((dt * bc, bc))
# State cost Hessian q.
q = np.diag(np.hstack([np.ones(n), np.zeros(n)]))
# Control cost Hessian r.
r = env.task.control_cost_coef * np.eye(m)
if sp:
# Use scipy's faster DARE solver if available.
solve_dare = sp.solve_discrete_are
else:
# Otherwise fall back on a slower internal implementation.
solve_dare = _solve_dare
# Solve the discrete algebraic Riccati equation.
p = solve_dare(a, b, q, r)
k = -np.linalg.solve(b.T.dot(p.dot(b)) + r, b.T.dot(p.dot(a)))
# Under optimal policy, state tends to 0 like beta^n_timesteps
beta = np.abs(np.linalg.eigvals(a + b.dot(k))).max()
if beta >= 1.0:
raise RuntimeError('Controlled system is unstable.')
return p, k, beta
```
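As a sanity check of the iterative solver, a scalar system with a = b = q = r = 1 reduces the DARE to p = 1 + p − p²/(1 + p), whose fixed point p² = p + 1 is the golden ratio. An illustrative check (not part of the original test suite):
```python
# Scalar sanity check for _solve_dare (illustrative values only).
import numpy as np

one = np.array([[1.0]])
p = _solve_dare(one, one, one, one)
# Fixed point satisfies p**2 = p + 1, i.e. p is the golden ratio.
assert abs(p[0, 0] - (1 + 5 ** 0.5) / 2) < 1e-6
```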
|
{
"source": "JeasonLaung/thinkphp",
"score": 3
}
|
#### File: version0.1/sc/1.py
```python
import pymysql
import sys,os
class sql():
host = 'localhost'
port= 3306
user = ''
passwd = ''
db = ''
charset = 'utf8'
cursor = None
_table = ''
_where = ''
_limit = ''
_field = ''
lastSql = ''
data = {}
    # magic methods
    def __init__(self,**arg):
        # check the type of the first value; if it is a dict, use it to set
        # the database connection attributes
if type(arg['db']) == type({}):
for key in arg['db']:
setattr(self,key,arg['db'][key])
self.cursor = pymysql.Connect(
host=self.host,
port=self.port,
user=self.user,
passwd=<PASSWORD>,
db=self.db,
charset='utf8'
).cursor()
def __setattr__(self,key,value):
if hasattr(self,key) is False :
if value:
value = value.replace('"','\\"')
value = value.replace("'","\\'")
self.data[key] = '\"%s\"' % value
else:
            # setattr(self,key,value)  # either form works
self.__dict__[key] = value
def table(self,table):
self._table = table
return self
def where(self,where):
if where:
self._where = ' WHERE ' + where
return self
def limit(self,limit):
if limit:
self._limit = ' LIMIT ' + limit
return self
def field(self,field):
if field:
self._field = field
return self
def add(self):
add_str = 'INSERT INTO ' + self._table+ '(' + ','.join(self.data.keys()) + ') VALUES(' + ','.join(self.data.values()) + ')'
self.query(add_str)
def set(self):
temp = ''
for x in self.data.keys():
temp += '%s = %s,' % (x,self.data[x])
temp = temp[:-1]
set_str = 'UPDATE ' + self._table+ ' SET ' + temp
self.query(set_str)
def get(self,pk=False):
if self._field:
field = self._field
else:
field = '*'
if type(pk) == type(1):
self.where('id = %s'% pk)
get_str = 'SELECT '+field+' FROM ' + self._table
self.query(get_str)
return self.cursor.fetchall()
def convert(self,data):
f = self.do('SHOW COLUMNS FROM `%s`' % self._table)
# print(data)
# field_name_arr = []
temp = []
for y in range(len(data)):
            # iterate over data; y is the row index
one = data[y]
# print(temp[y])
# if type(temp[y]) != type([]):
            # build a dict for this row
temp2 = {}
for x in range(len(f)-1):
# key
key = f[x][0]
# print(str(key))
temp2[key] = one[x]
            # append this row's dict
temp.append(temp2)
# for i in data:
return temp
def do(self,sql):
self.cursor.execute(sql)
self.cursor.connection.commit()
return self.cursor.fetchall()
def query(self,query_str):
query_str = query_str + self._where + self._limit
self.lastSql = query_str
# print(query_str)
self.cursor.execute(query_str)
self.cursor.connection.commit()
self.data = {}
self._where = ''
self._field = ''
return self
def getLastSql(self):
return self.lastSql
def select(self):
return self.cursor.fetchall()
db = {
"host":'localhost',
"port":3306,
"user":'root',
"passwd":'<PASSWORD>',
"db":'fuck_life',
"charset":'utf8'
}
m = sql(db=db)
# m.table('school')
# print(m.convert(m.get(100)))
# sys.exit()
# m.field(' id as uid ')
# print(m.get(100))
# sys.exit()
import json
# m = sql(db=db)
# m.table('student')
# m.base_birth_month = '我是'
# m.add()
import requests
# print(fuck_life)  # `fuck_life` is an undefined name; only the exit below runs
sys.exit()
cookies = {
'COMPANY':'Wc9I4mG8Vsh8ACpzqrAziGqqR1rfCSF5MtDXMyNaWZHtCkSU3MDspVA0oNA6xkg7ySRi7P2GK%252FXMvLRDf2ppMw%253D%253D;'
}
# # print(cookies)
# # print(res.text)
m=sql(db=db)
with open('51job.json',encoding='utf-8') as f:
data = f.read()
i = 0
m.table('school_year')
import time
while i < 145:
i = i+1
m.where('id = %s' % i)
data = m.get()
if data is None or len(data)<1:
print(1)
continue
else:
year = data[0][1]
school_id = data[0][2]
m.table('major')
# # res = requests.get('http://www.51renc.com/api/v3/school/get_school_info?school_id='+str(i),cookies=cookies)
# m.table('school_year')
# res = requests.get('http://www.51renc.com/api/v3/school/get_all_year?school_id='+str(i),cookies=cookies)
res = requests.get('http://www.51renc.com/api/v3/school/get_all_major?school_id=%s&year=%s' % (school_id,year))
# print('http://www.51renc.com/api/v3/school/get_all_major?school_id=%s&year=%s' % (school_id,year))
# continue
# # print(res.status_code)
# # break
# print(res)
if res.status_code == 200:
res = res.json()
else:
continue
for x in res['data']:
m.school_id = str(school_id)
for y in x:
m.__setattr__(y,str(x[y]))
m.add()
print(m.getLastSql())
m.table('school_year')
# m.table('major')
# m.name = str()
# m.year = str(x)
# m.add()
# print(m.getLastSql())
# if res['status'] == 1:
# continue
# m.__setattr__('id',str(i))
# for x in res['data']:
# y = res['data'][x]
# m.__setattr__(x,y)
# m.add()
# print(m.getLastSql())
# pass
# print(data)
# m.table('student')
# i=0
# # print([] == [])
# # print(type({'a':1})==type({}))
# while 1:
# i = i+1
# res = requests.get('http://m.v2.51renc.com/api/v2/company/resume_search?page='+str(i),cookies=cookies).json()
# if res['data'] == []:
# break
# for x in res['data']:
# for y in x:
# if type(x[y])==type({}) or type(x[y])==type([]):
# x[y] = json.dumps(x[y])
# m.__setattr__(y,str(x[y]))
# m.add()
# print('success INSERT'+str(i))
# filename='51company.json'
# with open(filename,encoding='utf-8') as f:
# data = json.loads(f.read())['data']['data']
# m = sql(db=db)
# m.table('user')
# # m.where('degree = \'3.5\'')
# change_json = {
# # 'vip_modified_time':'vip_modified_time',
# 'created':'created_time',
# # 'modified':'modified_time'
# }
# for x in data:
# m.where('username = ' + '\''+str(x['username'])+'\'')
# for y in x:
# if y in change_json:
# m.__setattr__(change_json[y],x[y])
# m.set()
# print(str(x['id'])+' succeeded')
# province_name_arr = []
# for one in x:
# m.table('province')
# province_name = one['mergername'].split(',')[1]
# m.field('id')
# m.where(' name = \"'+province_name+'\"')
# province_id = m.get()[0][0]
# m.table('city')
# m.province_id = province_id
# for key in one:
# m.__setattr__(key,one[key])
# m.add()
# province_name_arr.append('\''+province_name+'\' OR ')
# sql = 'name = '+'name = '.join(province_name_arr)[:-3]
# m.where(sql)
# m.field('id')
# m.get()
# m.__setattr__(key,x[key])
# m.add()
# print(x['name']+' added successfully')
# import json
# db = {
# "host":'localhost',
# "port":3306,
# "user":'root',
# "passwd":'<PASSWORD>',
# "db":'51ren',
# "charset":'utf8'
# }
# filename = '51job.json'
# with open(filename,encoding='utf-8') as f:
# data = json.loads(f.read())['data']
# print(data[0])
# mapField = {
# 'is_vip':'is_vip',
# 'vip_modified_time':'vip_modified_time',
# 'is_available':'is_available',
# 'id':'id',
# 'name':'name',
# 'created':'created_time',
# 'modified':'modified_time',
# 'brief':'description',
# 'province':'province',
# 'city':'city',
# 'logo':'logo',
# 'license':'license_pic',
# 'credit_code':'credit_code',
# 'has_fix':'has_fixed',
# 'expired_time':'expired_time',
# 'brief_name':'brief_name'
# }
# print(eval(data))
# mapField = {
# 'id':'id',
# 'company_id':'company_id',
# 'title':'title',
# 'location':'location',
# 'degree':'degree',
# 'experience':'experience',
# 'phone':'phone',
# 'description':'description',
# 'status':'status',
# 'created':'created_time',
# 'modified':'modified_time',
# 'welfare':'welfare',
# 'province':'province',
# 'city':'city',
# 'max_salary':'max_salary',
# 'min_salary':'min_salary',
# 'has_fix':'has_fixed'
# }
# import hashlib,time
# def gettime(s):
# timeArray = time.strptime(s, "%Y-%m-%d %H:%M:%S")
# return int(time.mktime(timeArray))
# s = hashlib.sha256()
# s.update(('123456'+'shengsheng').encode('utf-8'))
# mima = str(s.hexdigest())
# m = sql(db=db)
# m.table('job')
# n = 0
# for x in data:
# for y in x:
# if y in mapField:
# m.__setattr__(mapField[y],x[y])
# m.created_time = gettime(x['created'])
# m.modified_time = gettime(x['modified'])
# m.add()
# print('successfully added '+ str(x['id']))
# for x in data:
# # for y in x:
# # if y in mapField:
# # m.__setattr__(mapField[y],x[y])
# n = n+1
# m.table('user')
# m.username = x['username']
# m.password = '%s' % <PASSWORD>
# timeArray = time.strptime(x['created'], "%Y-%m-%d %H:%M:%S")
# m.created_time = int(time.mktime(timeArray))
# m.add()
# m.table('company')
# for y in x:
# if y in mapField:
# m.__setattr__(mapField[y],x[y])
# m.created_time = gettime(x['created'])
# m.vip_modified_time = gettime(x['vip_modified_time'])
# m.modified_time = gettime(x['modified'])
# m.user_id = n
# m.add()
# print('successfully added '+ str(n))
```
#### File: sc/py/sql.py
```python
import hashlib
# print(hashlib.hash_md5('123'))
m = hashlib.sha256()
m.update(('123456jeason').encode('utf-8'))
print(m.hexdigest())
# #coding=utf-8
# import pymysql
# class sql():
# host = 'localhost'
# port = 3306
# user = 'root'
# password = '<PASSWORD>'
# db = 'jobweb'
# charset = 'utf8'
# cursor = None
# trim = False
# fieldArray = {}
# where = ''
# def __init__(self):
# self.cursor = pymysql.Connect(
# host=self.host,
# port=self.port,
# user=self.user,
# password=<PASSWORD>,
# db=self.db,
# charset=self.charset
# ).cursor()
# pass
# def table(self,table):
# self.table = table
# return self
# def handleSql(self):
# pass
# def trim():
# self.trim = True
# return False
# pass
# def add(self):
# s = 'insert into name(name) values (\'%s\')' % '你好'
# return self.__exec(s)
# pass
# def get(self,limit='',choose='',sort=False):
# if limit:
# limit_str = 'LIMIT '+str(limit)
# else:
# limit_str = ''
# if sort == 1:
# sort_str = 'ASC'
# elif sort == -1:
# sort_str = 'DESC'
# else:
# sort_str = ''
# where = self.where(self.fieldArray)
# if self.trim and type(choose) == type([]):
# s = 'AND '
# for x in choose:
# s += x + '!=\'\' AND '
# trim_str =s[0:-5]
# if bool(where) is False:
# trim_str = 'WHERE '+trim_str[3:]
# else:
# trim_str = ''
# if type(choose) is type([]):
# choose_str = ','.join(choose)
# s = 'SELECT %s FROM %s %s %s ORDER BY %s %s %s' % (choose_str,self.table,where,trim_str,choose[0],sort_str,limit_str)
# elif choose is '':
# s = 'SELECT * FROM %s %s %s %s ' % (self.table,where,trim_str,limit_str)
# else:
# s = 'SELECT * FROM %s %s %s ORDER BY %s %s %s' % (self.table,where,trim_str,choose,sort_str,limit_str)
# print(s)
# self.__exec(s)
# return self.cursor.fetchall()
# def where(self,json):
# temp = ''
# if json:
# temp = 'WHERE '
# for x in json:
# temp += str(x)+'=\''+str(json[x])+'\' AND '
# temp = temp[0:-4]
# return temp
# pass
# def count(self):
# where = self.where(self.fieldArray)
# s = 'SELECT COUNT(*) FROM %s %s' % (self.table,where)
# print(s)
# self.__exec(s)
# return self.cursor.fetchall()[0][0]
# def __exec(self,query):
# self.__reset()
# return self.cursor.execute(query)
# pass
# def __reset(self):
# self.fieldArray = {}
# pass
# def __setattr__(self,key,value):
# if hasattr(self,key) is False:
# self.fieldArray[key] = value
# else:
# self.__dict__[key] = value
# a = sql()
# # print(a.get(5,'id',1))
# a.table('student')
# # a.b= 1
# # a.base_gender = 'female'
# # print(a.count())
# a.base_gender = 'female'
# num = a.count()
# host = 'https://m.v2.51renc.com'
# url_data_arr = a.get('',['base_logo'])
# print(url_data_arr)
# import requests,time
# # for x in data:
# active = 0
# while(True):
# url = host+url_data_arr[active][0]
# data = requests.get(url).content
# # print(data)
# try:
# with open(url.split('/')[-1],'wb') as f:
# f.write(data)
# # # data = requests.get(url).content
# print('fetched: '+url.split('/')[-1])
# except ValueError as e:
# print('fetch failed: '+url.split('/')[-1] + '\nreason: '+e)
# if active>=num:
# break
# active = active+1
# time.sleep(0.5)
# # print(a.fieldArray)
# # def fnc(*arg):
# # print(arg);
# # fnc(1)
```
|
{
"source": "jeasonlau/todo",
"score": 2
}
|
#### File: source/todo/todo.py
```python
import os, sys, sqlite3, functools, configparser, textwrap
import os.path as op
from datetime import datetime, timezone
from . import cli_parser, utils, data_access, core
from .rainbow import ColoredStr, cstr
from .data_access import DataAccess
from .utils import (
DATA_DIR, DB_PATH, VERSION_PATH, DATAFILE_PATH, NOW,
CannotOpenEditorError
)
__version__ = '3.2.1'
# Icons used to print tasks' properties in the terminal.
# True is the ASCII version for challenged terminals.
# False is the Unicode version.
CONTEXT_ICON = {True: '#', False: '#'}
TIME_ICON = {True: '~', False: '⌛'}
PRIORITY_ICON = {True: '!', False: '★'}
WIDE_HIST_THRESHOLD = 120
TASK_MUTATORS = {
'deadline': datetime.max,
'start': None,
'priority': 1,
'title': None,
'created': data_access.DATETIME_MIN
}
CONTEXT_MUTATORS = {
'priority': 1,
'visibility': 'normal'
}
CONFIG_FILE = op.expanduser(op.join('~', '.toduhrc'))
if os.name == 'posix':
COLORS = 'on'
else:
COLORS = 'off'
DEFAULT_CONFIG = configparser.ConfigParser()
DEFAULT_CONFIG['App'] = {
'layout': 'basic',
'todo_fashion': 'tidy',
'show_empty_contexts': True
}
DEFAULT_CONFIG['Colors'] = {
'colors': COLORS,
'palette': '8',
'id': 'yellow',
'content': 'default',
'context': 'cyan',
'overtime': 'red',
'deadline': 'cyan',
'priority': 'green',
'done': 'green'
}
DEFAULT_CONFIG['Word-wrapping'] = {
'title': True,
'content': True,
'smart': False,
'width': -1
}
CONFIG = configparser.ConfigParser(
allow_no_value=True,
strict=True
)
# Loading the config with the default config
CONFIG.read_dict(DEFAULT_CONFIG)
# Loading the user config. Will complete/overwrite the default config
# but will keep default config entries that the user might have removed
CONFIG.read(CONFIG_FILE)
# Editor election: in config file? No -> in OS EDITOR variable? No -> vim
EDITOR = CONFIG.get('App', 'editor', fallback=None)
if EDITOR is None:
EDITOR = os.environ.get('EDITOR', 'vim')
if CONFIG.getboolean('Colors', 'colors'):
cstr = functools.partial(
cstr,
palette=CONFIG.get('Colors', 'palette')
)
else:
cstr = functools.partial(
cstr,
no_color=True
)
DONE_STR = '[DONE]'
def main():
argv = sys.argv[1:]
if len(argv) == 1 and argv[0] == 'doduh':
print('Beethoven - Symphony No. 5')
sys.exit(0)
args = cli_parser.parse_cli()
if args['version']:
print(__version__)
elif args['location']:
print(DATA_DIR)
else:
report = cli_parser.parse_args(args)
if len(report) > 0:
for error in report:
print(error)
sys.exit(1)
current_version = get_installed_version()
if not op.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if current_version != __version__:
with open(VERSION_PATH, 'w') as version_file:
version_file.write(__version__)
daccess = get_data_access(current_version)
result = dispatch(args, daccess)
if result is not None:
feedback_code, *data = result
globals()['feedback_'+feedback_code](*data)
daccess.exit()
def get_installed_version():
if op.exists(VERSION_PATH):
with open(VERSION_PATH) as version_file:
return version_file.read()
else:
if op.exists(DB_PATH):
return '3.0.1'
elif op.exists(DATAFILE_PATH):
return '2.1'
else:
return None
def get_data_access(current_version):
data_access.setup_data_access(current_version)
connection = sqlite3.connect(DB_PATH)
return DataAccess(connection)
# HANDLERS
# Do something with the args dictionary and the data access object then return
# a feedback code as well as some data about how things went. Feedback
# functions (whose names are feedback_<feedback_code>) will then be passed the
# data and should print some feedback based on the data. A handler can also
# return nothing (or None) if no feedback is intended.
def add_task(args, daccess):
context = args.get('context')
options = get_options(args, TASK_MUTATORS, {'deadline': {'None': None}})
if context is None:
context = ''
if args['edit']:
title, content = core.editor_edit_task(args['title'], None, EDITOR)
else:
title, content = args['title'], None
id_ = daccess.add_task(title, content, context, options)
return 'add_task', id_
def manage_task(args, daccess):
tid = args['id'][0]
context = args.get('context')
options = get_options(args, TASK_MUTATORS, {'deadline': {'None': None}})
if len(options) == 0 and context is None:
return show_task(tid, daccess)
else:
upt_count = daccess.update_task(tid, context, options)
if upt_count == 0:
return 'task_not_found', tid
def show_task(tid, daccess):
task = daccess.get_task(tid)
if task is None:
return 'task_not_found', tid
# w3 = word-wrap width
w3 = CONFIG.getboolean('Word-wrapping', 'content')
if w3:
w3 = CONFIG.getint('Word-wrapping', 'width')
if w3 == -1:
w3 = utils.get_terminal_width()
else:
w3 = None
full_content = core.get_task_full_content(
task['title'],
task['content'],
wrap_width=w3,
smart_wrap=CONFIG.getboolean('Word-wrapping', 'smart')
)
return 'show_task', task, full_content
def edit_task(args, daccess):
tid = args['id'][0]
task = daccess.get_task(tid)
if task is None:
return 'task_not_found', tid
can_edit = daccess.take_editing_lock(tid)
if not can_edit:
return 'cannot_edit', tid
try:
try:
new_title, new_content = core.editor_edit_task(
task['title'],
task['content'],
EDITOR
)
except CannotOpenEditorError as err:
return 'cannot_open_editor', err.editor
daccess.update_task(tid, options=[
('title', new_title),
('content', new_content)
])
finally:
daccess.release_editing_lock(tid)
def do_task(args, daccess):
not_found = daccess.set_done_many(args['id'])
return 'multiple_tasks_done', not_found
def remove_task(args, daccess):
not_found = daccess.remove_many(args['id'])
return 'multiple_tasks_update', not_found
def manage_context(args, daccess):
path = args['context']
name = args.get('name')
options = get_options(args, CONTEXT_MUTATORS)
exists = True
if len(options) == 0 and name is None:
return todo(args, daccess)
else:
if name is not None:
renamed = data_access.rename_context(args['context'], name)
rcount = daccess.rename_context(args['context'], name)
if rcount is None:
return 'target_name_exists', renamed
else:
if rcount > 0:
path = renamed
else:
exists = False
if len(options) > 0:
daccess.set_context(path, options)
elif not exists:
return 'not_exists', path
def move(args, daccess):
ctx1 = args['ctx1']
ctx2 = args['ctx2']
source_exists = daccess.context_exists(ctx1)
if not source_exists:
return 'not_exists', ctx1
else:
daccess.move_all(ctx1, ctx2)
def remove_context(args, daccess):
ctx = args['context']
if not daccess.context_exists(ctx):
return 'not_exists', ctx
force = args['force']
go_ahead = False
if not force:
nb_tasks, nb_subctx = daccess.get_basic_context_tally(ctx)
ans = input('This context contains {} direct undone task(s) and '
'{} subcontext(s). Continue? y/* '.format(nb_tasks, nb_subctx))
go_ahead = ans == 'y'
else:
go_ahead = True
if go_ahead:
daccess.remove_context(ctx)
def todo(args, daccess):
fashion = 'flat' if args['flat'] else None
if fashion is None:
fashion = 'tidy' if args['tidy'] else None
if fashion is None:
fashion = CONFIG.get('App', 'todo_fashion')
ctx = args.get('context', '')
if ctx is None:
ctx = ''
tasks = daccess.todo(ctx, recursive=(fashion == 'flat'))
if fashion == 'tidy':
get_empty = CONFIG.getboolean('App', 'show_empty_contexts')
subcontexts = daccess.get_subcontexts(ctx, get_empty)
else:
subcontexts = []
return 'todo', ctx, tasks, subcontexts
def get_contexts(args, daccess):
path = args['context']
if path is None:
path = ''
contexts = daccess.get_descendants(path)
return 'contexts', contexts
def get_history(args, daccess):
tasks = daccess.history()
gid = daccess.get_greatest_id()
return 'history', tasks, gid
def purge(args, daccess):
force = args['force']
before = args['before']
go_ahead = False
if force:
go_ahead = True
else:
if before is None:
q = "This will delete all done tasks. "
else:
q = "This will delete all done tasks created before "+\
"{}. ".format(before)
q += "This operation is irreversible. Continue? y/* "
ans = input(q)
if ans == 'y':
go_ahead = True
if go_ahead:
count = daccess.purge(before)
return 'purge', count
def search(args, daccess):
term = args['term']
done = None
if args['done']:
done = True
elif args['undone']:
done = False
if args['context'] is None:
ctx = ''
else:
ctx = args['context']
tasks = daccess.search(
term,
ctx=ctx,
done=done,
before=args.get('before'),
after=args.get('after'),
case=args['case']
)
return 'todo', '', tasks, [], (term, args['case'])
## DISPATCHING
# Map of the names of the commands to handlers defined above.
DISPATCHER = {
'add': add_task,
'task': manage_task,
'edit': edit_task,
'done': do_task,
'rm': remove_task,
'ctx': manage_context,
'rmctx': remove_context,
'mv': move,
'contexts': get_contexts,
'history': get_history,
'purge': purge,
'search': search
}
def dispatch(args, daccess):
if 'command' in args:
return DISPATCHER[args['command']](args, daccess)
# If no command, fallback to the todo handler
return todo(args, daccess)
def get_options(args, mutators, converters={}):
"""
    Returns a list of 2-tuples in the form (option, value) for all options
    contained in the `mutators` collection that are also keys of `args`
    and have a non-None value. If the option (non-prefixed) is also a key of
    the `converters` dictionary then the associated value should be another
    dictionary indicating conversions to be done on the value found in `args`.
e.g.
args = {'deadline': 'none'}
mutators = {'deadline'}
converters = {'deadline': {'none': None}}
=> [('deadline', None)]
"""
options = []
for mutator in mutators:
if mutator in args and args[mutator] is not None:
val = args[mutator]
convertions = converters.get(mutator)
if convertions is not None and val in convertions:
val = convertions[val]
options.append((mutator, val))
return options
## FEEDBACK FUNCTIONS
TASK_SUBCTX_SEP = '-'*40
def feedback_add_task(id_):
pass
def feedback_task_not_found(tid):
print('Task {} not found'.format(utils.to_hex(tid)))
def feedback_cannot_edit(tid):
print('Task {} is already being edited'.format(utils.to_hex(tid)))
def feedback_cannot_open_editor(editor):
print('Cannot open editor: {}'.format(editor))
def feedback_multiple_tasks_update(not_found):
if len(not_found) > 0:
s = 's' if len(not_found) > 1 else ''
string = ', '.join(utils.to_hex(tid) for tid in not_found)
print('Task{} not found: {}'.format(s, string))
def feedback_multiple_tasks_done(not_found):
if len(not_found) > 0:
string = ', '.join(utils.to_hex(tid) for tid in not_found)
print('Not found or already done: {}'.format(string))
def feedback_todo(context, tasks, subcontexts, highlight=None):
layout = CONFIG.get('App', 'layout')
if layout == 'multiline':
stringyfier = get_multiline_task_string
else:
stringyfier = get_basic_task_string
if len(tasks) != 0:
id_width = max(len(utils.to_hex(task['id'])) for task in tasks)
else:
id_width = 1
for task in tasks:
partial = functools.partial(stringyfier, context, id_width, task,
highlight=highlight)
safe_print(partial)
if len(subcontexts) > 0:
print(TASK_SUBCTX_SEP)
for ctx in subcontexts:
partial = functools.partial(get_context_string, context, id_width, ctx)
safe_print(partial)
def feedback_target_name_exists(renamed):
print('Context already exists: {}'.format(
utils.get_relative_path('', renamed)
)
)
def feedback_not_exists(ctx):
print("Context does not exist: {}".format(
utils.get_relative_path('', ctx)
)
)
def feedback_contexts(contexts):
def get_tally(ctx):
return '{} ({})'.format(ctx['total_tasks'], ctx['own_tasks'])
struct = [
('context', lambda a: a, '<', 'path', lambda a: a[1:]),
('visibility', 10, '<', 'visibility', None),
('priority', 8, '<', 'priority', None),
('undone tasks', 12, '<', None, get_tally)
]
utils.print_table(struct, contexts, is_context_default)
def feedback_history(tasks, gid):
if gid is None:
print('No history.')
else:
struct = get_history_struct(gid)
utils.print_table(struct, tasks, is_task_default)
def feedback_purge(count):
s = 's' if count > 1 else ''
print('{} task{} deleted'.format(count, s))
def feedback_show_task(task, full_content):
print(cstr(" ID:", '6'), utils.to_hex(task['id']))
print(cstr("Created:", '6'), utils.sqlite_date_to_local(task['created']))
if task['start'] == task['created']:
print(cstr(" Start:", '6'), "@created")
else:
print(cstr(" Start:", '6'), utils.sqlite_date_to_local(task['start']))
print(
cstr(" Status:", '6'),
"DONE" if task['done'] is not None else "TODO"
)
def print_metaline(ascii_):
c = get_task_string_components(task, '', ascii_, highlight=None)
if task['done'] is None:
stuff = ['deadline', 'priority', 'context']
else:
stuff = ['priority', 'context']
metaline = ' '.join(c[a] for a in stuff if c[a] != '')
if len(metaline) > 0:
return ' ' + metaline
else:
return None
safe_print(print_metaline)
print(cstr('-'*utils.get_terminal_width(), '3'))
print(full_content)
# String building for todo feedback
# Those functions return a string. They accept a boolean `ascii_` argument
# that indicates whether to build the returned string with ASCII characters
# only (True) or whether non-ASCII characters are allowed (False). Those
# functions will then be partially called with all arguments set except the
# `ascii_` one and the resulting partial will be passed to `safe_print` which
# will take care of trying to print the non-ASCII version and then fallback to
# the ASCII version in case of error from the terminal.
def get_basic_task_string(context, id_width, task, highlight=None, ascii_=False):
c = get_task_string_components(task, context, ascii_, highlight=highlight)
if isinstance(c['id'], ColoredStr):
ansi_offset = c['id'].lenesc
else:
ansi_offset = 0
result = ' {id:>{width}} | '.format(width=id_width + ansi_offset, **c)
left_width = id_width + 4
init_indent = left_width
if len(c['done']):
adding = c['done'] + ' '
result += adding
init_indent += len(DONE_STR) + 1 # [DONE] followed by space
wrap_width = CONFIG.getint('Word-wrapping', 'width')
if wrap_width == -1:
wrap_width = utils.get_terminal_width()
if CONFIG.getboolean('Word-wrapping', 'title'):
title_subindent = ' '*left_width
# The correct way to wrap would be to order textwrap to wrap the whole
# ` {id} | {title}` with the subsequent indent being the length of `
# {id} | `. However, {id} containing ANSI escape characters for
# coloring will mess up textwrap character counting, so what we do
# instead is wrapping only {title} prefixed with the length of ` {id}
# | `, and we remove the prefix afterwards, ` {id} | ` taking its
# place.
lines = textwrap.wrap(
' '*init_indent + c['title'],
width=wrap_width,
subsequent_indent=' '*left_width
)
lines[0] = lines[0][init_indent:]
else:
lines = [c['title']]
len_last_line = len(lines[-1])
title = '\n'.join(lines)
start_title = len(result)
result += title
end_title = len(result)
metadata = [c['deadline'], c['priority'], c['context']]
metatext = ' '.join(stuff for stuff in metadata if stuff != '')
if len(metatext) > 0:
wrap_title = CONFIG.getboolean('Word-wrapping', 'title')
not_enough_space = wrap_width - len_last_line <= 0
if wrap_title and not_enough_space:
result += '\n' + ' '*left_width
else:
result += ' '
result += metatext
return result
def get_multiline_task_string(context, id_width, task, highlight=None, ascii_=False):
c = get_task_string_components(task, context, ascii_, highlight=highlight)
template = ' {id} {done} / {deadline} {priority} {context}\n'
result = template.format(**c)
wrap_width = CONFIG.getint('Word-wrapping', 'width')
if wrap_width == -1:
wrap_width = utils.get_terminal_width()
title = c['title']
if CONFIG.getboolean('Word-wrapping', 'title'):
title = '\n'.join(textwrap.wrap(c['title'], width=wrap_width))
result += title + '\n'
return result
def get_task_string_components(task, ctx, ascii_=False, highlight=None):
id_str = cstr(utils.to_hex(task['id']), clr('id'))
if highlight is not None and CONFIG.getboolean('Colors', 'colors'):
term, case = highlight
content_str = utils.get_highlights_term(
task['title'],
term,
(clr('content'), CONFIG.get('Colors', 'palette')),
case=case
)
else:
content_str = cstr(task['title'], clr('content'))
remaining_str = ''
deadline = get_datetime(task['deadline'])
if deadline is not None:
remaining = deadline - NOW
user_friendly , isOvertime = utils.parse_remaining(remaining)
remaining_str = '{} {} remaining'.format(
TIME_ICON[ascii_],
user_friendly
)
remaining_str = cstr(remaining_str, clr('overtime')) if isOvertime else cstr(remaining_str, clr('deadline'))
prio_str = ''
priority = task['priority']
if not is_task_default(task, 'priority'):
prio_str = '{}{}'.format(PRIORITY_ICON[ascii_], priority)
prio_str = cstr(prio_str, clr('priority'))
ctx_path = utils.get_relative_path(ctx, task['ctx_path'])
if ctx_path == '':
ctx_str = ''
else:
ctx_str = '{}{}'.format(CONTEXT_ICON[ascii_], ctx_path)
ctx_str = cstr(ctx_str, clr('context'))
done_str = ''
if task['done'] is not None:
done_str = DONE_STR
if done_str != '':
done_str = cstr(done_str, clr('done'))
return {
'id': id_str,
'title': content_str,
'deadline': remaining_str,
'priority': prio_str,
'context': ctx_str,
'done': done_str
}
def get_context_string(context, id_width, ctx, ascii_=False):
hash_str = cstr('#', clr('id'))
if isinstance(hash_str, ColoredStr):
ansi_offset = hash_str.lenesc
else:
ansi_offset = 0
path = utils.get_relative_path(context, ctx['path'])
string = '{hash:>{width}} | {path} ({nbr})'.format(
hash=hash_str,
width=id_width + ansi_offset + 1,
path=path,
nbr=ctx['total_tasks']
)
priority = ctx['priority']
if not is_task_default(ctx, 'priority'):
prio_str = ' {}{}'.format(PRIORITY_ICON[ascii_], priority)
string += cstr(prio_str, clr('priority'))
return string
def safe_print(partial):
try:
result = partial(ascii_=False)
if result is not None:
print(result)
except UnicodeEncodeError:
result = partial(ascii_=True)
if result is not None:
print(result)
def get_datetime(db_dt):
""" Get a datetime object from the string retrieved from the database."""
if db_dt is None:
return None
return datetime\
.strptime(db_dt, utils.SQLITE_DT_FORMAT)\
.replace(tzinfo=timezone.utc)
def is_task_default(task, prop):
if prop == 'start':
return task['start'] == task['created']
return is_default(task, prop, TASK_MUTATORS)
def is_context_default(ctx, prop):
return is_default(ctx, prop, CONTEXT_MUTATORS)
def is_default(dico, prop, defaults):
default = defaults.get(prop, None)
if default is None:
return False
else:
return dico[prop] == default
def get_history_struct(gid):
gid_len = len(utils.to_hex(gid))
struct = [
('id', gid_len + 1, '>', 'id', utils.to_hex),
('title', lambda a: 3 * (a//4), '<', 'title', None),
('created', 19, '<', 'created', utils.sqlite_date_to_local),
]
if utils.get_terminal_width() > WIDE_HIST_THRESHOLD:
struct += [
('start', 19, '<', 'start', utils.sqlite_date_to_local),
('deadline', 19, '<', 'deadline', utils.sqlite_date_to_local),
('priority', 8, '>', 'priority', None)
]
struct += [
('context', lambda a: a//4 + a%4, '<', 'ctx_path', lambda a: a[1:]),
('status', 7, '<', 'done', lambda a: 'DONE' if a is not None else '')
]
return struct
def clr(component):
return CONFIG.get('Colors', component)
```
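A minimal sketch of the handler/feedback contract described in the HANDLERS comment above; the command and its output are hypothetical, only ``daccess.get_greatest_id`` and ``utils.to_hex`` come from the file itself.
```python
# Hypothetical handler/feedback pair following the documented contract:
# the handler returns (feedback_code, *data) and feedback_<code> turns
# the data into user-facing output.
def count_tasks(args, daccess):
    greatest = daccess.get_greatest_id()
    return 'count_tasks', greatest

def feedback_count_tasks(greatest):
    if greatest is None:
        print('No tasks yet.')
    else:
        print('Greatest task id so far: {}'.format(utils.to_hex(greatest)))

# Registering the command would only require an entry in DISPATCHER:
# DISPATCHER['count'] = count_tasks
```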
#### File: source/todo/utils.py
```python
import re, os
import os.path as op
from datetime import datetime, timedelta, timezone
from . import rainbow
DATA_DIR_NAME = '.toduh'
DATAFILE_NAME = 'data.json'
DATABASE_NAME = 'data.sqlite'
DATA_CTX_NAME = 'contexts'
VER_FILE_NAME = 'version'
# If a .toduh exists in the current working directory, it's used by the
# program. Otherwise the one in the home is used.
if op.exists(DATA_DIR_NAME) and op.isdir(DATA_DIR_NAME):
DATA_DIR = DATA_DIR_NAME
else:
DATA_DIR = op.expanduser(op.join('~', '.toduh'))
DB_PATH = op.join(DATA_DIR, DATABASE_NAME)
VERSION_PATH = op.join(DATA_DIR, VER_FILE_NAME)
DATAFILE_PATH = op.join(DATA_DIR, DATAFILE_NAME)
ISO_SHORT = '%Y-%m-%d'
ISO_DATE = ISO_SHORT+'T%H:%M:%SZ'
USER_DATE_FORMATS = [
ISO_SHORT,
ISO_SHORT+'T%H:%M:%S',
ISO_SHORT+' %H:%M:%S'
]
REMAINING = {
'w': 7*24*3600,
'd': 24*3600,
'h': 3600,
'm': 60,
's': 1
}
REMAINING_RE = re.compile(r'\A([0-9]+)([wdhms])\Z')
SQLITE_DT_FORMAT = '%Y-%m-%d %H:%M:%S'
NOW = datetime.utcnow().replace(tzinfo=timezone.utc)
def print_table(struct, iterable, is_default=lambda obj, p: False):
""" This function, which is responsible for printing tables to the
terminal, awaits a "structure", an iterable and a function. The structure
describes the columns of the table and their properties. It's a list of
tuples where each tuple describes one column of the table. A tuple has 5
elements corresponding to the following pieces of information:
1. The header of the column, a string
2. The width of the column given in number of characters. The width can
either be an integer or a function accepting one argument. Widths
given as integers will be subtracted from the terminal's width to
obtain the "available space". After that, widths given as functions
will be evaluated with the available space given as their argument and
the functions should return an integer being the actual width of the
corresponding column.
3. How the name of the column should be aligned in the table header.
Value should be either ">", "<", "=" or "^". See Python's format mini-
language.
4. For mappings, the name of the key of the map to print the value of. If
this element is set to None, the object itself will be used for
printing.
5. A function which takes as argument the value obtained according to the
previous element and return the string to finally print.
    The function `is_default` should accept a yielded object and element 4
    of the tuple, and return True if the object contains a "default value"
    at the given key. Such values aren't printed in the table.
    See the function get_history_struct for an example of a structure."""
term_width = get_terminal_width()
occupied = sum(w if isinstance(w, int) else 0 for _, w, *_ in struct)
available = term_width - occupied - (len(struct) - 1)
template, separator = '', ''
widths = {}
for header, width, align, *_ in struct:
w = max(0, width if isinstance(width, int) else width(available))
widths[header] = w
template = ' '.join([template, '{{: {}{}}}'.format(align, w)])
separator = ' '.join([separator, '-'*w])
template, separator = template[1:], separator[1:] # Starting space
table = template.format(*(t[0] for t in struct))
table = '\n'.join([table, separator])
for obj in iterable:
values = []
for h, _, _, a, f in struct:
f = f if f is not None else lambda a: a
if is_default(obj, a):
value = ''
else:
if a is None:
value = obj
else:
value = obj[a]
value = f(value)
if value is None:
value = ''
value = str(value).split('\n')[0]
value = limit_str(str(value), widths[h])
values.append(value)
line = template.format(*values)
table = '\n'.join([table, line])
print(table)
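# Illustrative `struct` for the function above (hypothetical data, not
# from the original code base): two fixed-width columns plus a title
# column taking all remaining terminal width.
#
#     struct = [
#         ('id', 4, '>', 'id', to_hex),
#         ('title', lambda avail: avail, '<', 'title', None),
#         ('priority', 8, '>', 'priority', None),
#     ]
#     print_table(struct, [{'id': 26, 'title': 'write docs', 'priority': 1}])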
def limit_str(string, length):
if len(string) <= length:
return string
else:
if length <= 3:
return string[:length]
else:
return string[:length-3] + '...'
def get_datetime(string, now, direction=1):
"""Parse the string `string` representating a datetime. The string can be
a delay such `2w` which means "two weeks". In this case, the datetime is
the datetime `now` plus/minus the delay. The `direction` option indicates
if the delay needs to be added to now (+1) or substracted from now (-1).
In any case, this returns a datetime object."""
match = REMAINING_RE.match(string)
if match is not None:
value, unit = match.groups()
seconds = int(value) * REMAINING[unit]
offset = direction * timedelta(seconds=seconds)
return now + offset
else:
dt = None
for pattern in USER_DATE_FORMATS:
try:
dt = datetime.strptime(string, pattern)
except ValueError:
continue
else:
dt = datetime.utcfromtimestamp(dt.timestamp())
dt = dt.replace(tzinfo=timezone.utc)
break
return dt
def parse_remaining(delta):
isOvertime = False
null_delta = timedelta(0)
if delta >= null_delta:
seconds = 3600 * 24 * delta.days + delta.seconds
if seconds >= 2 * 24 * 3600:
string = '{} days'.format(delta.days)
elif seconds >= 2*3600:
string = '{} hours'.format(24*delta.days + delta.seconds // 3600)
elif seconds >= 2*60:
string = '{} minutes'.format(seconds // 60)
else:
string = '{} seconds'.format(seconds)
else:
isOvertime = True
string = 'no time'
return string , isOvertime
def input_from_editor(init_content, editor):
import subprocess
with CustomTemporaryFile() as filename:
with open(filename, 'w') as edit_file:
edit_file.write(init_content)
try:
subprocess.call([editor, filename])
except FileNotFoundError:
raise CannotOpenEditorError('Cannot open editor', editor=editor)
with open(filename) as edit_file:
new_content = edit_file.read()
return new_content
class CannotOpenEditorError(Exception):
def __init__(self, message, editor):
super().__init__(message)
self.editor = editor
class CustomTemporaryFile:
def __enter__(self):
import uuid
self.path = op.join(DATA_DIR, '.todoedit-'+uuid.uuid4().hex)
return self.path
def __exit__(self, type_, value, traceback):
os.remove(self.path)
return type_ is None
def get_relative_path(parent, desc):
rel = desc[len(parent):]
if rel.startswith('.'):
rel = rel[1:]
return rel
def to_hex(integer):
return hex(integer)[2:] # 0x...
def get_terminal_width():
try:
size = os.get_terminal_size()[0]
except OSError:
size = 80
return size
def sqlite_date_to_local(sqlite_date):
if sqlite_date is None:
return ''
try:
dt = datetime.strptime(sqlite_date, SQLITE_DT_FORMAT)
except ValueError:
return ''
dt = dt.replace(tzinfo=timezone.utc)
try:
local_dt = dt.astimezone(tz=None)
except ValueError:
# Some exotic dates such as '0001-01-01 00:00:00' don't work well
return sqlite_date
return local_dt.strftime(SQLITE_DT_FORMAT)
def get_highlights_term(string, term, str_color, case=False):
if str_color is not None:
escape = rainbow.get_escape(*str_color)
if escape is None:
str_color = None
if len(term) > 0:
def term_repl(matchobj):
repl = '\33[1;31m' + matchobj.group(0) + '\33[0m'
if str_color is not None:
repl = '\33[0m' + repl + escape
return repl
args = dict(
pattern=term,
repl=term_repl,
string=string
)
if not case:
args.update(flags=re.IGNORECASE)
highlighted = re.sub(**args)
if str_color:
highlighted = escape + highlighted + '\33[0m'
return highlighted
else:
if str_color is not None:
return escape + string + '\33[0m'
else:
return string
def compare_versions(vA, vB):
# ((major, minor, patch), tag, tagNumber)
(releaseA, tagA, tagnumA), (releaseB, tagB, tagnumB) = [
parse_version(v) for v in [vA, vB]
]
if releaseA != releaseB:
return 1 if releaseA > releaseB else -1
else:
if tagA is None:
if tagB is None:
return 0
elif tagB < 'final':
return 1
else:
return -1
elif tagB is None:
if tagA < 'final':
return -1
else:
return 1
elif tagA != tagB:
return 1 if tagA > tagB else -1
else:
if tagnumA > tagnumB:
return 1
elif tagnumA == tagnumB:
return 0
else:
return -1
def parse_version(v):
"""
A version is in the form <major>.<minor>.<patch><tag><tagnumber>
<minor> and <patch> can be omitted (in which case they count for 0)
<tag> and <tagnumber> are optional
"""
undotted = v.split('.')
if len(undotted) == 0:
raise ValueError("Versio number cannot be empty")
if len(undotted) > 3:
raise ValueError("Version number cannot have more than 3 dots")
tag_match = re.match('([0-9]+)([a-z]+)([0-9]+)?', undotted[-1])
if tag_match is not None:
least_number, tag, tagnumber = tag_match.groups()
else:
least_number, tag, tagnumber = undotted[-1], None, None
if tagnumber is None:
tagnumber = 0
release = tuple(undotted[:-1]) + (least_number,)
while len(release) < 3:
release = release + (0,)
release = tuple(int(n) for n in release)
return (release, tag, int(tagnumber))
```
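A few illustrative calls showing the version grammar these two helpers implement (version strings are hypothetical):
```python
parse_version('3.2.1')                 # -> ((3, 2, 1), None, 0)
parse_version('3.2.1b2')               # -> ((3, 2, 1), 'b', 2)
compare_versions('3.2.1', '3.2.1b2')   # -> 1: an untagged release sorts after 'b2'
```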
|
{
"source": "Jeasonlee313/paperdev_Phy_SORT-",
"score": 2
}
|
#### File: Jeasonlee313/paperdev_Phy_SORT-/app.py
```python
import os
import argparse
import torch
import warnings
import cv2
import numpy as np
from utils.parser import get_config
from utils.log import get_logger
from utils.draw import draw_boxes
from utils.io import write_results
from deep_sort import build_tracker
from deep_sort.sort.kalman_filter import KalmanFilter as kf
h0 = np.array([[0.176138, 0.647589, -63.412272],
[-0.180912, 0.622446, -0.125533],
[-0.000002, 0.001756, 0.102316]])
h1 = np.array([[0.177291, 0.004724, 31.224545],
[0.169895, 0.661935, -79.781865],
[-0.000028, 0.001888, 0.054634]])
h2 = np.array([[-0.104843, 0.099275, 50.734500],
[0.107082, 0.102216, 7.822562],
[-0.000054, 0.001922, -0.068053]])
h3 = np.array([[-0.142865, 0.553150, -17.395045],
[-0.125726, 0.039770, 75.937144],
[-0.000011, 0.001780, 0.015675]])
class VideoTracker(object):
def __init__(self, cfg, args, video_path):
self.cfg = cfg
self.args = args
self.video_path = video_path
self.logger = get_logger("root")
use_cuda = args.use_cuda and torch.cuda.is_available()
if not use_cuda:
warnings.warn("Running in cpu mode which maybe very slow!", UserWarning)
if args.display:
cv2.namedWindow("test", cv2.WINDOW_NORMAL)
cv2.resizeWindow("test", args.display_width, args.display_height)
if args.cam != -1:
print("Using webcam " + str(args.cam))
self.vdo = cv2.VideoCapture(args.cam)
else:
self.vdo = cv2.VideoCapture()
self.deepsort = build_tracker(cfg, use_cuda=use_cuda, h=h3)
def __enter__(self):
if self.args.cam != -1:
ret, frame = self.vdo.read()
assert ret, "Error: Camera error"
            # frame.shape is (height, width, channels)
            self.im_width = frame.shape[1]
            self.im_height = frame.shape[0]
else:
assert os.path.isfile(self.video_path), "Video path error"
self.vdo.open(self.video_path)
self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
assert self.vdo.isOpened()
self.detections = np.loadtxt(self.args.detect_file, delimiter=',')
if self.args.save_path:
# os.makedirs(self.args.save_path, exist_ok=True)
self.save_video_path = os.path.join(self.args.save_path, self.args.save_name + ".avi")
self.save_results_path = os.path.join(self.args.save_path, self.args.save_name + ".txt")
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
self.writer = cv2.VideoWriter(self.save_video_path, fourcc, 25, (self.im_width, self.im_height))
self.logger.info("Save results to {}".format(self.args.save_path))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
print(exc_type, exc_val, exc_tb)
def run(self):
results = []
frame_ids = self.detections[:, 0]
xywhs = self.detections[:, 2:6]
xywhs[:, 0:2] += xywhs[:, 2:4] / 2
confs = self.detections[:, 6]
frame_id = 0
self.vdo.set(cv2.CAP_PROP_POS_FRAMES, 0)
while self.vdo.grab():
frame_id += 1
_, ori_im = self.vdo.retrieve()
# print("frame_id: ", frame_id, " ")
im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
mask = frame_ids == frame_id
xywh = xywhs[mask]
conf = confs[mask]
# print("frame_id: ", frame_id, " xywh", xywh.shape)
outputs = self.deepsort.update(xywh, conf, im)
if len(outputs) > 0:
tlwh = []
xyxys = outputs[:, :4]
ids = outputs[:, -1]
ori_im = draw_boxes(ori_im, xyxys, ids)
for xyxy in xyxys:
tlwh.append(self.deepsort._xyxy_to_tlwh(xyxy))
results.append((frame_id, tlwh, ids))
if self.args.display:
cv2.imshow("test", ori_im)
if cv2.waitKey(1) == 27:
break
if self.args.save_path:
self.writer.write(ori_im)
write_results(self.save_results_path, results, 'mot')
# self.logger.info("time: {:.03f}s, fps")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--VIDEO_PATH", type=str, default="./data/6p-c0.avi")
parser.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml")
parser.add_argument("--display", action="store_true", default=True)
parser.add_argument("--frame_interval", type=int, default=1)
parser.add_argument("--display_width", type=int, default=960)
parser.add_argument("--display_height", type=int, default=540)
parser.add_argument("--save_path", type=str, default="./output/")
parser.add_argument("--cpu", dest="use_cuda", action="store_false", default=True)
parser.add_argument("--camera", action="store", dest="cam", type=int, default="-1")
parser.add_argument("--save_name", type=str, default="6p-c0")
parser.add_argument("--detect_file", type=str, default="./data/6p-c0.txt")
# parser.add_argument("--h", type=str, default="")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
cfg = get_config()
cfg.merge_from_file(args.config_deepsort)
with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:
# with torch.no_grad():
vdo_trk.run()
```
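The `h0`–`h3` arrays above look like per-camera 3x3 homographies handed to the tracker via `build_tracker(..., h=h3)`. A minimal sketch, assuming they map image pixels to ground-plane coordinates, of how such a matrix is applied with OpenCV:
```python
import cv2
import numpy as np

# Assumption: h3 maps image pixels to world/plane coordinates (not confirmed
# by this file; inferred from its use in build_tracker).
h3 = np.array([[-0.142865, 0.553150, -17.395045],
               [-0.125726, 0.039770, 75.937144],
               [-0.000011, 0.001780, 0.015675]])

# Project the bottom-centre of a bounding box (pixel coordinates).
pixel = np.array([[[480.0, 540.0]]], dtype=np.float32)  # shape (1, 1, 2)
ground = cv2.perspectiveTransform(pixel, h3)
print(ground[0, 0])  # (x, y) on the projected plane
```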
|
{
"source": "jeasonstudio/mo-dashboard",
"score": 2
}
|
#### File: jeasonstudio/mo-dashboard/run.py
```python
from flask import Flask, request
from flask_restful import Resource, Api
from flask import jsonify
from flask_cors import CORS
from xmnlp import XmNLP
import requests
from urllib.parse import quote
import time
from hashlib import md5
import json
def make_md5(s, encoding='utf-8'):
return md5(s.encode(encoding)).hexdigest()
def get_tc_res(valueText, time_stamp):
host = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_textpolar'
APP_ID = '1106884428'
APP_KEY = 'jCTZ5uhKuSv3gkR8'
nonce_str = 'fa577ce340859f9fe'
text = quote(valueText)
# build the signed query string (avoid shadowing the built-in `str`
# and the `request` name imported from flask)
params = 'app_id=' + APP_ID + '&nonce_str=' + nonce_str + '&text=' + text + '&time_stamp=' + time_stamp
sig = make_md5(params + '&app_key=' + APP_KEY, encoding='utf-8').upper()
session = requests.session()
req = session.get(host + '?sign=' + sig + '&' + params)
return json.loads(req.text)['data']
app = Flask(__name__, static_url_path='')
CORS(app)
api = Api(app)
class Sentiment(Resource):
def get(self, text):
xm = XmNLP(text)
time_stamp = str(int(time.time()))
data = get_tc_res(text, time_stamp)
result = { 'polar': 0, 'confd': 0, 'sentiment': 0 }
result['sentiment'] = xm.sentiment()
if data['polar'] == 0:
result['polar'] = 1
else:
result['polar'] = data['polar']
# equivalent one-liner: result['polar'] = 1 if data['polar'] == 0 else data['polar']
result['confd'] = data['confd']
return jsonify(result)
@app.route('/')
def index():
return app.send_static_file('index.html')
api.add_resource(Sentiment, '/query/<text>')
if __name__ == '__main__':
app.run(host='0.0.0.0', port='5002')
'''
GET /query/:text
response:
{
polar: Int, // -1,0,1
confd: Float, // confidence score
sentiment: Float // sentiment polarity
}
'''
```
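A minimal client sketch for the `/query/<text>` endpoint defined above, assuming the service runs locally on port 5002 as in `__main__` (the sample text is illustrative):
```python
import requests

# Response shape follows the docstring at the bottom of run.py.
resp = requests.get('http://localhost:5002/query/' + '今天天气真好')
data = resp.json()
print(data['polar'], data['confd'], data['sentiment'])
```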
|
{
"source": "jeastham1993/epsagon-dotnet",
"score": 2
}
|
#### File: epsagon-dotnet/scripts/update-all-packages.py
```python
from argparse import ArgumentParser
import shutil
import subprocess
import xml.etree.ElementTree as ET
parser = ArgumentParser()
parser.add_argument('--proj', help="path to the .csproj file to update")
args = parser.parse_args()
def find_version(package_name):
return subprocess.getoutput(f"dotnet search {package_name}"
f"| awk -F '[[:space:]][[:space:]]+' '$1 == \"{package_name}\" {{ print $4 }}'").strip()
tree = ET.parse(args.proj)
root = tree.getroot()
packages = root.findall('./ItemGroup/PackageReference')
for package in packages:
name = package.attrib['Include']
latest_version = find_version(name)
print(f'package: {name} -> {latest_version}')
package.attrib['Version'] = latest_version
tree.write(args.proj)
```
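A self-contained sketch of the XML round-trip the script performs, using an inline `.csproj` fragment (the package name and versions here are made up):
```python
import xml.etree.ElementTree as ET

csproj = """<Project Sdk="Microsoft.NET.Sdk">
  <ItemGroup>
    <PackageReference Include="Newtonsoft.Json" Version="12.0.1" />
  </ItemGroup>
</Project>"""

root = ET.fromstring(csproj)
for ref in root.findall('./ItemGroup/PackageReference'):
    ref.attrib['Version'] = '13.0.3'  # stand-in for find_version()
print(ET.tostring(root, encoding='unicode'))
```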
|
{
"source": "J-east/John-Bio",
"score": 2
}
|
#### File: John-Bio/pages/views.py
```python
from django.shortcuts import render, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.contrib import messages
# Create your views here.
def home(request):
template = 'resume.html'
context = {}
return render(request, template, context)
def resume(request):
template = 'resume.html'
context = {}
return render(request, template, context)
def film(request):
template = 'film.html'
context = {}
return render(request, template, context)
def photos(request):
template = 'photos.html'
context = {}
return render(request, template, context)
def projects(request):
template = 'projects.html'
context = {}
return render(request, template, context)
def web_design(request):
template = 'web-design.html'
context = {}
return render(request, template, context)
def music(request):
template = 'music.html'
context = {}
return render(request, template, context)
def contact(request):
template = 'contact.html'
context = {}
return render(request, template, context)
```
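A hypothetical `urls.py` wiring for these views (not part of the file above); it uses the pre-Django-2.0 `url()` style, matching the `django.core.urlresolvers` import:
```python
from django.conf.urls import url
from pages import views

urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^resume/$', views.resume, name='resume'),
    url(r'^film/$', views.film, name='film'),
    url(r'^contact/$', views.contact, name='contact'),
]
```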
|
{
"source": "Jeasun007/pytest_exzample",
"score": 2
}
|
#### File: src/_pytest/deprecated.py
```python
from warnings import warn
from _pytest.warning_types import PytestDeprecationWarning
from _pytest.warning_types import UnformattedWarning
# set of plugins which have been integrated into the core; we use this list to ignore
# them during registration to avoid conflicts
DEPRECATED_EXTERNAL_PLUGINS = {
"pytest_catchlog",
"pytest_capturelog",
"pytest_faulthandler",
}
FILLFUNCARGS = UnformattedWarning(
PytestDeprecationWarning,
"{name} is deprecated, use "
"function._request._fillfixtures() instead if you cannot avoid reaching into internals.",
)
PYTEST_COLLECT_MODULE = UnformattedWarning(
PytestDeprecationWarning,
"pytest.collect.{name} was moved to pytest.{name}\n"
"Please update to the new name.",
)
YIELD_FIXTURE = PytestDeprecationWarning(
"@pytest.yield_fixture is deprecated.\n"
"Use @pytest.fixture instead; they are the same."
)
MINUS_K_DASH = PytestDeprecationWarning(
"The `-k '-expr'` syntax to -k is deprecated.\nUse `-k 'not expr'` instead."
)
MINUS_K_COLON = PytestDeprecationWarning(
"The `-k 'expr:'` syntax to -k is deprecated.\n"
"Please open an issue if you use this and want a replacement."
)
WARNING_CAPTURED_HOOK = PytestDeprecationWarning(
"The pytest_warning_captured is deprecated and will be removed in a future release.\n"
"Please use pytest_warning_recorded instead."
)
FSCOLLECTOR_GETHOOKPROXY_ISINITPATH = PytestDeprecationWarning(
"The gethookproxy() and isinitpath() methods of FSCollector and Package are deprecated; "
"use self.session.gethookproxy() and self.session.isinitpath() instead. "
)
STRICT_OPTION = PytestDeprecationWarning(
"The --strict option is deprecated, use --strict-markers instead."
)
PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.")
UNITTEST_SKIP_DURING_COLLECTION = PytestDeprecationWarning(
"Raising unittest.SkipTest to skip tests during collection is deprecated. "
"Use pytest.skip() instead."
)
ARGUMENT_PERCENT_DEFAULT = PytestDeprecationWarning(
'pytest now uses argparse. "%default" should be changed to "%(default)s"',
)
ARGUMENT_TYPE_STR_CHOICE = UnformattedWarning(
PytestDeprecationWarning,
"`type` argument to addoption() is the string {typ!r}."
" For choices this is optional and can be omitted, "
" but when supplied should be a type (for example `str` or `int`)."
" (options: {names})",
)
ARGUMENT_TYPE_STR = UnformattedWarning(
PytestDeprecationWarning,
"`type` argument to addoption() is the string {typ!r}, "
" but when supplied should be a type (for example `str` or `int`)."
" (options: {names})",
)
# You want to make some `__init__` or function "private".
#
# def my_private_function(some, args):
# ...
#
# Do this:
#
# def my_private_function(some, args, *, _ispytest: bool = False):
# check_ispytest(_ispytest)
# ...
#
# Change all internal/allowed calls to
#
# my_private_function(some, args, _ispytest=True)
#
# All other calls will get the default _ispytest=False and trigger
# the warning (possibly error in the future).
def check_ispytest(ispytest: bool) -> None:
if not ispytest:
warn(PRIVATE, stacklevel=3)
```
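A short sketch of the `_ispytest` pattern described in the comment block above (`MyClass` is illustrative, not part of pytest):
```python
class MyClass:
    def __init__(self, value: int, *, _ispytest: bool = False) -> None:
        check_ispytest(_ispytest)
        self.value = value

MyClass(1, _ispytest=True)   # internal call: no warning
MyClass(1)                   # external call: warns with the PRIVATE message
```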
|
{
"source": "jeasung-pf/MORAN_v2",
"score": 3
}
|
#### File: jeasung-pf/MORAN_v2/create_dataset.py
```python
import os
import lmdb # install lmdb by "pip install lmdb"
import cv2
import numpy as np
def checkImageIsValid(imageBin):
if imageBin is None:
return False
imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
if img is None:
return False
imgH, imgW = img.shape[0], img.shape[1]
if imgH * imgW == 0:
return False
return True
def writeCache(env, cache):
with env.begin(write=True) as txn:
for k, v in cache.items():
# keys are str; values may be str (labels/counters) or bytes (image data),
# so only encode what is not already bytes
k = k.encode() if isinstance(k, str) else k
v = v.encode() if isinstance(v, str) else v
txn.put(k, v)
def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
"""
Create LMDB dataset for CRNN training.
ARGS:
outputPath : LMDB output path
imagePathList : list of image path
labelList : list of corresponding groundtruth texts
lexiconList : (optional) list of lexicon lists
checkValid : if true, check the validity of every image
"""
assert(len(imagePathList) == len(labelList))
nSamples = len(imagePathList)
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
for i in range(nSamples):
imagePath = imagePathList[i]
label = labelList[i]
if not os.path.exists(imagePath):
print('%s does not exist' % imagePath)
continue
with open(imagePath, 'rb') as f:
imageBin = f.read()
if checkValid:
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % imagePath)
continue
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
cache[imageKey] = imageBin
cache[labelKey] = label
if lexiconList:
lexiconKey = 'lexicon-%09d' % cnt
cache[lexiconKey] = ' '.join(lexiconList[i])
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt-1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
if __name__ == '__main__':
out_path = "/dltraining/datasets/nips/lmdb"
image_files = []
labels = []
lexicon = []
nips_datasets_path = "/dltraining/datasets/nips"
if os.path.isfile(os.path.join(nips_datasets_path, "annotation_train.txt")):
with open(os.path.join(nips_datasets_path, "annotation_train.txt")) as file:
for line in file:
lines = line.strip("\n").split(" ")
image_path = os.path.join(nips_datasets_path, lines[0])
image_file = os.path.basename(image_path)
label = image_file.split("_")[1]
image_files.append(image_path)
labels.append(label)
# if os.path.isfile(os.path.join(nips_datasets_path, "lexicon.txt")):
# with open(os.path.join(nips_datasets_path, "lexicon.txt")) as file:
# for line in file:
# line = line.strip("\n")
# lexicon.append(line)
createDataset(out_path, image_files, labels)
```
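A hedged sketch of reading the dataset back, following the `image-%09d` / `label-%09d` key scheme used by `createDataset` above:
```python
import lmdb

env = lmdb.open("/dltraining/datasets/nips/lmdb", readonly=True, lock=False)
with env.begin() as txn:
    n_samples = int(txn.get('num-samples'.encode()))
    image_bin = txn.get(('image-%09d' % 1).encode())
    label = txn.get(('label-%09d' % 1).encode()).decode()
print(n_samples, label, len(image_bin))
```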
|
{
"source": "jeasung-pf/pytorch-deeplab-xception",
"score": 3
}
|
#### File: pytorch-deeplab-xception/segmentation/mock_client.py
```python
from __future__ import print_function
import os
import grpc
import argparse
from PIL import Image, ImageFile
from tqdm import tqdm
from dataloaders import make_data_loader
from protocol import segmentation_pb2
from protocol import segmentation_pb2_grpc
from gateway.protocol import gateway_pb2
def guide_recv_feature(stub):
prefix = "/dltraining/test/VOCdevkit"
with open(os.path.join(prefix, "ImageSets/Segmentation/test.txt"), 'r') as f:
for line in tqdm(f):
line = line.strip()
line = line + "jpg"
with open(os.path.join(prefix, "JPEGImages", line)) as image:
image_encoded = image.read()
parser = ImageFile.Parser()
parser.feed(image)
image = parser.close()
feature = gateway_pb2.Feature(image_encoded=image_encoded,
image_filename=line,
image_format="jpg",
image_height=image.height,
image_width=image.width,
image_segmentation_class_encoded="",
image_segmentation_class_format="jpg")
try:
response = stub.recvFeature(feature)
except Exception as e:
print(e)
continue
encoded = response.image_segmentation_class_encoded
segmentation = Image.frombytes('L', (image.width, image.height), encoded)
with open(os.path.join(prefix, "RESULT", line), "w+") as fp:
segmentation.save(fp)
if __name__ == "__main__":
with grpc.insecure_channel("localhost:50051") as channel:
stub = segmentation_pb2_grpc.SegmentationStub(channel)
print("---------- RecvFeature ----------")
guide_recv_feature(stub)
```
|
{
"source": "jeaubin/genielibs",
"score": 2
}
|
#### File: stages/iosxe/image_handler.py
```python
import os
# Genie
from genie.libs.clean.stages.image_handler import ImageHandler as CommonImageHandler
class ImageHandler(CommonImageHandler):
def __init__(self, device, images, *args, **kwargs):
super().__init__(device, images, *args, **kwargs)
# Ensure 'images' provided is valid
if isinstance(images, list):
# set in CommonImageHandler
pass
elif isinstance(images.get('image', {}).get('file', {}), list):
self.images = images['image']['file']
else:
raise Exception(
"For 'iosxe' images must be one of the following formats:\n\n"
" images: [<image>]\n\n"
"or\n\n"
" images:\n"
" image:\n"
" file: [<image>]")
def update_tftp_boot(self):
'''Update clean section 'tftp_boot' with image information'''
# Init 'tftp_boot' defaults
self.tftp_boot_images = self.device.clean.setdefault('tftp_boot', {}).\
setdefault('image', [])
# Add image to key 'files' in section tftp_boot
self.tftp_boot_images.extend(self.images)
def update_copy_to_linux(self):
'''Update clean section 'copy_to_linux' with image information'''
# Init 'copy_to_linux' defaults
self.ctl_files = self.device.clean.setdefault('copy_to_linux', {}).\
setdefault('origin', {}).\
setdefault('files', [])
# Add image to key 'files' in section copy_to_linux
self.ctl_files.extend(self.images)
def update_copy_to_device(self):
'''Update clean stage 'copy_to_device' with image information'''
# Init 'copy_to_device' defaults
self.ctd_files = self.device.clean.setdefault('copy_to_device', {}).\
setdefault('origin', {}).\
setdefault('files', [])
if not self.ctl_files:
# 'copy_to_linux' is not executed before 'copy_to_device'
self.ctd_files.extend(self.images)
else:
# 'copy_to_linux' is executed before 'copy_to_device'
# Get destination directory of 'copy_to_linux'
ctl_dest_dir = self.device.clean.get('copy_to_linux', {}).\
get('destination', {}).\
get('directory')
if not ctl_dest_dir:
raise Exception("Clean section 'copy_to_linux' missing "
"mandatory key 'destination' or 'directory'")
# Add all files from 'copy_to_linux' to 'copy_to_device'
for file in self.ctl_files:
# Get base filename
filename = os.path.basename(file)
# Add hostname
if self.append_hostname:
filename = self.add_hostname(filename)
# Add unique number to filename
if self.unique_filename:
filename = self.add_unique_filename(filename)
# Set filename after linux copy
filename_on_linux = os.path.join(ctl_dest_dir, filename)
# Add file to 'files' key under 'copy_to_device'
self.ctd_files.append(filename_on_linux)
def update_change_boot_variable(self):
'''Update clean stage 'change_boot_variable' with image information'''
cbv_images = self.device.clean.setdefault('change_boot_variable', {}).\
setdefault('images', [])
if not self.ctd_files:
# 'copy_to_device' was not executed before 'change_boot_variable'
cbv_images.extend(self.images)
else:
# 'copy_to_device' is executed before 'change_boot_variable'
# Get destination directory of 'copy_to_device'
ctd_dest_dir = self.device.clean.get('copy_to_device', {}).\
get('destination', {}).\
get('directory')
if not ctd_dest_dir:
raise Exception("Clean section 'copy_to_device' missing "
"mandatory key 'destination' or 'directory'")
for file in self.images:
# Get base filename
filename = os.path.basename(file)
if self.append_hostname:
filename = self.add_hostname(filename)
# Set filename on device
filename_on_device = os.path.join(ctd_dest_dir, filename)
# Add file to 'images' key under 'change_boot_variable'
cbv_images.append(filename_on_device)
def update_verify_running_image(self):
'''Update clean stage 'verify_running_image' with image information'''
# Init 'verify_running_image' defaults
vrv_images = self.device.clean.setdefault('verify_running_image', {}).\
setdefault('images', [])
if not self.ctd_files:
# 'copy_to_device' was not executed before 'verify_running_image'
vrv_images.extend(self.images)
else:
# 'copy_to_device' is executed before 'verify_running_image'
# Get destination directory of 'copy_to_device'
ctd_dest_dir = self.device.clean.get('copy_to_device', {}).\
get('destination', {}).\
get('directory')
if not ctd_dest_dir:
raise Exception("Clean section 'copy_to_device' missing "
"mandatory key 'destination' or 'directory'")
for file in self.images:
# Get base filename
filename = os.path.basename(file)
if self.append_hostname:
filename = self.add_hostname(filename)
# Set filename on device
filename_on_device = os.path.join(ctd_dest_dir, filename)
# Add file to 'images' key under 'verify_running_image'
vrv_images.append(filename_on_device)
```
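The two accepted `images` shapes from the exception message above, expressed as Python structures (paths are illustrative):
```python
# Flat form: a plain list of image paths
images_flat = ['/images/asr1000-universalk9.bin']
# Nested form: normalised to the same list inside __init__
images_nested = {'image': {'file': ['/images/asr1000-universalk9.bin']}}
```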
#### File: junos/interface/get.py
```python
import re
import logging
import copy
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.sdk.libs.utils.normalize import GroupKeys
from genie.utils import Dq
# Pyats
from pyats.utils.objects import find, R
# unicon
from unicon.core.errors import SubCommandFailure
log = logging.getLogger(__name__)
def get_interface_address_mask_running_config(
device, interface, address_family):
""" Get interface address and mask from show running-config interface {interface}
Args:
device ('obj'): Device object
interface ('str'): Interface name
address_family ('str'): Address family
Returns:
(Interface IP address, Interface Mask)
Raise:
None
"""
try:
output = device.execute('show configuration interfaces {interface}'
.format(interface=interface))
except SubCommandFailure:
return None, None
if not output:
return None, None
if address_family in ['ipv4', 'inet']:
# address 192.168.0.1/32
p1 = re.compile(r'address +(?P<ip>[\d\.]+)/(?P<mask>\d+);')
elif address_family in ['ipv6', 'inet6']:
# address 2001:db8:1005:4401::b/128
p1 = re.compile(r'address +(?P<ip>[\w\:]+)/(?P<mask>\d+);')
else:
log.info(
'Must provide one of the following address families: "ipv4", "ipv6", "inet", "inet6"')
return None, None
match = p1.findall(output)
if match:
return match[0][0], device.api.int_to_mask(int(match[0][1]))
return None, None
def get_interface_ip_address(device, interface, address_family,
return_all=False):
""" Get interface ip address from device
Args:
interface('str'): Interface to get address
device ('obj'): Device object
address_family ('str'): Address family
return_all ('bool'): return List of values
Returns:
None
ip_address ('str'): If has multiple addresses
will return the first one.
Raises:
None
"""
if address_family not in ["ipv4", "ipv6", "inet", "inet6"]:
log.info('Must provide one of the following address families: '
'"ipv4", "ipv6", "inet", "inet6"')
return
if address_family == "ipv4":
address_family = "inet"
elif address_family == "ipv6":
address_family = "inet6"
try:
out = device.parse('show interfaces terse {interface}'.format(
interface=interface))
except SchemaEmptyParserError:
return
# Example dictionary structure:
# {
# "ge-0/0/0.0": {
# "protocol": {
# "inet": {
# "10.189.5.93/30": {
# "local": "10.189.5.93/30"
# }
# },
# "inet6": {
# "2001:db8:223c:2c16::1/64": {
# "local": "2001:db8:223c:2c16::1/64"
# },
# "fe80::250:56ff:fe8d:c829/64": {
# "local": "fe80::250:56ff:fe8d:c829/64"
# }
# },
# }
# }
# }
found = Dq(out).contains(interface).contains(address_family). \
get_values("local")
if found:
if return_all:
return found
return found[0]
return None
def get_interface_speed(device, interface, bit_size='gbps'):
"""Get speed of an interface
Args:
device (obj): device object
interface (str): interface name
bit_size (str): desired return size (gbps/mbps/kbps)
Returns:
Device speed or None
Raises:
None
"""
try:
out = device.parse('show interfaces extensive {interface}'.format(
interface=interface.split('.')[0]
))
except SchemaEmptyParserError as e:
return None
# Example Dictionary
# "physical-interface": [
# {
# "name": "ge-0/0/0",
# "speed": "1000mbps",
# }
speed_matrix = {
'kbps': {
'kbps': 1,
'mbps': 1000,
'gbps': 1000000,
},
'mbps': {
'kbps': 0.001,
'mbps': 1,
'gbps': 1000,
},
'gbps': {
'kbps': 0.000001,
'mbps': 0.001,
'gbps': 1,
},
}
interfaces_list = Dq(out).get_values('physical-interface')
for interfaces_dict in interfaces_list:
speed_ = Dq(interfaces_dict).get_values('speed', 0)
if not speed_:
continue
if 'kbps' in speed_:
speed_ = int(re.sub(r'[a-z,]', '', speed_)) / speed_matrix['kbps'][bit_size]
elif 'mbps' in speed_:
speed_ = int(re.sub(r'[a-z,]', '', speed_)) / speed_matrix['mbps'][bit_size]
else:
speed_ = int(re.sub(r'[a-z,]', '', speed_)) / speed_matrix['gbps'][bit_size]
return speed_
```
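A usage sketch (`device` stands for a connected pyATS/Genie device handle; interface names are illustrative). An interface reporting `1000mbps` converts to gigabits as 1000 / speed_matrix['mbps']['gbps'] = 1000 / 1000 = 1.0:
```python
ip = get_interface_ip_address(device, 'ge-0/0/1.0', 'ipv4')
speed = get_interface_speed(device, 'ge-0/0/1.0', bit_size='gbps')  # e.g. 1.0
```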
#### File: junos/ldp/verify.py
```python
import logging
# Genie
from genie.utils.timeout import Timeout
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.utils import Dq
# pyATS
from genie.utils import Dq
log = logging.getLogger(__name__)
def verify_ldp_session(
device,
address=None,
expected_session_state=None,
max_time=60,
check_interval=10,
):
"""Verifies ldp session exists
Args:
device (obj): device object
address (str): Neighbor address to check for
expected_session_state (str): Expected LDP session state
max_time (int): Maximum timeout time
check_interval (int): Interval to check
Returns:
True if a matching session is found within max_time, else False
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
out = None
try:
out = device.parse("show ldp session")
except SchemaEmptyParserError:
timeout.sleep()
continue
sessions = out.q.get_values('ldp-session')
for session in sessions:
if address:
if session['ldp-neighbor-address'] != address:
continue
if expected_session_state:
if session['ldp-session-state'] != expected_session_state:
continue
return True
timeout.sleep()
return False
```
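A usage sketch (`device` stands for a connected pyATS device; the address and state values are illustrative):
```python
ok = verify_ldp_session(device, address='10.0.0.2',
                        expected_session_state='Operational',
                        max_time=60, check_interval=10)
assert ok, 'LDP session did not reach the expected state in time'
```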
#### File: junos/routing/get.py
```python
import re
import logging
# unicon
from unicon.core.errors import SubCommandFailure
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.sdk.apis.utils import get_config_dict
from genie.utils import Dq
log = logging.getLogger(__name__)
def get_active_outgoing_interface(device, destination_address, extensive=False):
""" Get active outgoing interface value
Args:
device (`obj`): Device object
destination_address (`str`): Destination address value
extensive ('bool'): Try command with extensive
Returns:
Interface name
"""
try:
if extensive:
out = device.parse('show route protocol static extensive')
else:
out = device.parse('show route protocol static')
except SchemaEmptyParserError:
return None
# Example dictionary structure:
# {
# "rt": [
# {
# "rt-destination": "10.169.14.240/32",
# "rt-entry": {
# "nh": [
# {
# "to": "10.169.14.121",
# "via": "ge-0/0/1.0"
# }
# ],
# "rt-tag": "100",
# "preference": "5",
# "protocol-name": "Static"
# }
# }
# ],
# "table-name": "inet.0",
# "total-route-count": "240"
# },
rt_list = Dq(out).get_values("rt")
for rt_dict in rt_list:
rt_destination_ = Dq(rt_dict).get_values("rt-destination", 0)
# guard: get_values returns [] when the key is missing
if not rt_destination_ or not rt_destination_.startswith(destination_address):
continue
active_tag_ = Dq(rt_dict).get_values("active-tag", None)
if not active_tag_:
continue
via_ = Dq(rt_dict).get_values("via", None)
if not via_:
continue
return via_.pop()
return None
def get_route_destination_address(device, extensive=None, prefix='inet.0', protocol='Direct',
interface='ge-0/0/0.0'):
"""Get destination address that matches criteria
Args:
device (obj): device object
extensive (bool): Show extensive output. Defaults to None.
prefix (str, optional): Route prefix. Defaults to 'inet.0'.
protocol (str, optional): Route protocol. Defaults to 'Direct'.
interface (str, optional): Route interface. Defaults to 'ge-0/0/0.0'.
Returns:
str: The destination address
"""
try:
if extensive:
out = device.parse('show route extensive')
else:
out = device.parse('show route')
except SchemaEmptyParserError:
return None
# Example dictionary structure:
# {
# 'rt': [{'rt-destination': '0.0.0.0/0',
# 'rt-entry': {'active-tag': '*',
# 'age': {'#text': '02:53:14'},
# 'nh': [{'to': '172.16.1.254',
# 'via': 'ge-0/0/0.0'}],
# 'preference': '12',
# 'protocol-name': 'Access-internal'}},
# {'rt-destination': '192.168.3.11/24',
# 'rt-entry': {'active-tag': '*',
# 'age': {'#text': '5w1d '
# '19:01:21'},
# 'nh': [{'via': 'ge-0/0/3.0'}],
# 'preference': '0',
# 'protocol-name': 'Direct'}},
# {'rt-destination': '172.16.17.32/32',
# 'rt-entry': {'active-tag': '*',
# 'age': {'#text': '5w1d '
# '19:01:21'},
# 'nh': [{'nh-local-interface': 'ge-0/0/3.0'}],
# 'preference': '0',
# 'protocol-name': 'Local'}},
# },
route_table_list = Dq(out).get_values("route-table")
for route in route_table_list:
if prefix:
prefix_ = Dq(route).get_values('table-name', 0)
if not prefix_.lower().startswith(prefix.lower()):
continue
rt_list = Dq(route).get_values('rt')
for rt_dict in rt_list:
if protocol:
protocol_ = Dq(rt_dict).get_values('protocol-name', 0)
if not protocol_.lower().startswith(protocol.lower()):
continue
if interface:
interface_ = Dq(rt_dict).get_values('via', 0) or Dq(rt_dict).get_values('nh-local-interface', 0)
if not interface_.lower().startswith(interface.lower()):
continue
return Dq(rt_dict).get_values('rt-destination', 0)
return None
def get_ospf_metric(device,
destination_address):
"""Get OSPF metric
Args:
device (obj): Device object
destination_address (str): Destination address
"""
out = device.parse('show route')
# Example dictionary
# "route-table": [
# {
# "active-route-count": "0",
# "destination-count": "0",
# "hidden-route-count": "0",
# "holddown-route-count": "0",
# "rt": [
# {
# "metric": "101",
# }
# },
rt_list = Dq(out).get_values('rt')
for rt_dict in rt_list:
rt_destination_ = Dq(rt_dict).get_values("rt-destination", 0)
if not isinstance(rt_destination_, list):
if rt_destination_.startswith(str(destination_address)):
metric_ = Dq(rt_dict).get_values('metric', 0)
if not metric_:
continue
return metric_
return None
```
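A usage sketch of the routing getters above (`device` stands for a connected pyATS device; addresses are illustrative):
```python
via = get_active_outgoing_interface(device, '10.169.14.240')   # e.g. 'ge-0/0/1.0'
dest = get_route_destination_address(device, prefix='inet.0',
                                     protocol='Direct',
                                     interface='ge-0/0/3.0')   # e.g. '192.168.3.11/24'
metric = get_ospf_metric(device, '10.169.14.240')              # e.g. '101'
```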
#### File: tests/iosxe/test_api_iosxe_platform.py
```python
import unittest
from genie.conf import Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.clean.stages.iosxe.tests.iosxe_pos_stage_outputs import get_parsed_output as pos_parsed
from genie.libs.sdk.apis.iosxe.platform.get import (
get_boot_variables, get_config_register)
class TestApiIosxePlatform(unittest.TestCase):
@classmethod
def setUpClass(cls):
testbed = """
devices:
R1:
os: iosxe
type: router
connections: {}
"""
cls.tb = Genie.init(testbed)
cls.device = cls.tb.devices['R1']
cls.device.parse = pos_parsed
def test_get_boot_variables(self):
boot_vars = get_boot_variables(self.device, 'current')
self.assertEqual(boot_vars, ['harddisk:/vmlinux_PE1.bin'])
boot_vars = get_boot_variables(self.device, 'next')
self.assertEqual(boot_vars, ['harddisk:/vmlinux_PE1.bin'])
with self.assertRaises(AssertionError):
get_boot_variables(self.device, 'does_not_exist')
def test_get_config_register(self):
config_reg = get_config_register(self.device)
self.assertEqual(config_reg, '0x2102')
# Need updated parsed device output
# config_reg = get_config_register(self.device, next_reload=True)
# self.assertEqual(config_reg, '0x2101')
if __name__ == '__main__':
unittest.main()
```
#### File: triggers/blitz/actions_helper.py
```python
import re
import sys
import time
import logging
import copy
import operator
from genie.libs import sdk
from genie.utils.dq import Dq
from pyats.aetest.steps import Steps
from genie.utils.timeout import Timeout
from unicon.eal.dialogs import Statement, Dialog
from genie.harness.standalone import run_genie_sdk
from genie.ops.utils import get_ops_exclude
from genie.libs.parser.utils import get_parser_exclude
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from pyats.results import TestResult, Passed, Failed, Skipped, Passx, Aborted, Errored
log = logging.getLogger(__name__)
def configure_handler(self, step, device, command, reply=None):
kwargs = {}
if reply:
kwargs.update({'reply':_prompt_handler(reply)})
try:
# pass the reply dialog along if one exists; otherwise configure the command normally
output = device.configure(command, **kwargs)
except Exception as e:
step.failed('Configure failed {}'.format(str(e)))
return output
def parse_handler(self, step, device, command, include=None, exclude=None,
max_time=None, check_interval=None, continue_=True, action='parse'):
# handling the parse command
try:
output = device.parse(command)
# if the parser output is empty, mark the step as passed and stop here
except SchemaEmptyParserError:
step.passed('The result of this command is an empty parser.')
else:
# go through the include/exclude process
return _output_query_template(self, output, step, device, command,
include, exclude, max_time, check_interval, continue_, action)
def execute_handler(self, step, device, command, include=None, exclude=None,
max_time=None, check_interval=None, continue_=True, action='execute', reply=None):
kwargs = {}
if reply:
kwargs.update({'reply':_prompt_handler(reply)})
# handling the execute command
output = device.execute(command, **kwargs)
return _output_query_template(self, output, step, device, command,
include, exclude, max_time, check_interval, continue_, action, reply=reply)
def learn_handler(self, step, device, command, include=None, exclude=None,
max_time=None, check_interval=None, continue_=True, action='learn'):
# Save the to_dict learn output,
output = device.learn(command).to_dict()
return _output_query_template(self, output, step, device, command,
include, exclude, max_time, check_interval, continue_, action)
def api_handler(self, step, device, command, include=None, exclude=None,
max_time=None, check_interval=None, continue_=True, arguments=None, action='api'):
# handling the api command
output = None
# if no arguments send an empty argument list to api function
if not arguments:
arguments = {}
# check the os and decide how to call the api_function
# will be changed when we figure out the use of device.type for more general use
if device.os == 'ixianative':
api_function = device
else:
api_function = device.api
if arguments.get('device'):
# if device does not exist error
try:
arg_device = device.testbed.devices[arguments['device']]
except KeyError as e:
step.errored('Cannot find device {} in testbed'.format(arguments['device']))
else:
arguments['device'] = arg_device
try:
output = getattr(api_function, command)(**arguments)
except (AttributeError, TypeError) as e: # if could not find api or the kwargs is wrong for api
step.errored(str(e))
except Exception as e: # anything else
step.failed(str(e))
return _output_query_template(self, output, step, device, command,
include, exclude, max_time, check_interval, continue_, action, arguments=arguments)
def _prompt_handler(reply):
# handling the reply for instances where a prompt message is displayed in the console
dialog_list = []
for statement in reply:
dialog_list.append(Statement(**statement))
return Dialog(dialog_list)
def _output_query_template(self, output, steps, device, command, include,
exclude, max_time, check_interval, continue_, action, reply=None, arguments=None):
keys = _include_exclude_list(include, exclude)
max_time, check_interval = _get_timeout_from_ratios(
device=device, max_time=max_time, check_interval=check_interval)
timeout = Timeout(max_time, check_interval)
for query, style in keys:
# dict that would be sent with various data for inclusion/exclusion check
kwargs = {}
send_cmd = False
# for each query and style
with steps.start("Verify that '{query}' is {style} in the output".\
format(query=query, style=style), continue_=continue_) as substep:
while True:
if send_cmd:
output = _send_command(command, device, action, arguments=arguments, reply=reply)
if action == 'execute':
# validating the inclusion/exclusion of action execute,
pattern = re.compile(str(query))
found = pattern.search(str(output))
kwargs.update({'action_output': found, 'operation':None, 'expected_value': None,
'style':style, 'key':query, 'query_type': 'execute_query'})
else:
# verifying the inclusion/exclusion of actions : learn, parse and api
found = _get_output_from_query_validators(output, query)
kwargs = found
kwargs.update({'style': style, 'key':None})
# Function would return (pass | fail | error)
step_result, message = _verify_include_exclude(**kwargs)
if step_result == Passed:
substep.passed(message)
send_cmd = True
timeout.sleep()
if not timeout.iterate():
break
# failing logic in case of timeout
substep.failed(message)
return output
def _send_command(command, device, action, arguments=None, reply=None):
kwargs = {}
# if api
if action == 'api' :
if not arguments:
arguments = {}
if device.os == 'ixianative':
api_func = device
else:
api_func = device.api
return getattr(api_func, command)(**arguments)
# if learn
elif action == 'learn':
return getattr(device, action)(command).to_dict()
# for everything else, just check if reply should get updated
if reply and action == 'execute':
kwargs.update({'reply':_prompt_handler(reply)})
return getattr(device, action)(command, **kwargs)
def _include_exclude_list(include, exclude):
# create the list of quries that would be checked for include or exclude
keys = []
if include:
for item in include:
keys.append((item, 'included'))
if exclude:
for item in exclude:
keys.append((item, 'excluded'))
return keys
def _get_timeout_from_ratios(device, max_time, check_interval):
max_time_ratio = device.custom.get('max_time_ratio', None)
if max_time and max_time_ratio:
try:
max_time = int(max_time * float(max_time_ratio))
except ValueError:
log.error('The max_time_ratio ({m}) value is not of type float'.format(m=max_time_ratio))
check_interval_ratio = device.custom.get('check_interval_ratio', None)
if check_interval and check_interval_ratio:
try:
check_interval = int(check_interval * float(check_interval_ratio))
except ValueError:
log.error('The check_interval_ratio ({c}) value is not of type float'.format(c=check_interval_ratio))
if max_time and not check_interval:
check_interval = 0.0
return max_time, check_interval
def _verify_include_exclude(action_output, style, query_type,
operation=None, expected_value=None, key=None):
# Checking the inclusion or exclusion and verifies result values
# With regards to different operations for actions ('api','parse', 'learn')
if query_type == 'api_query':
# if a value exist to compare the result
return _verify_string_query_include_exclude(action_output, expected_value, style, operation=operation)
else:
# if results are dictionary and the queries are in dq format ( contains('value'))
return _verify_dq_query_and_execute_include_exclude(action_output, style, key)
def _verify_string_query_include_exclude(action_output, expected_value, style, operation=None):
# the query is in this format : ">= 1200"
# verify the operator and value results for non dq queries (mostly apis)
if not operation:
# default operation based on style
if style == 'included':
operation = '=='
elif style == 'excluded':
operation = '!='
# message template for the case that we are validating the result within a range
msg_if_range = 'The API result "{result}" is "{operation}" the range provided in the trigger datafile'
# message template for other general cases
msg = 'The API result "{result}" is "{operation}" to "{value}" provided in the trigger datafile"'
if _evaluate_operator(result=action_output, operation=operation, value=expected_value):
# Step would be Passed
# The only current exception, when user asks for checking a range
if isinstance(expected_value, range):
return (Passed, msg_if_range.format(result=action_output, operation=operation))
return (Passed, msg.format(result=action_output, value=expected_value, operation=operation))
else:
if isinstance(expected_value, range):
return (Failed, msg_if_range.format(result=action_output, operation = "not "+operation))
return (Failed, msg.format(result=action_output, value=expected_value, operation="not "+operation))
def _verify_dq_query_and_execute_include_exclude(action_output, style, key):
# Validating execute include exclude keys and queries that are following dq formats
# if key is a query of type (contains('status')) then we show the resulted output
# otherwise the key itself usually for execute action
if not key:
key = action_output
message = "'{k}' is {s} in the output"
if style == "included" and action_output:
return (Passed, message.format(k=key, s=style))
elif style =='excluded' and not action_output:
return (Passed, message.format(k=key, s=style))
elif style == "included" and not action_output:
# change the style if it is not included for reporting
return (Failed, message.format(k=key, s= 'not ' + style))
else:
return (Failed, message.format(k=key, s= 'not ' + style))
def _get_output_from_query_validators(output, query):
# the function determines the type of query and returns the appropriate result
ret_dict = {}
# if it is a valid dq query than apply the query and return the output
if Dq.query_validator(query):
output = Dq.str_to_dq_query(output, query)
ret_dict.update({'action_output': output, 'query_type': 'dq_query', 'operation': None, 'expected_value': None})
else:
# check for the string query
output = _string_query_validator(output, query)
action_output = output['output']
operation = output['operation']
value = output['value']
ret_dict.update({'action_output': action_output, 'query_type': 'api_query', 'operation': operation, 'expected_value': value})
return ret_dict
def _string_query_validator(output, query):
# Validates user queries like (>=1220), (!= 100), (>= 100 && <= 200), (== some string)
# These queries are mostly used for verifying API outputs that are numbers or strings
# if the query does not match the expected pattern, raise a ValueError
ret_dict = {}
p = re.compile(r'(?P<operation>[>=<!\s]*)(?P<value>[\S\s]+)')
if '&&' in query:
# if range return the value of this function
return _string_query_range_validator(output, query)
# match the query against the <operation><value> pattern
m = p.match(query)
if not m:
# raise an error if the query does not match the expected pattern
raise ValueError("The query: '{}' is not entered properly".format(query))
value = m.groupdict()['value'].strip(' ')
operation = m.groupdict()['operation'].replace(' ', '')
# check the type of the action result
output_type = type(output)
try:
# cast the input value to result value
value = output_type(value)
except Exception:
pass
finally:
ret_dict.update({'output': output, 'operation': operation, 'value': value})
return ret_dict
def _string_query_range_validator(output, query):
# validating users queries like (>= 1200 && <=2000)
# list of valid operations in case of a range call we dont want = or !=
list_of_ops = ['>=', '<=', '>', '<']
# list of values that later will be our range
value_list = []
ret_dict = {}
# splitting the range input (>=1220 && <=2000) on '&&'
# each element of the resulting list is one comparison
# range_query_list[0] = ">=1220" and range_query_list[1] = "<=2000"
range_query_list = query.split('&&')
# use a set to reject duplicate operators within the same query
set_of_input_ops = set()
# for each operator and operand
for q in range_query_list:
# send it back to _string_query_validator for extracting the value and operator
single_query_dict = _string_query_validator(output, q)
# the length of the split query must be exactly 2 because it describes a range
# examples of not valid range: >=1220 && ==1300, <1200 && <= 2300, >1200 && <230
# example of a valid: > 1200 && <= 2300, >= 1220 && <= 2330, >1000 && <2000
if not single_query_dict['operation'] \
or single_query_dict['operation'] not in list_of_ops \
or single_query_dict['operation'] in set_of_input_ops \
or len(range_query_list) != 2:
raise Exception('The input is not describing a range')
range_value = int(single_query_dict['value'])
# update the set so the user cannot repeat an operator and only range-valid operators are accepted
if single_query_dict['operation'] == '>' or single_query_dict['operation'] == '>=':
set_of_input_ops.update(['>', '>='])
if single_query_dict['operation'] == '<' or single_query_dict['operation'] == '<=':
set_of_input_ops.update(['<', '<='])
# giving user the freedom of using > or >=
# Using range func, the last value in this function is always excluded.
# The first value is always included. If users input operation is > the first value
# should be excluded and if operation is <= the last value should be included.
# Adding 1 to the value for such instances.
if single_query_dict['operation'] == '>' or single_query_dict['operation'] == '<=':
range_value +=1
value_list.append(range_value)
if int(value_list[1]) <= int(value_list[0]):
raise Exception('This is not describing a range, the end of the '
'interval {} cannot be smaller or equal its start {}'.format(str(value_list[1]), str(value_list[0])))
# create the range object out of the extracted output and return the value
value = range(int(value_list[0]), int(value_list[1]))
operation = 'within'
ret_dict.update({'output': output, 'operation': operation, 'value': value})
return ret_dict
def _evaluate_operator(result, operation=None, value=None):
# used to evaluate the operation results
# numeric operands: the six comparison operators
if isinstance(value, (float, int)) and isinstance(result, (float, int)):
dict_of_ops = {'==': operator.eq, '>=': operator.ge,
'>': operator.gt, '<=': operator.le, '<': operator.lt,
'!=': operator.ne}
elif isinstance(result, (float, int)) and isinstance(value, range):
# just check if the first argument is within the second argument,
# changing the operands order of contains function (in)
dict_of_ops = {'within': lambda item, container: item in container}
elif isinstance(result, str) and isinstance(value, str):
# if strings just check equal or not equal or inclusion
dict_of_ops = {'==': operator.eq, '!=':operator.ne, 'in': operator.contains}
else:
# if any other type return error
return False
# if the operation is not supported, raise an exception
if operation not in dict_of_ops.keys():
raise Exception('Operator {} is not supported'.format(operation))
return dict_of_ops[operation](result, value)
def _get_exclude(command, device):
exclude = None
if command:
try:
# Try parser
return get_parser_exclude(command, device)
except Exception:
pass
try:
# Try ops
return get_ops_exclude(command, device)
except Exception:
pass
return []
```
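A sketch of how the query validators above interpret include/exclude strings (return values shown as comments):
```python
_string_query_validator(1500, '>= 1200')
# -> {'output': 1500, 'operation': '>=', 'value': 1200}

_string_query_range_validator(1500, '>= 1200 && <= 2000')
# -> {'output': 1500, 'operation': 'within', 'value': range(1200, 2001)}

_evaluate_operator(1500, operation='within', value=range(1200, 2001))
# -> True
```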
#### File: triggers/blitz/actions.py
```python
import re
import time
import logging
from genie.libs import sdk
from genie.utils.diff import Diff
from genie.harness.standalone import run_genie_sdk
from pyats.async_ import pcall
from pyats.log.utils import banner
from pyats.aetest.steps import Steps
from pyats.results import TestResult, Passed, Failed, Skipped, Passx, Aborted, Errored
from .markup import save_variable
from .yangexec import run_netconf, run_gnmi, notify_wait
from .actions_helper import configure_handler, api_handler, learn_handler,\
parse_handler, execute_handler, _get_exclude
log = logging.getLogger(__name__)
def configure(self, device, steps, command, reply=None, continue_=True):
# default output set to none in case of an exception
output = None
with steps.start("Configuring '{device}'".\
format(device=device.name), continue_=continue_) as step:
output = configure_handler(self, step, device, command, reply)
notify_wait(steps, device)
return output
def parse(self, device, steps, command, include=None,
exclude=None, max_time=None, check_interval=None, continue_=True, *args, **kwargs):
# action parse
output = {}
with steps.start("Parsing '{c}' on '{d}'".\
format(c=command, d=device.name), continue_=continue_) as step:
output = parse_handler(self, step, device, command, include=include, exclude=exclude,
max_time=max_time, check_interval=check_interval, continue_=continue_)
notify_wait(steps, device)
return output
def execute(self, device, steps, command, include=None, exclude=None,
max_time=None, check_interval=None, reply=None, continue_=True):
# action execute
with steps.start("Executing '{c}' on '{d}'".\
format(c=command, d=device.name), continue_=continue_) as step:
output = execute_handler(self, step, device, command, include=include, exclude=exclude,
max_time=max_time, check_interval=check_interval, continue_=continue_, reply=reply)
notify_wait(steps, device)
return output
def api(self, device, steps, function, arguments=None, include=None,
exclude=None, max_time=None, check_interval=None, continue_=True):
# action api
output = None
with steps.start("Calling API '{f}' on '{d}'".\
format(f=function, d=device.name), continue_=continue_) as step:
output = api_handler(self, step, device, function, include=include, exclude=exclude,
max_time=max_time, check_interval=check_interval, continue_=continue_, arguments=arguments)
notify_wait(steps, device)
return output
def learn(self, device, steps, feature, include=None, exclude=None,
max_time=None, check_interval=None, continue_=True):
# action learn
with steps.start("Learning '{f}' on '{d}'".\
format(f=feature, d=device.name), continue_=continue_) as step:
output = learn_handler(self, step, device, feature, include=include, exclude=exclude,
max_time=max_time, check_interval=check_interval, continue_=continue_,)
return output
def sleep(self, steps, sleep_time, continue_=True, *args, **kwargs):
log.info('Sleeping for {s} seconds'.format(s=sleep_time))
time.sleep(float(sleep_time))
def yang(self, device, steps, protocol, datastore, content, operation,
continue_=True, connection=None, returns=None, *args, **kwargs):
if connection:
device = getattr(device, connection)
# Verify that we are connected
# TODO
# I think all our connection implementation (unicon, rest, yang)
# should have an isconnected which does an action on the device, example show
# clock for cli, to verify. Right now, we don't have this; we have
# 'connected' but we all know it's of no use.
if returns is None:
returns = {}
if protocol == 'netconf':
result = run_netconf(operation=operation, device=device, steps=steps,
datastore=datastore, rpc_data=content,
returns=returns, **kwargs)
elif protocol == 'gnmi':
result = run_gnmi(operation=operation, device=device, steps=steps,
datastore=datastore, rpc_data=content,
returns=returns, **kwargs)
if not result:
steps.failed('Yang action has failed')
if operation != 'subscribe':
notify_wait(steps, device)
return result
def configure_replace(self, device, steps, config, continue_=True, iteration=2, interval=30):
restore = sdk.libs.abstracted_libs.restore.Restore(device=device)
# lib.to_url is normally saved via restore.save_configuration()
# but since we only want the os abstraction and we are providing
# a config - Just set the to_url equal to the config path provided
restore.lib.to_url = config
restore.restore_configuration(
device=device,
abstract=None,
method='config_replace',
iteration=iteration,
interval=interval,
delete_after_restore=False,
)
def save_config_snapshot(self, device, steps, continue_=True):
# setup restore object for device
if not hasattr(self, 'restore'):
self.restore = {}
if device not in self.restore:
self.restore[device] = sdk.libs.abstracted_libs.restore.Restore(device=device)
# Get default directory
save_dir = getattr(self.parent, 'default_file_system')
if not save_dir:
self.parent.default_file_system = {}
# learn default directory
if device.name not in save_dir:
self.parent.default_file_system.update({
device.name: self.restore[device].abstract.sdk.libs.abstracted_libs.\
subsection.get_default_dir(device=device)})
self.restore[device].save_configuration(
device=device,
abstract=None,
method='config_replace',
default_dir=self.parent.default_file_system
)
# To keep track of snapshots (whether they are deleted or not)
self.restore[device].snapshot_deleted = False
def restore_config_snapshot(self, device, steps, continue_=True, delete_snapshot=True):
if not hasattr(self, 'restore') or device not in self.restore:
steps.errored("Must use action 'save_config_snapshot' first.\n\n")
# If the snapshot file was deleted - error
if self.restore[device].snapshot_deleted:
steps.errored("If you want to restore with the same snapshot "
"multiple times then you must pass 'delete_snapshot=False' "
"to previous uses of this action. Otherwise the "
"snapshot will be deleted on the first usage.")
try:
self.restore[device].restore_configuration(
device=device,
abstract=None,
method='config_replace',
delete_after_restore=delete_snapshot
)
except Exception as e:
steps.failed(str(e))
# To keep track of snapshots (whether they are deleted or not)
if delete_snapshot:
self.restore[device].snapshot_deleted = True
def bash_console(self, device, steps, commands, continue_=True, **kwargs):
ret_dict = {}
with device.bash_console(**kwargs) as bash:
for command in commands:
output = bash.execute(command, **kwargs)
ret_dict.update({command:output})
return ret_dict
def genie_sdk(self, steps, continue_=True, **kwargs):
# This is to remove the uut dependency of genie standalone.
# Since the device we are running the sdk on is in the
# kwargs we just pass the first device found as the 'uut'
uut = 'uut'
for _, params in kwargs.items():
uut = params.get('devices', ['uut'])[0]
break
sdks = list(kwargs.keys())
run_genie_sdk(self, steps, sdks, uut=uut, parameters=kwargs)
def print(self, steps, continue_=True, *args, **kwargs):
if 'steps' in kwargs:
kwargs.pop('steps')
for key, value in kwargs.items():
if value.get('type') == 'banner':
print_value = 'printing message: {k}\n{v}'.format(k=key,v=banner(value['value']))
else:
print_value = 'The value of {k}: {v}'.format(k=key,v=value['value'])
log.info(print_value)
def diff(self, steps, device, pre, post, continue_=True, fail_different=False,
command=None, exclude=None):
with steps.start("Perform Diff for '{device}'".format(device=device.name),
continue_=continue_) as step:
exclude_items = _get_exclude(command, device)
if exclude and isinstance(exclude, list):
exclude_items.extend(exclude)
try:
diff = Diff(pre, post, exclude=exclude_items)
except Exception as e:
step.failed(str(e))
diff.findDiff()
if diff:
log.info(diff)
if fail_different:
step.failed('{pre} and {post} are not '
'identical'.format(pre=pre, post=post))
actions = {'configure': configure,
'parse': parse,
'execute': execute,
'api': api,
'tgn': api,
'sleep': sleep,
'yang': yang,
'learn': learn,
'print': print,
'configure_replace': configure_replace,
'save_config_snapshot': save_config_snapshot,
'restore_config_snapshot': restore_config_snapshot,
'run_genie_sdk': genie_sdk,
'diff': diff,
'bash_console': bash_console}
def action_parallel(self, steps, testbed, section, data):
# When called, run all the actions
# below the 'parallel' keyword concurrently
pcall_payloads = []
with steps.start('Executing actions in parallel', continue_=True) as steps:
for action_item in data:
for action, action_kwargs in action_item.items():
# for future use - Enhancement needed in pyATS
# with steps.start("Implementing action '{a}' in parallel".format(a=actions)) as step:
# on parallel it is not possible to set continue to False and benefit from that feature
step = Steps()
kwargs = {'steps': step, 'testbed': testbed, 'section': section, 'data': [{action:action_kwargs}]}
pcall_payloads.append(kwargs)
pcall_returns = pcall(self.dispatcher, ikwargs=pcall_payloads)
# Each action return is a dictionary containing the action name, possible saved_variable
# Action results, and device name that action is being implemented on
# These value would be lost when the child processor that executes the action end the process.
# It is being implemented this way in order to add these values to the main processor.
for each_return in pcall_returns:
if each_return.get('saved_vars'):
for saved_var_name, saved_var_data in each_return.get('saved_vars').items():
if each_return.get('filters'):
log.info('Applied filter: {} to the action {} output'.format(each_return['filters'], each_return['action']))
save_variable(self, saved_var_data, saved_var_name)
if each_return['device']:
msg = 'Executed action {action} on {device} in parallel'.format(
action=each_return['action'], device=each_return['device'])
else:
msg = 'Executed action {action} in parallel'.format(
action=each_return['action'])
with steps.start(msg, continue_=True, description=each_return['description']) as report_step:
log.info('Check above for detailed action report')
getattr(report_step, str(each_return['step_result']))()
```
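A minimal sketch of the `pcall` fan-out used by `action_parallel` above: each dict in `ikwargs` becomes the keyword arguments of one concurrent call (the worker here is illustrative, standing in for `self.dispatcher`):
```python
from pyats.async_ import pcall

def run_action(steps, testbed, section, data):
    # a real dispatcher would look up actions[...] and execute it
    return {'action': list(data[0])[0], 'device': None}

payloads = [{'steps': None, 'testbed': None, 'section': None,
             'data': [{'sleep': {'sleep_time': 1}}]}]
pcall_returns = pcall(run_action, ikwargs=payloads)
```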
|
{
"source": "jeauger/qrocodile",
"score": 2
}
|
#### File: jeauger/qrocodile/qrgen.py
```python
import argparse
import hashlib
import json
import os.path
import shutil
import spotipy
import spotipy.util as util
import subprocess
import sys
import urllib
import urllib2
# Build a map of the known commands
# TODO: Might be better to specify these in the input file to allow for more customization
# (instead of hardcoding names/images here)
commands = {
'cmd:playpause': ('Play / Pause', 'https://raw.githubusercontent.com/google/material-design-icons/master/av/drawable-xxxhdpi/ic_pause_circle_outline_black_48dp.png'),
'cmd:next': ('Skip to Next Song', 'https://raw.githubusercontent.com/google/material-design-icons/master/av/drawable-xxxhdpi/ic_skip_next_black_48dp.png'),
'cmd:turntable': ('Turntable', 'http://moziru.com/images/record-player-clipart-vector-3.jpg'),
'cmd:bureau': ('Bureau', 'http://icons.iconarchive.com/icons/icons8/ios7/512/Household-Livingroom-icon.png'),
'cmd:songonly': ('Play the Song Only', 'https://raw.githubusercontent.com/google/material-design-icons/master/image/drawable-xxxhdpi/ic_audiotrack_black_48dp.png'),
'cmd:wholealbum': ('Play the Whole Album', 'https://raw.githubusercontent.com/google/material-design-icons/master/av/drawable-xxxhdpi/ic_album_black_48dp.png'),
'cmd:buildqueue': ('Build List of Songs', 'https://raw.githubusercontent.com/google/material-design-icons/master/av/drawable-xxxhdpi/ic_playlist_add_black_48dp.png'),
'cmd:whatsong': ('What\'s Playing?', 'https://raw.githubusercontent.com/google/material-design-icons/master/action/drawable-xxxhdpi/ic_help_outline_black_48dp.png'),
'cmd:whatnext': ('What\'s Next?', 'https://raw.githubusercontent.com/google/material-design-icons/master/action/drawable-xxxhdpi/ic_help_outline_black_48dp.png')
}
# Parse the command line arguments
arg_parser = argparse.ArgumentParser(description='Generates an HTML page containing cards with embedded QR codes that can be interpreted by `qrplay`.')
arg_parser.add_argument('--input', help='the file containing the list of commands and songs to generate')
arg_parser.add_argument('--generate-images', action='store_true', help='generate an individual PNG image for each card')
arg_parser.add_argument('--list-library', action='store_true', help='list all available library tracks')
arg_parser.add_argument('--hostname', default='localhost', help='the hostname or IP address of the machine running `node-sonos-http-api`')
arg_parser.add_argument('--spotify-username', help='the username used to set up Spotify access (only needed if you want to generate cards for Spotify tracks)')
args = arg_parser.parse_args()
print args
base_url = 'http://' + args.hostname + ':5005'
if args.spotify_username:
# Set up Spotify access (comment this out if you don't want to generate cards for Spotify tracks)
scope = 'user-library-read'
token = util.prompt_for_user_token(args.spotify_username, scope)
if token:
sp = spotipy.Spotify(auth=token)
else:
raise ValueError('Can\'t get Spotify token for ' + args.spotify_username)
else:
# No Spotify
sp = None
def perform_request(url):
print(url)
response = urllib2.urlopen(url)
result = response.read()
return result
def list_library_tracks():
result_json = perform_request(base_url + '/musicsearch/library/listall')
tracks = json.loads(result_json)['tracks']
for t in tracks:
print(t.encode('utf-8'))
# Removes extra junk from titles, e.g:
# (Original Motion Picture Soundtrack)
# - From <Movie>
# (Remastered & Expanded Edition)
def strip_title_junk(title):
junk = [' (Original', ' - From', ' (Remaster', ' [Remaster']
for j in junk:
index = title.find(j)
if index >= 0:
return title[:index]
return title
def process_command(uri, index):
(cmdname, arturl) = commands[uri]
# Determine the output image file names
qrout = 'out/{0}qr.png'.format(index)
artout = 'out/{0}art.jpg'.format(index)
# Create a QR code from the command URI
print subprocess.check_output(['qrencode', '-o', qrout, uri])
# Fetch the artwork and save to the output directory
print subprocess.check_output(['curl', arturl, '-o', artout])
return (cmdname, None, None)
def process_spotify_track(uri, index):
if not sp:
raise ValueError('Must configure Spotify API access first using `--spotify-username`')
track = sp.track(uri)
print track
# print 'track : ' + track['name']
# print 'artist : ' + track['artists'][0]['name']
# print 'album : ' + track['album']['name']
# print 'cover art: ' + track['album']['images'][0]['url']
song = strip_title_junk(track['name'])
artist = strip_title_junk(track['artists'][0]['name'])
album = strip_title_junk(track['album']['name'])
arturl = track['album']['images'][0]['url']
# Determine the output image file names
qrout = 'out/{0}qr.png'.format(index)
artout = 'out/{0}art.jpg'.format(index)
# Create a QR code from the track URI
    print(subprocess.check_output(['qrencode', '-o', qrout, uri]))
    # Fetch the artwork and save to the output directory
    print(subprocess.check_output(['curl', arturl, '-o', artout]))
return (song.encode('utf-8'), album.encode('utf-8'), artist.encode('utf-8'))
def process_library_track(uri, index):
track_json = perform_request(base_url + '/musicsearch/library/metadata/' + uri)
track = json.loads(track_json)
print(track)
song = strip_title_junk(track['trackName'])
artist = strip_title_junk(track['artistName'])
album = strip_title_junk(track['albumName'])
arturl = track['artworkUrl']
# XXX: Sonos strips the "The" prefix for bands that start with "The" (it appears to do this
# only in listing contexts; when querying the current/next queue track it still includes
# the "The"). As a dumb hack (to preserve the "The") we can look at the raw URI for the
# track (this assumes an iTunes-style directory structure), parse out the artist directory
# name and see if it starts with "The".
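    # e.g. (illustrative) a URI path like
    # ".../Music/The%20Beatles/Abbey%20Road/01%20Come%20Together.mp3"
    # yields artist_part "The%20Beatles", so "The " is re-attached below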
from urlparse import urlparse
uri_parts = urlparse(track['uri'])
uri_path = uri_parts.path
print(uri_path)
(uri_path, song_part) = os.path.split(uri_path)
(uri_path, album_part) = os.path.split(uri_path)
(uri_path, artist_part) = os.path.split(uri_path)
if artist_part.startswith('The%20'):
artist = 'The ' + artist
# Determine the output image file names
qrout = 'out/{0}qr.png'.format(index)
artout = 'out/{0}art.jpg'.format(index)
# Create a QR code from the track URI
    print(subprocess.check_output(['qrencode', '-o', qrout, uri]))
    # Fetch the artwork and save to the output directory
    print(subprocess.check_output(['curl', arturl, '-o', artout]))
return (song.encode('utf-8'), album.encode('utf-8'), artist.encode('utf-8'))
# Return the HTML content for a single card.
def card_content_html(index, artist, album, song):
qrimg = '{0}qr.png'.format(index)
artimg = '{0}art.jpg'.format(index)
html = ''
html += ' <img src="{0}" class="art"/>\n'.format(artimg)
html += ' <img src="{0}" class="qrcode"/>\n'.format(qrimg)
html += ' <div class="labels">\n'
html += ' <p class="song">{0}</p>\n'.format(song)
if artist:
html += ' <p class="artist"><span class="small">by</span> {0}</p>\n'.format(artist)
if album:
html += ' <p class="album"><span class="small">from</span> {0}</p>\n'.format(album)
html += ' </div>\n'
return html
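# For example (illustrative), card_content_html(0, 'Artist', 'Album', 'Song')
# returns roughly:
#   <img src="0art.jpg" class="art"/>
#   <img src="0qr.png" class="qrcode"/>
#   <div class="labels">
#     <p class="song">Song</p>
#     <p class="artist"><span class="small">by</span> Artist</p>
#     <p class="album"><span class="small">from</span> Album</p>
#   </div>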
# Generate a PNG version of an individual card (with no dashed lines).
def generate_individual_card_image(index, artist, album, song):
# First generate an HTML file containing the individual card
html = ''
html += '<html>\n'
html += '<head>\n'
html += ' <link rel="stylesheet" href="cards.css">\n'
html += '</head>\n'
html += '<body>\n'
html += '<div class="singlecard">\n'
html += card_content_html(index, artist, album, song)
html += '</div>\n'
html += '</body>\n'
html += '</html>\n'
html_filename = 'out/{0}.html'.format(index)
with open(html_filename, 'w') as f:
f.write(html)
# Then convert the HTML to a PNG image (beware the hardcoded values; these need to align
# with the dimensions in `cards.css`)
png_filename = 'out/{0}'.format(index)
    print(subprocess.check_output(['webkit2png', html_filename, '--scale=1.0', '--clipped', '--clipwidth=720', '--clipheight=640', '-o', png_filename]))
# Rename the file to remove the extra `-clipped` suffix that `webkit2png` includes by default
os.rename(png_filename + '-clipped.png', png_filename + 'card.png')
def generate_cards():
# Create the output directory
dirname = os.getcwd()
outdir = os.path.join(dirname, 'out')
print(outdir)
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.mkdir(outdir)
# Read the file containing the list of commands and songs to generate
with open(args.input) as f:
lines = f.readlines()
# The index of the current item being processed
index = 0
# Copy the CSS file into the output directory. (Note the use of 'page-break-inside: avoid'
# in `cards.css`; this prevents the card divs from being spread across multiple pages
# when printed.)
shutil.copyfile('cards.css', 'out/cards.css')
# Begin the HTML template
html = '''
<html>
<head>
<link rel="stylesheet" href="cards.css">
</head>
<body>
'''
for line in lines:
# Trim newline
line = line.strip()
# Remove any trailing comments and newline (and ignore any empty or comment-only lines)
line = line.split('#')[0]
line = line.strip()
if not line:
continue
if line.startswith('cmd:'):
(song, album, artist) = process_command(line, index)
elif line.startswith('spotify:'):
(song, album, artist) = process_spotify_track(line, index)
elif line.startswith('lib:'):
(song, album, artist) = process_library_track(line, index)
else:
print('Failed to handle URI: ' + line)
exit(1)
# Append the HTML for this card
html += '<div class="card">\n'
html += card_content_html(index, artist, album, song)
html += '</div>\n'
if args.generate_images:
# Also generate an individual PNG for the card
generate_individual_card_image(index, artist, album, song)
if index % 2 == 1:
html += '<br style="clear: both;"/>\n'
index += 1
html += '</body>\n'
html += '</html>\n'
print(html)
with open('out/index.html', 'w') as f:
f.write(html)
if args.input:
generate_cards()
elif args.list_library:
list_library_tracks()
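# Example invocations (hypothetical script name and hostname):
#   python qrgen.py --input example.txt --generate-images
#   python qrgen.py --list-library --hostname 192.168.1.50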
```
|
{
"source": "JeaustinSirias/Backend-Test-Sirias",
"score": 2
}
|
#### File: Backend-Test-Sirias/application/views.py
```python
from django.shortcuts import render, redirect
from .models import menu, lunch
from .forms import menuForm, lunchForm
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.timezone import localtime, localdate
from .tasks import slack_advertisement
from .utils import sudo_check
@login_required
@user_passes_test(sudo_check)
def create_menu(request):
'''A view that renders a form to create a menu
for a specific date.
:param request: the request callout object
:return: the rendered HTML menu form
'''
new_form = menuForm()
if request.method == 'POST':
form = menuForm(request.POST)
if form.is_valid():
date = form.cleaned_data.get('date')
if menu.objects.filter(date=date).exists():
note = 'This date is already in use'
return render(
request,
'createMenu.html',
{
'menuform':form,
'note':note
}
)
form.save()
if date == localdate():
item = menu.objects.filter(date=date)
UUID = item[0].uuid
slack_advertisement(UUID)
note = 'A new menu has been created'
return render(
request,
'createMenu.html',
{
'menuform': None,
'note': note,
}
)
else:
note = 'Date cannot be in the past'
return render(
request,
'createMenu.html',
{
'menuform': form,
'note': note,
}
)
else:
return render(
request,
'createMenu.html',
{
'menuform' : new_form,
}
)
@login_required
@user_passes_test(sudo_check)
def main_page(request):
'''The admin's (Nora) mainpage view.
:param request: the request callout object
:return: the homepage rendered HTML temp.
'''
date = localtime()
return render(
request,
'index.html',
{
'date': date,
}
)
@login_required
def request_lunch(request):
    '''A view that lets employees order and customize
    their preferred meal for today.
    The form is available if Nora (or any other admin)
    has already filled in today's menu and the page
    is visited before 11 AM CLT.
:param request: the request object callout
:return: the menu rendered HTML form
'''
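    # The cut-off hour comes from the Django settings (assumed), e.g.:
    #   LIMIT_HOUR = 11  # orders close at 11 AM CLT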
# Call today's date as reference
date = localdate()
# Check if today's menu is available
item = menu.objects.filter(date=date)
if item.exists() and localtime().hour < settings.LIMIT_HOUR:
        #Check if the employee has already made a request today
instance = lunch.objects.filter(
user=request.user,
date=date,
)
if instance.exists():
return render(
request,
'requestMenu.html',
{
'note': 'We\'re preparing your meal!'
}
)
user = lunch(user=request.user)
new_form = lunchForm()
if request.method == 'POST':
form = lunchForm(request.POST, instance=user)
if form.is_valid():
form.save()
note = (
'Your today\'s meal has been saved!'
)
else:
note = 'So what are you gonna eat?'
return render(
request,
'requestMenu.html',
{
'requestMealForm': new_form,
'note': note,
'todays_menu': item,
}
)
else:
note = 'Today\'s menu is not available'
return render(
request,
'requestMenu.html',
{
'requestMealForm': None,
'note': note,
}
)
@login_required
@user_passes_test(sudo_check)
def list_menu(request):
'''An iterative method to show all the listed
menus in the database.
:param request: the request call object
:return: the menu rendered HTML list
'''
menu_dict = {}
for item in menu.objects.all():
menu_dict[item.pk] = {
'date': item.date,
}
return render(
request,
'listMenu.html',
{
'menu_list': menu_dict,
}
)
@login_required
@user_passes_test(sudo_check)
def edit_menu(request, id):
'''A method to update or modify a menu.
:param request: the request call object
:param id: the primary key of the menu to modify
:return: the rendered menu HTML update form
'''
# Fill the form with current data
note = ''
pk = menu.objects.get(id=id)
edit_form = menuForm(instance=pk)
if request.method == 'POST':
form = menuForm(request.POST, instance=pk)
if form.is_valid():
note = 'The menu has been updated'
form.save()
edit_form = form
return render(
request,
'editMenu.html',
{
'form': None,
'note': note,
}
)
else:
note = 'Date cannot be in the past'
return render(
request,
'editMenu.html',
{
'form': form,
'note': note,
}
)
return render(
request,
'editMenu.html',
{
'form': edit_form,
'note': note,
}
)
def delete_menu(request, id):
    '''A method for superusers to delete their
    linked menus in case they no longer
    need them.
:param request: the request object callout
:param id: the object's primary key
:return: redirects
'''
form = menu.objects.get(id=id)
form.delete()
return redirect(to='list')
@login_required
@user_passes_test(sudo_check)
def list_requests(request):
'''A view that renders the list of requested
lunches for 'today'.
:param request: the request callout object
:return: the rendered HTML list of requests
'''
orders = {}
instant = localdate()
for item in lunch.objects.filter(date=instant):
orders[item.pk] = {
'option': item.option,
'user': item.user.username,
}
return render(
request,
'listRequests.html',
{
'requests_list': orders,
'date': instant,
}
)
def check_details(request, id):
    '''A view dedicated to showing the details
    of the requested lunch for a specific employee.
    :param request: the request callout object
    :param id: the id of the lunch
:return: the rendered HTML list of details
'''
item = lunch.objects.filter(id=id)
return render(
request,
'checkDetails.html',
{
'lunch': item,
'pk': id,
}
)
def show_menu(request, uuid):
'''The main view for employees
:param request: the request object callout
:param uuid: the UUID key for 'today'
:return: the rendered HTML menu
'''
date = localdate()
Menu = menu.objects.filter(date=date)
return render(
request,
'showMenu.html',
{
'lunch': Menu,
}
)
```
|
{
"source": "jeavila/IS310-1-2021",
"score": 4
}
|
#### File: IS310-1-2021/01_intro/func.py
```python
def saludo(persona, saludo_final="que tal"):
print('Hola', persona, saludo_final)
saludo("Francia", "como has estado")
saludo("Emilio")
def sumar(a:int, b:int):
return a + b
def sumar2(valores:list):
suma = 0
for item in valores:
suma += item
return suma
# *args is collected into a tuple, **kwargs into a dict
def sumar3(*args):
suma = 0
for item in args:
suma += item
return suma
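# For comparison (minimal sketch): **kwargs collects keyword arguments into a dict
def mostrar(**kwargs):
    for clave, valor in kwargs.items():
        print(clave, '=', valor)
mostrar(a=1, b=2)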
print('Sumar2=',sumar2([1, 2, 3]))
print('Sumar3=',sumar3(1))
print('Sumar3=',sumar3(1, 2))
print('Sumar3=',sumar3(1, 2, 3))
print('Type sumar3:', type(sumar3))
x = sumar3
s = x(4, 5, 6)
print(s)
def check_tipo(x):
print(type(x))
check_tipo(3)
check_tipo("Hola")
check_tipo(True)
check_tipo(sumar3)
```
#### File: IS310-1-2021/02_oop/credit_card.py
```python
class CreditCard:
    # Class attribute
TASA_SOBREGIRO = 0.05
    # We do not define the attributes directly on the class.
    # Constructors are defined as follows (the INITIALIZER)
def __init__(self):
        # Instance attributes (defined per object in the initializer)
self.fecha_emision = None
self.fecha_vencimiento = None
self.__cvv = '1234'
self.numero_cuenta = None
self.cliente = None
        # Whether it is Visa, MasterCard, AMEX, etc.
self.emisor = None
self.banco = None
self.__limite = 10000
self.__saldo = 0.0
        # Local variable (not an instance attribute)
monto = 0
def get_cvv(self):
return self.__cvv
def set_saldo(self, saldo:float) -> None:
if (self.__saldo + saldo <= self.__limite):
self.__saldo += saldo
def get_saldo(self) -> float:
return self.__saldo
def __str__(self):
return "Banco: " + self.banco + "\nEmisor: " + self.emisor + "\nCliente: " + self.cliente
# Creating an object
cc = CreditCard()
cc.banco = 'BAC'
cc.emisor = 'Visa'
cc.cliente = '<NAME>'
print(cc.get_cvv())
cc.set_saldo(2500)
print(cc.get_saldo())
cc.set_saldo(66000)
print(cc.get_saldo())
# This calls the __str__ method
print(cc)
# Print the dictionary with the object's attributes
print(cc.__dict__)
# Manipulating the class (static) attribute
print('Tasa antes: ', CreditCard.TASA_SOBREGIRO)
CreditCard.TASA_SOBREGIRO = 0.07
print('Tasa despues: ', CreditCard.TASA_SOBREGIRO)
print(cc.TASA_SOBREGIRO)
cc1 = CreditCard()
print(cc1.TASA_SOBREGIRO)
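# An idiomatic alternative to the get_/set_ methods above is the
# @property decorator (sketch):
#   @property
#   def saldo(self):
#       return self.__saldo
#   @saldo.setter
#   def saldo(self, monto):
#       if self.__saldo + monto <= self.__limite:
#           self.__saldo += monto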
```
#### File: IS310-1-2021/02_oop/slots.py
```python
class Alumno:
__slots__ = ['cuenta', 'nombre', 'carrera']
def __init__(self, cuenta:str, nombre:str, carrera:str) -> None:
self.cuenta = cuenta
self.nombre = nombre
self.carrera = carrera
def __str__(self):
        # Using string interpolation (f-strings)
        return f"Cuenta: {self.cuenta} Nombre: {self.nombre}\nCarrera: {self.carrera}"
if __name__ == '__main__':
a001 = Alumno('20201001234', '<NAME>', 'Sociologia')
print(a001)
    #a001.nota_final = 100  # would raise AttributeError: 'nota_final' is not a slot
    # With __slots__, instances have no __dict__, so inspect the slots instead
    print(a001.__slots__)
    a002 = Alumno('20201001235', '<NAME>', 'Filosofia')
    print(a002.__slots__)
```
#### File: IS310-1-2021/03_analisis/linear_search.py
```python
import time
def linear_search(coleccion, valor):
posicion = -1
indice = 0
for x in coleccion:
if x == valor:
posicion = indice
break
indice += 1
return posicion
n = 1000000
l1 = [2, 4, 6, 8, 3, 9, 5, 7]
l2 = [0] * n
l3 = l2 + l1
# Timing measurement
t_inicio = time.time()
a = linear_search(l3, 3)
t_final = time.time()
dif = t_final - t_inicio
print('Tiempo= ', dif)
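# For comparison (sketch): the built-in list.index performs the same
# linear scan in C, so it is usually faster than the Python loop above
t_inicio = time.time()
b = l3.index(3)
t_final = time.time()
print('Tiempo index()= ', t_final - t_inicio)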
'''
Tiempo= 0.11121511459350586
Tiempo= 0.07894706726074219
Tiempo= 0.09076690673828125
Tiempo= 0.09628438949584961
'''
a = 1
a = 100000000
```
#### File: IS310-1-2021/06_stack/ejemplo.py
```python
def a():
b()
print('A')
def b():
c()
print('B')
def c():
print('C')
a()
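# Output order: C, B, A (c() returns first, then b(), then a())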
```
|
{
"source": "JEB12345/SB2_python_scripts",
"score": 3
}
|
#### File: JEB12345/SB2_python_scripts/DetectCurrentFace.py
```python
def DetectCurrentFace( hebi, Group ):
import scipy.io as scio
import sys
import numpy as np
### This was used for testing purposes only
# import hebi # for the Hebi motors
# from time import sleep
#
# # Need to look into XML formatting for Hebi Gains
# # sio.loadmat('defaultGains.mat')
#
# lookup = hebi.Lookup() # Get table of all Hebi motors
# sleep(2) # gives the Lookup process time to discover modules
#
# # Displays the Hebi modules found on the network
# print('Modules found on the network:')
#
# for entry in lookup.entrylist:
# print('{0} | {1}'.format(entry.family, entry.name))
#
# # print('\n')
#
# var = raw_input('Were any modules found? [y/N]: \n')
# if var == 'y':
# print('\nYay!\n')
# elif var == 'Y':
# print('\nYay!\n')
# else:
# print('\nNONE FOUND!\n')
# sys.exit()
#
# Group = lookup.get_group_from_family('*')
# infoTable = Group.request_info()
### This was used for testing purposes only
trainingData = scio.loadmat('IMUTrainingRutgers.mat') # training data gathered from MATLAB
labels = np.float(trainingData['labs'][0][0][0])
for i in range(1,len(trainingData['labs'])):
labels = np.append(labels,np.float(trainingData['labs'][i][0][0]))
# Create KNN model
from sklearn.neighbors import KNeighborsRegressor
knn = KNeighborsRegressor(n_neighbors=10)
# Fit the model
knn.fit(trainingData['trainingData'], labels)
fbk = hebi.GroupFeedback(Group.size)
Group.feedback_frequency = 200.0
fbk = Group.get_next_feedback(reuse_fbk=fbk)
# if(fbk.size != trainingData['nbMotors'][0][0]):
# print('Something is wrong with the number of connected motors!')
# return 0
accel = fbk.accelerometer.reshape(1,-1)
    [d, n] = knn.kneighbors(accel, 10) # distances "d" and the indices "n" of the 10 nearest training samples
    predicted_lines = np.asanyarray(labels[n[0]], dtype=int) # looks up the face labels of those neighbours
counts = np.bincount(predicted_lines) # counts each instance of face numbers
face = np.argmax(counts) # finds the face with the highest number of instances [THIS IS OUR PREDICTION]
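    # Note (sketch): sklearn's KNeighborsClassifier would perform this
    # majority vote internally, e.g.:
    #   from sklearn.neighbors import KNeighborsClassifier
    #   clf = KNeighborsClassifier(n_neighbors=10).fit(trainingData['trainingData'], labels)
    #   face = int(clf.predict(accel)[0])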
return face
```
|
{
"source": "jeb2260/Jeopardy",
"score": 3
}
|
#### File: jeb2260/Jeopardy/j-archive-parser.py
```python
from __future__ import print_function
from bs4 import BeautifulSoup
import time
import lxml
import sys
import os
import re
import csv
import progressbar
import concurrent.futures as futures
from string import punctuation
# Break up CSVs into seasons
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SITE_FOLDER = os.path.join(CURRENT_DIR, "j-archive archive")
SAVE_FOLDER = os.path.join(CURRENT_DIR, "j-archive-csv")
NUM_THREADS = 2
try:
import multiprocessing
NUM_THREADS = multiprocessing.cpu_count() * 2
print('Using {} threads'.format(NUM_THREADS))
except (ImportError, NotImplementedError):
pass
def main():
create_save_folder()
get_all_seasons()
#Create a folder, if there isn't already one, to save season csv's in
def create_save_folder():
if not os.path.isdir(SAVE_FOLDER):
print("Creating {} folder".format(SAVE_FOLDER))
os.mkdir(SAVE_FOLDER)
#Get a list of all seasons from the archive folder. Then iterate through the list,
#parsing several seasons concurrently with a thread pool.
def get_all_seasons():
seasons = sorted([int(re.search(r'(\d+)', d).group(1)) for d in os.listdir(SITE_FOLDER) if os.path.isdir(os.path.join(SITE_FOLDER, d))])
with futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
for season in seasons:
f = executor.submit(parse_season, season)
def parse_season(season):
print('Starting season {}'.format(season))
season_folder = os.path.join(SITE_FOLDER, 'season {}'.format(season))
    #this list comprehension doesn't preserve episode order so I'll sort it manually
    files = [os.path.join(season_folder, f) for f in os.listdir(season_folder) if os.path.isfile(os.path.join(season_folder, f))]
    files.sort()
    #Filter out hidden files such as .DS_Store (more robust than deleting files[0])
    files = [f for f in files if not os.path.basename(f).startswith('.')]
#Name and set up path for csv file in created folder using the name/number of season
saveFile = os.path.join(SAVE_FOLDER, 'j-archive-season-{}.csv'.format(season))
#Create csv file in write mode with utf-8 encoding
with open(saveFile,'w+',newline='',encoding='utf-8') as csvfile:
#Set up csv writer
episodeWriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
#Write titles to csv file
episodeWriter.writerow(['epNum', 'airDate', 'extra_info', 'round_name', 'coord', 'category', 'order', 'value', 'daily_double', 'question', 'answer', 'correctAttempts', 'wrongAttempts', 'leftResult', 'middleResult', 'rightResult'])
#144 episodes in Season 36, this prints 144 so it is right
#BUT it's not parsing those 144, only 26 of them before stopping
print(len(files))
for file_i in range(len(files)):
print('\rSeason {}: Parsing episode {}/{}'.format(season,file_i,len(files)), flush=True)
ep = parse_episode(files[file_i])
if ep:
ep = [[[clueElement for clueElement in clue] for clue in round] for round in ep]
for round in ep:
for question in round:
episodeWriter.writerow(question)
print('Season {} complete'.format(season))
def parse_episode(episodeLink):
#Get episode page
episode = open(episodeLink, encoding="utf-8")
soupEpisode = BeautifulSoup(episode, 'lxml')
episode.close()
#Get all names of contestants (contestants are named by their first names when right/wrong)
contestants = [cat.text for cat in soupEpisode.find_all('p', class_='contestants')]
firstNames = [x.split()[0] for x in contestants]
rightContestant = firstNames[0].lower()
middleContestant = firstNames[1].lower()
leftContestant = firstNames[2].lower()
#Get episode number (different from ID) from page title
epNum = re.search(r'#(\d+)', soupEpisode.title.text).group(1)
#Get extra info about episode from top of page
extraInfo = soupEpisode.find('div', id='game_comments').text
#Check for special season names (Super Jeopardy, Trebek Pilots, anything non-number)
sj = re.compile(r'(Super Jeopardy!) show #(\d+)')
if sj.search(soupEpisode.title.text):
epNum = ' '.join(sj.search(soupEpisode.title.text).groups())
trbk = re.compile(r'(Trebek pilot) #(\d+)')
if trbk.search(soupEpisode.title.text):
epNum = ' '.join(trbk.search(soupEpisode.title.text).groups())
#Get episode air date from page title (format YYYY-MM-DD)
airDate = re.search(r'[0-9]{4}-[0-9]{2}-[0-9]{2}', soupEpisode.title.text).group()
#Booleans to check if page has each round type (TB = TieBreak)
hasRoundJ = True if soupEpisode.find(id='jeopardy_round') else False
hasRoundDJ = True if soupEpisode.find(id='double_jeopardy_round') else False
hasRoundFJ = True if soupEpisode.find(id='final_jeopardy_round') else False
hasRoundTB = True if len(soupEpisode.find_all(class_='final_round')) > 1 else False
#List of rounds that we have parsed
parsedRounds = []
#For each round type, if exists in page, parse
if hasRoundJ:
j_table = soupEpisode.find(id='jeopardy_round')
#Pass epNum and airDate to so all info can be added into array as a question at once
parsedRounds.append(parse_round(0, j_table, epNum, airDate, extraInfo, leftContestant, middleContestant, rightContestant))
if hasRoundDJ:
dj_table = soupEpisode.find(id='double_jeopardy_round')
#Pass epNum and airDate to so all info can be added into array as a question at once
parsedRounds.append(parse_round(1, dj_table, epNum, airDate, extraInfo, leftContestant, middleContestant, rightContestant))
if hasRoundFJ:
fj_table = soupEpisode.find(id='final_jeopardy_round').find_all(class_='final_round')[0]
#Pass epNum and airDate to so all info can be added into array as a question at once
parsedRounds.append(parse_round(2, fj_table, epNum, airDate, extraInfo, leftContestant, middleContestant, rightContestant))
if hasRoundTB:
tb_table = soupEpisode.find(id='final_jeopardy_round').find_all(class_='final_round')[1]
parsedRounds.append(parse_round(3, tb_table, epNum, airDate, extraInfo, leftContestant, middleContestant, rightContestant))
#Some episodes have pages, but don't have any actual episode content in them
if parsedRounds:
return parsedRounds
else:
return None
#Parse a single round layout (Jeopardy, Double Jeopardy, Final Jeopardy)
#Final is different than regular and double. Only has a single clue, and has multiple responses and bets.
def parse_round(round, table, epNum, airDate, extraInfo, leftContestant, middleContestant, rightContestant):
roundClues = []
if round < 2:
#Get list of category names
categories = [cat.text for cat in table.find_all('td', class_='category_name')]
#Variable for tracking which column (category) currently getting clues from
x = 0
for clue in table.find_all('td', class_='clue'):
#Checks if clue exists
exists = True if clue.text.strip() else False
if exists:
#Clue text <td> has id attribute in the format clue_round_x_y, one indexed
#Extract coordinates from id text
coord = tuple([int(x) for x in re.search(r'(\d)_(\d)', clue.find('td', class_='clue_text').get('id')).groups()])
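                #e.g. (illustrative) id="clue_DJ_3_4" -> coord == (3, 4), i.e. column 3, row 4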
valueRaw = clue.find('td', class_=re.compile('clue_value')).text
#Strip down value text to just have number (daily doubles have DD:)
try:
value = (int(valueRaw.lstrip('D: $').replace(',','')),)
except:
value = (-100,)
question = clue.find('td', class_='clue_text').text
#Answers to questions (both right and wrong) are in hover, each with a class to specify color
answer = BeautifulSoup(clue.find('div', onmouseover=True).get('onmouseover'), 'lxml').find('em', class_='correct_response').text
daily_double = True if re.match(r'DD:', valueRaw) else False
#Let's see who buzzed in and got the right answer (can't use .text because someone may not have gotten it right!)
right = BeautifulSoup(clue.find('div', onmouseover=True).get('onmouseover'), 'lxml').find('td', class_='right')
#There may be more than one wrong answer!
wrong = BeautifulSoup(clue.find('div', onmouseover=True).get('onmouseover'), 'lxml').find_all('td', class_='wrong')
#Let's keep track of who got this question right/wrong
#+1 means contestant got it right, 0 if they didn't buzz in, -1 if they buzzed in and got it wrong
leftResult = 0
middleResult = 0
rightResult = 0
if right != None:
contestant = (right.text).strip(punctuation).lower()
if contestant==leftContestant:
leftResult = 1
if contestant==middleContestant:
middleResult = 1
if contestant==rightContestant:
rightResult = 1
#Check if "Triple Stumper" appears (makes the wrong attempts calculation incorrect)
tripleStumper = 0
for w in wrong:
contestant = (w.text).strip(punctuation).lower()
if contestant==leftContestant:
leftResult = -1
if contestant==middleContestant:
middleResult = -1
if contestant==rightContestant:
rightResult = -1
if contestant=="triple stumper":
tripleStumper=1
wrongAttempts = len(wrong) if tripleStumper==0 else (len(wrong)-1)
#Some odd situations with more than one correct response (?) ---- Maybe address????
correctAttempts = 0 if right==None else 1
order = clue.find('td', class_='clue_order_number').text
category = categories[x]
round_name = 'Jeopardy' if round == 0 else 'Double Jeopardy'
#SUGGESTION: maybe add who got the question right?
#Add all retrieved data onto array
roundClues.append([epNum, airDate, extraInfo, round_name, coord, category, order, value, daily_double, question, answer, correctAttempts, wrongAttempts, leftResult, middleResult, rightResult])
#Tracking current column
x = 0 if x == 5 else x + 1
elif round == 2:
#Final Jeopardy
coord = (1,1)
rawValue = [x.text for x in BeautifulSoup(table.find('div', onmouseover=True).get('onmouseover'), 'lxml').find_all(lambda tag: tag.name == 'td' and not tag.attrs)]
value = tuple([int(v.lstrip('D: $').replace(',','')) for v in rawValue])
question = table.find('td', id='clue_FJ').text
answer = BeautifulSoup(table.find('div', onmouseover=True).get('onmouseover'), 'lxml').find('em').text
daily_double = False
#This time it must be find_all!!!! More than one person can get Final Jeopardy right
right = BeautifulSoup(table.find('div', onmouseover=True).get('onmouseover'), 'lxml').find_all('td', class_='right')
#There may be more than one wrong answer!
wrong = BeautifulSoup(table.find('div', onmouseover=True).get('onmouseover'), 'lxml').find_all('td', class_='wrong')
#Let's keep track of who got this question right/wrong
#+1 means contestant got it right, 0 if they didn't buzz in, -1 if they buzzed in and got it wrong
leftResult = 0
middleResult = 0
rightResult = 0
for r in right:
contestant = (r.text).strip(punctuation).lower()
if contestant==leftContestant:
leftResult = 1
if contestant==middleContestant:
middleResult = 1
if contestant==rightContestant:
rightResult = 1
        tripleStumper = 0
        for w in wrong:
            contestant = (w.text).strip(punctuation).lower()
            if contestant==leftContestant:
                leftResult = -1
            if contestant==middleContestant:
                middleResult = -1
            if contestant==rightContestant:
                rightResult = -1
            #contestant is lowercased above, so compare against the lowercase string
            if contestant=="triple stumper":
                tripleStumper = 1
        wrongAttempts = len(wrong) if tripleStumper==0 else (len(wrong)-1)
        #right is a find_all() list here, so count the correct responses directly
        correctAttempts = len(right)
order = 0
category = table.find('td', class_='category_name').text
round_name = 'Final Jeopardy'
roundClues.append([epNum, airDate, extraInfo, round_name, coord, category, order, value, daily_double, question, answer, correctAttempts, wrongAttempts, leftResult, middleResult, rightResult])
else:
#Tiebreaker round
coord = (1,1)
value = ()
question = table.find('td', id='clue_TB').text
answer = BeautifulSoup(table.find('div', onmouseover=True).get('onmouseover'), 'lxml').find('em').text
daily_double = False
        #Let's see who buzzed in and got the right answer
        #(use table here; there is no per-cell clue variable in the tiebreaker round)
        right = BeautifulSoup(table.find('div', onmouseover=True).get('onmouseover'), 'lxml').find('td', class_='right')
        #There may be more than one wrong answer!
        wrong = BeautifulSoup(table.find('div', onmouseover=True).get('onmouseover'), 'lxml').find_all('td', class_='wrong')
#Let's keep track of who got this question right/wrong
#+1 means contestant got it right, 0 if they didn't buzz in, -1 if they buzzed in and got it wrong
leftResult = 0
middleResult = 0
rightResult = 0
if right != None:
contestant = (right.text).strip(punctuation).lower()
if contestant==leftContestant:
leftResult = 1
if contestant==middleContestant:
middleResult = 1
if contestant==rightContestant:
rightResult = 1
for w in wrong:
contestant = (w.text).strip(punctuation).lower()
if contestant==leftContestant:
leftResult = -1
if contestant==middleContestant:
middleResult = -1
if contestant==rightContestant:
rightResult = -1
wrongAttempts = len(wrong)
correctAttempts = 0 if right==None else 1
order = 0
category = table.find('td', class_='category_name').text
round_name = 'Tiebreaker'
roundClues.append([epNum, airDate, extraInfo, round_name, coord, category, order, value, daily_double, question, answer, correctAttempts, wrongAttempts, leftResult, middleResult, rightResult])
return roundClues
if __name__ == "__main__":
main()
```
|
{
"source": "jeb5/markdown-link-review",
"score": 3
}
|
#### File: markdown-link-review/markdown-link-reviewer/refinementInterface.py
```python
from aiohttp import web
import os
import json
from selenium.webdriver.chrome.webdriver import WebDriver  # also makes selenium.webdriver importable below
import websockets
import asyncio
import selenium
from webbrowser import open as openLink
from utils import ServerCloseException
WEBDIR = os.path.join(os.path.dirname(__file__), "refinementInterface")
FILE_SERVER_PORT = 8005
WEBSOCKET_PORT = 8006
async def serveHtml():
def respondWithFile(file):
def handleReq(req):
return web.FileResponse(file)
return handleReq
app = web.Application()
app.add_routes([web.view('/', respondWithFile(os.path.join(WEBDIR, "refinementInterface.html")))])
app.add_routes([web.static('/', WEBDIR)])
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, 'localhost', FILE_SERVER_PORT)
openLink(f"http://localhost:{FILE_SERVER_PORT}")
await site.start()
connections = set()
def driverSafeGet(driver, url):
try:
return driver.get(url)
except selenium.common.exceptions.WebDriverException as e:
pass # If the browser can't open a link, this shouldn't throw an error.
# they shouldn't have told me about nested methods
def websocketLinkRefiner(links, pbar):
linkIndex = {link.getId(): link for link in links}
pendingLinkIds = [link.getId() for link in links]
driver = selenium.webdriver.Chrome() # TODO: handle driver being closed
driverSafeGet(driver, links[0].url)
    async def linkSend(linkId):
        return json.dumps(linkIndex[linkId].toJudgementDict())
async def sendGreeting(ws):
await ws.send(json.dumps({"type": "hello", "data": pendingLinkIds}))
async def handleMessage(message, ws):
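        # Expected client message shapes (inferred from the handlers below):
        #   {"type": "startLink", "data": {"id": ...}}
        #   {"type": "updateLink", "data": {"id": ..., "broken": ..., "problem": ...,
        #       "replacementUrl": ..., "replacementName": ..., "needRemove": ..., "note": ...}}
        #   {"type": "endJudgement"}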
msgDict = json.loads(message)
msgType = msgDict["type"]
msgData = msgDict.get("data")
if msgType == "startLink":
link = linkIndex[msgData["id"]]
resJson = json.dumps({"type": "link", "data": link})
sends = [ws.send(resJson) for ws in connections]
pbar.update(len(links) - len(pendingLinkIds), str(link))
await asyncio.gather(*sends) # starting a link starts this link for all clients
driverSafeGet(driver, link.url) # TODO: make this async - some webpages take forever to load
elif msgType == "updateLink":
linkId = msgData["id"]
link = linkIndex[linkId]
link.humanJudge(msgData.get("broken"), problem=msgData.get("problem"), urlUpdate=msgData.get("replacementUrl"), nameUpdate=msgData.get("replacementName"), needRemove=msgData.get("needRemove"), note=msgData.get("note"))
if linkId in pendingLinkIds:
pendingLinkIds.remove(linkId)
pbar.update(len(links) - len(pendingLinkIds), str(link))
await ws.send(json.dumps({"type": "linkUpdated", "data": linkId}))
elif msgType == "endJudgement":
# This closes the server (hackily)
raise ServerCloseException()
async def wsHandler(ws, path):
connections.add(ws)
await sendGreeting(ws)
try:
async for message in ws:
await handleMessage(message, ws)
finally:
connections.remove(ws)
return wsHandler
async def refinementLoop(suspectLinks, pBar):
htmlServer = asyncio.create_task(serveHtml())
# open a user's web browser here
try:
serve = await websockets.serve(websocketLinkRefiner(suspectLinks, pBar), "localhost", WEBSOCKET_PORT) # the handler function is called once PER connection
await serve.wait_closed()
except ServerCloseException: # this is surely wrong
serve.close()
htmlServer.cancel()
```
|
{
"source": "jebabi/controllerx",
"score": 3
}
|
#### File: core/type/media_player_controller.py
```python
from const import MediaPlayer
from core.controller import ReleaseHoldController, action
from core.stepper import Stepper
from core.stepper.minmax_stepper import MinMaxStepper
DEFAULT_VOLUME_STEPS = 10
class MediaPlayerController(ReleaseHoldController):
def initialize(self):
super().initialize()
self.media_player = self.args["media_player"]
volume_steps = self.args.get("volume_steps", DEFAULT_VOLUME_STEPS)
self.volume_stepper = MinMaxStepper(0, 1, volume_steps)
self.volume_level = 0
def get_type_actions_mapping(self):
return {
MediaPlayer.HOLD_VOLUME_DOWN: (self.hold, Stepper.DOWN),
MediaPlayer.HOLD_VOLUME_UP: (self.hold, Stepper.UP),
MediaPlayer.CLICK_VOLUME_DOWN: self.volume_down,
MediaPlayer.CLICK_VOLUME_UP: self.volume_up,
MediaPlayer.RELEASE: self.release,
MediaPlayer.PLAY_PAUSE: self.play_pause,
MediaPlayer.NEXT_TRACK: self.next_track,
MediaPlayer.PREVIOUS_TRACK: self.previous_track,
}
@action
async def play_pause(self):
self.call_service("media_player/media_play_pause", entity_id=self.media_player)
@action
async def previous_track(self):
self.call_service(
"media_player/media_previous_track", entity_id=self.media_player
)
@action
async def next_track(self):
self.call_service("media_player/media_next_track", entity_id=self.media_player)
@action
async def volume_up(self):
await self.prepare_volume_change()
await self.volume_change(Stepper.UP)
@action
async def volume_down(self):
await self.prepare_volume_change()
await self.volume_change(Stepper.DOWN)
@action
async def hold(self, direction):
await self.prepare_volume_change()
await super().hold(direction)
async def prepare_volume_change(self):
volume_level = await self.get_entity_state(
self.media_player, attribute="volume_level"
)
if volume_level is not None:
self.volume_level = volume_level
async def volume_change(self, direction):
self.volume_level, exceeded = self.volume_stepper.step(
self.volume_level, direction
)
self.call_service(
"media_player/volume_set",
entity_id=self.media_player,
volume_level=self.volume_level,
)
return exceeded
async def hold_loop(self, direction):
return await self.volume_change(direction)
def default_delay(self):
return 500
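# For reference (sketch, assuming the Stepper API used above): a
# MinMaxStepper(0, 1, 10) moves volume_level in steps of (1 - 0) / 10 = 0.1,
# and step() returns the new value plus whether a min/max bound was hit:
#   new_level, exceeded = MinMaxStepper(0, 1, 10).step(0.95, Stepper.UP)
#   # -> new_level presumably clamped to 1.0, exceeded == True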
```
#### File: controllerx/tests/devices_test.py
```python
import importlib
import os
import pkgutil
from tests.utils import hass_mock, get_instances
import devices as devices_module
from core import Controller
from core import type as type_module
def _import_modules(file_dir, package):
pkg_dir = os.path.dirname(file_dir)
for (module_loader, name, ispkg) in pkgutil.iter_modules([pkg_dir]):
if ispkg:
_import_modules(pkg_dir + "/" + name + "/__init__.py", package + "." + name)
else:
importlib.import_module("." + name, package)
def _all_subclasses(cls):
return list(
set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in _all_subclasses(c)]
)
)
def get_devices():
_import_modules(devices_module.__file__, devices_module.__package__)
subclasses = _all_subclasses(Controller)
devices = [cls_() for cls_ in subclasses if len(cls_.__subclasses__()) == 0]
return devices
def check_mapping(mapping, all_possible_actions, device):
if mapping is None:
return
for k, v in mapping.items():
if type(v) != str:
raise ValueError(
"The value from the mapping should be a string, matching "
+ "one of the actions from the controller. "
+ f"The possible actions are: {all_possible_actions}. "
+ f"Device class: {device.__class__.__name__}"
)
if v not in all_possible_actions:
raise ValueError(
f"{v} not found in the list of possible action from the controller. "
+ f"The possible actions are: {all_possible_actions}"
)
def test_devices(hass_mock):
devices = get_instances(
devices_module.__file__, devices_module.__package__, Controller
)
for device in devices:
type_actions_mapping = device.get_type_actions_mapping()
if type_actions_mapping is None:
continue
possible_actions = list(type_actions_mapping.keys())
mappings = device.get_z2m_actions_mapping()
check_mapping(mappings, possible_actions, device)
mappings = device.get_deconz_actions_mapping()
check_mapping(mappings, possible_actions, device)
mappings = device.get_zha_actions_mapping()
check_mapping(mappings, possible_actions, device)
```
#### File: controllerx/tests/utils.py
```python
import importlib
import os
import pkgutil
import sys
import appdaemon.plugins.hass.hassapi as hass
import pytest
sys.path.append("apps/controllerx")
from core.controller import Controller
class IntegrationMock:
def __init__(self, name, controller, mocker):
self.name = name
self.controller = controller
self.get_actions_mapping = mocker.stub(name="get_actions_mapping")
self.listen_changes = mocker.stub(name="listen_changes")
super().__init__()
@pytest.fixture
def hass_mock(monkeypatch, mocker):
"""
    Fixture to set up the tests, mocking AppDaemon functions
"""
fake_fn = lambda *args, **kwargs: None
monkeypatch.setattr(hass.Hass, "__init__", fake_fn)
monkeypatch.setattr(hass.Hass, "listen_event", fake_fn)
monkeypatch.setattr(hass.Hass, "listen_state", fake_fn)
monkeypatch.setattr(hass.Hass, "log", fake_fn)
monkeypatch.setattr(hass.Hass, "call_service", fake_fn)
@pytest.fixture
def fake_controller(hass_mock):
c = Controller()
c.args = {}
return c
def _import_modules(file_dir, package):
pkg_dir = os.path.dirname(file_dir)
for (module_loader, name, ispkg) in pkgutil.iter_modules([pkg_dir]):
if ispkg:
_import_modules(pkg_dir + "/" + name + "/__init__.py", package + "." + name)
else:
importlib.import_module("." + name, package)
def _all_subclasses(cls):
return list(
set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in _all_subclasses(c)]
)
)
def get_instances(file_, package_, class_):
_import_modules(
file_, package_,
)
subclasses = _all_subclasses(class_)
devices = [
cls_()
for cls_ in subclasses
if len(cls_.__subclasses__()) == 0 and package_ in cls_.__module__
]
return devices
```
|
{
"source": "jebas/ansible-role-dns",
"score": 3
}
|
#### File: ansible-role-dns/lookup_plugins/cluster_dns.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: random_choice
author: <NAME> <<EMAIL>>
version_added: "1.1"
short_description: return random element from list
description:
- The 'random_choice' feature can be used to pick something at random. While it's not a load balancer (there are modules for those),
it can somewhat be used as a poor man's load balancer in a MacGyver like situation.
- At a more basic level, they can be used to add chaos and excitement to otherwise predictable automation environments.
"""
EXAMPLES = """
- name: Magic 8 ball for MUDs
debug:
msg: "{{ item }}"
with_random_choice:
- "go through the door"
- "drink from the goblet"
- "press the red button"
- "do nothing"
"""
RETURN = """
_raw:
description:
- random item
"""
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms=None, servers=None, zones=None, hostvars=None, grouping=None, **kwargs):
ret = zones
for zone in zones:
zone['ns'] = []
for server in servers:
zone['ns'].append({'name': f'{hostvars[server]["ansible_hostname"]}.{zone["name"]}.'})
            if 'records' not in zone:
zone['records'] = []
for server in servers:
if hostvars[server]['ansible_default_ipv4']:
zone['records'].append({
'name': hostvars[server]["ansible_hostname"],
'value': hostvars[server]['ansible_default_ipv4']['address']})
if grouping:
zone['records'].append({
'name': grouping,
'value': hostvars[server]['ansible_default_ipv4']['address']})
if hostvars[server]['ansible_default_ipv6']:
zone['records'].append({
'name': hostvars[server]["ansible_hostname"],
'type': 'AAAA',
'value': hostvars[server]['ansible_default_ipv6']['address']})
if grouping:
zone['records'].append({
'name': grouping,
'type': 'AAAA',
'value': hostvars[server]['ansible_default_ipv6']['address']})
return ret
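# Example of the returned structure (illustrative sketch) for one server
# "ns1" with default IPv4 10.0.0.5 and grouping "cluster":
#   [{'name': 'example.com',
#     'ns': [{'name': 'ns1.example.com.'}],
#     'records': [{'name': 'ns1', 'value': '10.0.0.5'},
#                 {'name': 'cluster', 'value': '10.0.0.5'}]}]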
```
|
{
"source": "Je-Ba/Wind-farm-wake-control-using-convolutional-neural-networks",
"score": 3
}
|
#### File: Wind-farm-wake-control-using-convolutional-neural-networks/CNNWake/HeatMap.py
```python
import torch
import matplotlib.pyplot as plt
import numpy as np
import time
import floris.tools as wfct
from superposition import super_position
from optimisation import FLORIS_wake_steering, CNNwake_wake_steering
from superposition import FLORIS_farm_power, CNNWake_farm_power
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "<EMAIL>"
__status__ = "Development"
def visualize_turbine(plane, domain_size, nr_points, title="", ax=None):
"""
Function to plot the flow field around a single turbine
Args:
plane (2d numpy array): Flow field around turbine
domain_size (list or numpy array): x and y limits of the domain,
the first two values correspond to min and max of x and
similar for the y values [x_min, x_max, y_min, y_max]
nr_points (list or numpy array): Nr. of points in the array
title (str, optional): Title of the graph. Defaults to "".
ax (ax.pcolormesh, optional): Pyplot subplot class,
adds the plot to this location.
Returns:
ax.pcolormesh: Image of the flow field
"""
# create mesh grid for plotting
x = np.linspace(domain_size[0], domain_size[1], nr_points[0])
y = np.linspace(domain_size[2], domain_size[3], nr_points[1])
x_mesh, y_mesh = np.meshgrid(x, y)
# Plot the cut-through
im = ax.pcolormesh(x_mesh, y_mesh, plane, shading='auto', cmap="coolwarm")
ax.set_title(title)
# Make equal axis
ax.set_aspect("equal")
return im
def visualize_farm(
plane, nr_points, size_x, size_y, title="", ax=None, vmax=False):
"""
Function to plot flow-field around a wind farm.
Args:
plane (2d numpy array): Flow field of wind farm
nr_points (list or np array): List of nr of points in x and y
size_x (int): Size of domain in x direction (km)
size_y (int): Size of domain in y direction (km)
title (str, optional): Title of the plot. Defaults to "".
ax (ax.pcolormesh, optional): Pyplot subplot class, adds the plot
to this location.
        vmax (bool or float, optional): Maximum value to plot. If False,
            the max value of the plane is used as vmax
Returns:
ax.pcolormesh: Image of the flow field around the wind farm
"""
x = np.linspace(0, size_x, nr_points[0]) # this is correct!
y = np.linspace(0, size_y, nr_points[1])
x_mesh, y_mesh = np.meshgrid(x, y)
# if no vmax is set, use the maximum of plane
if vmax is False:
vmax = np.max(plane)
# Plot the cut-through
im = ax.pcolormesh(x_mesh, y_mesh, plane,
shading='auto', cmap="coolwarm", vmax=vmax)
ax.set_title(title)
# Make equal axis
ax.set_aspect("equal")
return im
def Compare_CNN_FLORIS(
x_position, y_position, yawn_angles, wind_velocity, turbulent_int,
CNN_generator, Power_model, TI_model, device,
florisjason_path='', plot=False):
"""
Generates the wind field around a wind park using the neural networks.
    The individual wakes of the turbines are calculated using the CNN and
    superimposed onto the wind farm flow field using a super-position model.
    The energy produced by the turbines is calculated using another fully
    connected network from the flow data just upstream of the turbine.
    The function also generates the same wind park flow field using FLORIS so
    that the two solutions can be compared when plot=True is set.
Args:
x_position (list): 1d array of x locations of the wind turbines in m.
y_position (list): 1d array of y locations of the wind turbines in m.
yawn_angles (list): 1d array of yaw angles of every wind turbine.
wind_velocity (float): Free stream wind velocity in m/s.
        turbulent_int (float): Turbulence intensity as a fraction, e.g. 0.06.
device (torch.device): Device to store and run the neural network on,
cpu or cuda
florisjason_path (string): Location of the FLORIS jason file
plot (bool, optional): If True, the FLORIS and CNN solution will
be plotted and compared.
Returns:
numpy array: Final 2d array of flow field around the wind park.
"""
# Define the x and y length of a single cell in the array
# This is set by the standard value used in FLORIS wakes
dx = 18.4049079755
dy = 2.45398773006
    # Set the domain size to be 3000 m and 300 m larger than the
    # maximum x and y positions of the wind park.
    # If a larger physical domain is used, adapt these values
x_max = np.max(x_position) + 3000
y_max = np.max(y_position) + 300
# Number of cells in x and y needed to create a 2d array of
# that is x_max x y_max using dx, dy values
Nx = int(x_max / dx)
Ny = int(y_max / dy)
# Initialise a 2d array of the wind park with the
# inlet wind speed
farm_array = np.ones((Ny, Nx)) * wind_velocity
# set up FLORIS model
floris_model = wfct.floris_interface.FlorisInterface(
florisjason_path + "FLORIS_input_gauss.json")
floris_model.reinitialize_flow_field(
layout_array=[x_position, np.array(y_position)])
for _ in range(0, len(x_position)):
floris_model.change_turbine([_], {'yaw_angle': yawn_angles[_],
"blade_pitch": 0.0})
floris_model.reinitialize_flow_field(wind_speed=wind_velocity,
turbulence_intensity=turbulent_int)
start_t = time.time()
    # Calculate using FLORIS and extract 2d flow field
floris_model.calculate_wake()
print(f"Time taken for FLORIS to generate"
f" wind park: {time.time() - start_t:.3f}")
floris_plane = floris_model.get_hor_plane(
height=90, x_resolution=Nx, y_resolution=Ny, x_bounds=[0, x_max],
y_bounds=[0, y_max]).df.u.values.reshape(Ny, Nx)
floris_power = floris_model.get_turbine_power()
floris_ti = floris_model.get_turbine_ti()
# print(floris_power, floris_ti)
power_CNN = []
ti_CNN = []
t = time.time()
with torch.no_grad():
        # Do the CNNwake calculations
for i in range(len(x_position)):
# determine the x and y cells that the turbine center is at
turbine_cell = [int((x_position[i]) / dx),
int((y_position[i] - 200) / dy)]
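            # e.g. (illustrative) a turbine at x=700 m, y=500 m lands in cell
            # (int(700 / 18.4), int((500 - 200) / 2.45)) == (38, 122)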
t1 = time.time()
            # extract wind speeds along the rotor, 3 cells (roughly 55 m) upstream
u_upstream_hub = farm_array[
turbine_cell[1] + 45: turbine_cell[1] + 110,
turbine_cell[0] - 3]
# Do an running average, this is done because CNNwake has slight
# variations in the u predictions, also normalise the u values
u_power = [
((u_upstream_hub[i - 1] + u_upstream_hub[i] +
u_upstream_hub[i + 1]) / 3) / 12 for
i in np.linspace(5, 55, 40, dtype=int)]
u_power = np.append(u_power, yawn_angles[i] / 30)
u_power = np.append(u_power, turbulent_int)
            # The local TI does not change from the inlet TI if the turbine
            # is not covered by a wake, therefore check if all values
            # in u_power are the same -> means no wake coverage
# Local TI also depends on yaw, if yaw is less than 12° and
# turbine is not in wake -> use inlet TI for local TI
if np.allclose(u_power[0], u_power[0:-3],
rtol=1e-02, atol=1e-02) and abs(u_power[-2]) < 0.4:
# print("Turbine in free stream, set ti to normal")
ti = turbulent_int
else:
ti = TI_model((torch.tensor(u_power).float().to(device))).detach().cpu().numpy() * 0.30000001192092896
            # regulate TI to ensure it is not too different from free stream
if ti < turbulent_int * 0.7:
# print(f"TI REGULATED 1 AT {i}")
ti = turbulent_int * 1.5
# clip ti values to max and min trained
ti = np.clip(ti, 0.015, 0.25).item(0)
ti_CNN.append(ti)
u_power[-1] = ti
energy = Power_model(torch.tensor(u_power).float().to(device)).detach().cpu().numpy() * 4834506
power_CNN.append(energy[0])
hub_speed = np.round(np.mean(u_upstream_hub), 2)
turbine_condition = [[hub_speed, ti, yawn_angles[i]]]
turbine_field = CNN_generator(torch.tensor(turbine_condition).float().to(device))
            # Use CNN to calculate the wake of an individual turbine
            # Since the CNN output is normalised,
            # multiply by 12 and create a numpy array
turbine_field = turbine_field[0][0].detach().cpu().numpy() * 12
            # Place the wake of the individual turbine in the farm_array
farm_array = super_position(
farm_array, turbine_field, turbine_cell, hub_speed,
wind_velocity, sp_model="SOS")
# print information
print(f"Time taken for CNNwake to generate wind park: {time.time() - t:.3f}")
print(f"CNNwake power prediction error: "
f"{100 * np.mean(abs(np.array(floris_power) - np.array(power_CNN)) / np.array(floris_power)):.2f} %")
print(f"CNNwake TI prediction error: {100 * np.mean(abs(np.array(floris_ti) - np.array(ti_CNN)) / np.array(floris_ti)):.2f} %")
print(f"APWP error: {100 * np.mean(abs(floris_plane - farm_array) / np.max(floris_plane)):.2f}")
if plot:
plt.rcParams.update({'font.size': 16})
# Plot wake fields of both wind farms and error field
fig, axarr = plt.subplots(3, 1, sharex=True, figsize=(20, 49))
im1 = visualize_farm(farm_array, nr_points=[Nx, Ny], size_x=x_max,
size_y=y_max, title="CNNwake", ax=axarr[0])
im2 = visualize_farm(floris_plane, nr_points=[Nx, Ny], size_x=x_max,
size_y=y_max, title="FLORIS", ax=axarr[1])
im3 = visualize_farm(
(100 * abs(floris_plane - farm_array) / np.max(floris_plane)),
nr_points=[Nx, Ny], size_x=x_max, size_y=y_max,
title="Pixel wise percentage error ", ax=axarr[2], vmax=20)
col1 = fig.colorbar(im1, ax=axarr[0])
col1.set_label('m/s', labelpad=15, y=1.06, rotation=0)
col2 = fig.colorbar(im2, ax=axarr[1])
col2.set_label('m/s', labelpad=15, y=1.06, rotation=0)
col3 = fig.colorbar(im3, ax=axarr[2])
col3.set_label('%', labelpad=11, y=0.9, rotation=0)
axarr[2].set_xlabel('m', fontsize=15)
axarr[0].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
axarr[1].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
axarr[2].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
# Plot TI and Power of every turbine for FLORIS adn CNNNwake
fig, axarr = plt.subplots(2, figsize=(9, 9))
axarr[0].plot(range(1, len(x_position) + 1),
np.array(power_CNN)/1.e06, 'o--', label="CNNwake")
axarr[0].plot(range(1, len(x_position) + 1),
np.array(floris_power)/1.e06, 'o--', label="FLORIS")
axarr[1].plot(range(1, len(x_position) + 1),
np.array(ti_CNN), 'o--', label="CNNwake")
axarr[1].plot(range(1, len(x_position) + 1),
floris_ti, 'o--', label="FLORIS")
axarr[0].set_ylabel('Power output [MW]', fontsize=15)
axarr[1].set_ylabel('Local TI [%]', fontsize=15)
axarr[1].set_xlabel('Turbine Nr.', rotation=0, fontsize=15)
axarr[1].legend()
axarr[0].legend()
plt.show()
return farm_array, floris_plane
if __name__ == '__main__':
# To run individual CNNWake files, the imports are not allowed to be
# relative. Instead of: from .superposition import super_position
# it needs to be: from superposition import super_position, for all CNNWake imports
# also import all NNs
from CNN_model import Generator
from FCC_model import FCNN
from superposition import super_position
# Set up/load all NNs
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
CNN_generator = Generator(3, 30).to(device)
CNN_generator.load_model('./trained_models/CNN_FLOW.pt', device=device)
CNN_generator = CNN_generator.to()
CNN_generator.eval()
# the first forward pass is super slow so do it outside loop and use the
# output for a simple assert test
example_out = CNN_generator(torch.tensor([[4, 0.1, 20]]).float().to(device))
assert example_out.size() == torch.Size([1, 1, 163, 163])
Power_model = FCNN(42, 300, 1).to(device)
Power_model.load_state_dict(torch.load('./trained_models/FCNN_POWER.pt', map_location=device))
Power_model.eval()
# the first forward pass is super slow so do it outside loop and use the
# output for a simple assert test
energy = Power_model(torch.tensor([i for i in range(0, 42)]).float().to(device))
assert energy.size() == torch.Size([1])
TI_model = FCNN(42, 300, 1).to(device)
TI_model.load_state_dict(torch.load('./trained_models/FCNN_TI.pt', map_location=device))
TI_model.eval()
# the first forward pass is super slow so do it outside loop and use the
# output for a simple assert test
TI = TI_model(torch.tensor([i for i in range(0, 42)]).float().to(device))
assert TI.size() == torch.Size([1])
# Compare a single wind farm, this will show the wake, energy and local TI
# for every turbine and compare it to FLORIS
'''farm, a = Compare_CNN_FLORIS([100, 100, 700, 700, 1200, 1200],
[300, 800, 1300, 550, 1050, 300],
[0, 0, 0, 0, 0, 0, 0], 11.6, 0.06,
CNN_generator, Power_model,
TI_model, device, plot=True)'''
def heatmap(xs, ys, res=10):
"""
Assess the performance of the DNN vs FLORIS on
parametric optimiser calls for a wide range of
inlet speed and turbulence intensity for a
specific array configuration.
Args:
xs (numpy array of floats) Turbine x coordinates.
ys (numpy array of floats) Turbine y coordinates.
res (int, optional) Resolution of heatmap.
"""
# Wind speeds and turbulence intensities examined
x_ws = np.linspace(3, 12, res)
y_ti = np.linspace(0.05, 0.25, res)
# Initialisation of power and timing heatmaps
g0 = np.zeros((res, res))
g1 = np.zeros((res, res))
g2 = np.zeros((res, res))
t1 = np.zeros((res, res))
t2 = np.zeros((res, res))
floris_park = wfct.floris_interface.FlorisInterface("FLORIS_input_gauss.json")
# Begin parametric runs
for k1 in range(res):
# Print progress
print(round(k1 / res * 100, 2), '%', 'complete.')
for k2 in range(res):
g0[k1, k2] = abs(FLORIS_farm_power([0 for _ in range(len(xs))], xs, ys, x_ws[k1], y_ti[k2], floris_park))
opt_yaw, g1[k1, k2], t1[k1, k2] = FLORIS_wake_steering(xs, ys, [0 for _ in range(len(xs))], x_ws[k1],
y_ti[k2], [-30, 30], 1.e-06, floris_path='./')
opt_yaw, g2[k1, k2], t2[k1, k2] = CNNwake_wake_steering(xs, ys, [0 for _ in range(len(xs))], x_ws[k1],
y_ti[k2], CNN_generator, Power_model, TI_model,
device, [-30, 30], 1.e-06)
    # Calculate the FLORIS power gain over the baseline
sample_1 = g1 - g0
sample_2 = g2 - g0
maxval = np.max([sample_1.min(), sample_1.max()])
minval = np.min([sample_2.min(), sample_2.max()])
makeHeatmap(np.transpose(np.flip(sample_1, 1)), x_ws, y_ti, maxval, minval, title='Floris optimisation')
    # Calculate the CNNwake (neural) power gain over the baseline
makeHeatmap(np.transpose(np.flip(sample_2, 1)), x_ws, y_ti, maxval, minval, title='Neural optimisation')
# Calculate FLORIS average time
sample_1 = t1
sample_2 = t2
maxval = np.max([sample_1.min(), sample_1.max()])
minval = 0
print('Average FLORIS time:', np.round(np.mean(t1), 2))
makeHeatmap(np.transpose(np.flip(sample_1, 1)), x_ws, y_ti, maxval, minval, title='Floris time')
# Calculate DNN average time
print('Average DNN time:', np.round(np.mean(t2), 2))
makeHeatmap(np.transpose(np.flip(sample_2, 1)), x_ws, y_ti, maxval, minval, title='Neural time')
def makeHeatmap(bitmap, x_ws, y_ti, maxval, minval, title=None):
"""
Plots bitmap of parametric optimisation runs.
Args:
bitmap (2D numpy array of floats) Calculated powers.
x_ws (1D numpy array of floats) Wind speeds.
y_ti (1D numpy array of floats) Turbulence intensities.
vmax (float, optional) Max velocity cap of plot.
title (string) Plot title.
"""
# Min and max values of heatmap
x_min = np.min(x_ws)
x_max = np.max(x_ws)
y_min = np.min(y_ti)
y_max = np.max(y_ti)
# Plot heatmap based on bitmap produced by the "Assess" function.
plt.figure()
plt.imshow(bitmap, cmap='RdYlGn', interpolation='nearest',
vmin=minval, vmax=maxval, extent=[x_min, x_max, y_min, y_max],
aspect=(x_max - x_min) / (y_max - y_min))
plt.title(title, fontname='serif')
plt.xlabel('Free stream velocity (m/s)', fontname='serif')
plt.ylabel('Turbulence intensity', fontname='serif')
plt.colorbar()
plt.show()
def power_map(xs, ys, u, ti, res=10):
    # Yaw angles examined for the two turbines
    yaw = np.linspace(0, 30, res)
    # Initialisation of the power maps
    CNN_power_map = np.zeros((res, res))
    FLORIS_power_map = np.zeros((res, res))
floris_park = wfct.floris_interface.FlorisInterface("FLORIS_input_gauss.json")
# Begin parametric runs
for k1 in range(res):
for k2 in range(res):
            FLORIS_power_map[k1, k2] = abs(
                FLORIS_farm_power([yaw[k1], yaw[k2]], xs, ys, u, ti, floris_park))/1.e6
CNN_power_map[k1, k2] = abs(CNNWake_farm_power(
[yaw[k1], yaw[k2]], xs, ys, u, ti,
CNN_generator, Power_model, TI_model, device))/1.e6
X, Y = np.meshgrid(yaw, yaw)
# Twice as wide as it is tall.
fig = plt.figure(figsize=plt.figaspect(0.5))
# ---- First subplot
ax = fig.add_subplot(1, 2, 1, projection='3d')
    surf = ax.plot_surface(X, Y, FLORIS_power_map, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
ax.set_xlabel('yaw 2')
ax.set_ylabel('yaw 1')
ax.set_zlabel('Power (MW)')
ax.set_title('FLORIS')
ax = fig.add_subplot(1, 2, 2, projection='3d')
surf = ax.plot_surface(X, Y, CNN_power_map, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
ax.set_xlabel('yaw 2')
ax.set_ylabel('yaw 1')
ax.set_zlabel('Power (MW)')
ax.set_title('CNNWAKE')
plt.show()
D = 121
# Case B (yaw)
xs = np.array([1 * D, 1 * D, 1 * D, 4.5 * D, 4.5 * D,
8 * D, 8 * D, 8 * D, 11.5 * D, 11.5 * D,
15 * D, 15 * D, 15 * D, 18.5 * D, 18.5 * D])
ys = np.array([1 * D, 5 * D, 9 * D, 3 * D, 7 * D,
1 * D, 5 * D, 9 * D, 3 * D, 7 * D,
1 * D, 5 * D, 9 * D, 3 * D, 7 * D]) + 300
#xs = np.array([1 * D, 1 * D, 8 * D, 8 * D, 15 * D, 15 * D])
#ys = np.array([1 * D, 7 * D, 1 * D, 7 * D, 1 * D, 7 * D]) + 300
#heatmap(xs, ys, res=8)
power_map([300, 900], [500, 500], 9, 0.9, res=20)
```
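A quick way to exercise `makeHeatmap` on its own is to feed it synthetic data; this is a minimal sketch, assuming it runs in the same script as the functions above (with `numpy` imported as `np`).
```python
# Minimal sketch: synthetic gains on the same wind-speed / TI grid used above.
import numpy as np

res = 10
x_ws = np.linspace(3, 12, res)       # wind speeds (m/s)
y_ti = np.linspace(0.05, 0.25, res)  # turbulence intensities
gain = np.random.default_rng(0).random((res, res))  # stand-in power gains
makeHeatmap(np.transpose(np.flip(gain, 1)), x_ws, y_ti,
            gain.max(), gain.min(), title='Synthetic gain')
```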
#### File: Wind-farm-wake-control-using-convolutional-neural-networks/tests/integration_test.py
```python
import numpy as np
import sys
import os
import torch
import random
# To import the model, need to append the main folder path to the run
# i.e. sys.path.append(path_to/acse20-acse9-finalreport-acse-jtb20)
# This works automatically on every system
sys.path.append(os.path.abspath(
os.path.dirname(os.path.abspath(__file__))[0:-6]))
import CNNWake
# Test model training
def test_train_CNN():
# Test CNN model training
# set seeds and ensure that training is
# less random
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
devices = torch.device("cpu")
# train small model on small training set for a few epochs
model, loss, val_error = \
CNNWake.train_CNN.train_CNN_model(
nr_filters=5, nr_epochs=20, learing_rate=0.003, batch_size=100,
train_size=200, val_size=5, image_size=163, device=devices,
u_range=[3, 12], ti_range=[0.015, 0.25], yaw_range=[-30, 30],
model_name='CNN.pt')
assert isinstance(model, CNNWake.Generator)
assert loss < 0.4
# a validation error below 50 seems high, but this is only a quick test:
# training runs on the CPU for under 10 seconds, and the error starts
# above 40, so a final value below 40 still shows a meaningful drop
assert val_error < 40
def test_train_FCNN_power():
# Test FCNN power model
# set seeds and ensure that training is
# less random
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
devices = torch.device("cpu")
# define number of u values sampled from horizontal line
u_samples = 40
# train small model on small training set for a few epochs
model, loss, val_error = \
CNNWake.train_FCNN.train_FCNN_model(
nr_neurons=160, input_size=u_samples, nr_epochs=30,
learing_rate=0.00005, batch_size=20, train_size=40,
val_size=5, type='power', device=devices, u_range=[3, 12],
ti_range=[0.015, 0.25], yaw_range=[-30, 30],
model_name="power_model.pt")
# check if model is correct and if the input size is correct and
# equal to the number given to the function
assert isinstance(model, CNNWake.FCNN)
# Check if model accepts the correct number of inputs
assert model.disc[0].in_features == u_samples + 2
# the loss for an untrained network is 0.5, so a test loss of less than 0.1
# shows that the model training is working
assert loss < 0.1
# the validation error starts off at more than 85%, so an error of
# less than 50% for this test shows that the training is working
assert val_error < 50
def test_train_FCNN_ti():
# Test FCNN TI training
# set seeds and ensure that training is
# less random
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
devices = torch.device("cpu")
# define number of u values sampled from horizontal line
u_samples = 20
# train small model on small training set for a few epochs
model, loss, val_error = \
CNNWake.train_FCNN.train_FCNN_model(
nr_neurons=50, input_size=u_samples, nr_epochs=40,
learing_rate=0.0003, batch_size=5, train_size=25,
val_size=6, type='TI', device=devices,
u_range=[3, 12], ti_range=[0.015, 0.25], yaw_range=[-30, 30],
model_name='./trained_models/power_model.pt')
# check if model is correct and if the input size is correct and
# equal to the number given to the function
assert isinstance(model, CNNWake.FCNN)
assert model.disc[0].in_features == u_samples + 2
# Loss of less than 0.7 shows that the model training is working
assert loss < 0.7
# the validation error starts off at more than 90%, so an error of
# less than 40% for this test shows that the training is working
assert val_error < 40
# test power output function
def test_CNNWake_farm_power_single_turbine():
# Test if CNNWake can predict the power of
# four example wind turbines to within 5% of the true power
device = torch.device("cpu")
# Set up all NN by loading pre-trained model
CNN_generator = CNNWake.Generator(3, 30).to(device)
CNN_generator.load_model('./trained_models/CNN_FLOW.pt',
device=device)
CNN_generator.eval()
Power_model = CNNWake.FCNN(42, 300, 1).to(device)
Power_model.load_state_dict(torch.load('./trained_models/FCNN_POWER.pt',
map_location=device))
Power_model.eval()
TI_model = CNNWake.FCNN(42, 300, 1).to(device)
TI_model.load_state_dict(torch.load('./trained_models/FCNN_TI.pt',
map_location=device))
TI_model.eval()
# Calculate power output of four examples using CNNWake
power1 = CNNWake.CNNWake_farm_power([0], [100], [300], 8, 0.15,
CNN_generator, Power_model,
TI_model, device)
power2 = CNNWake.CNNWake_farm_power([0], [399], [600], 11, 0.06,
CNN_generator, Power_model,
TI_model, device)
power3 = CNNWake.CNNWake_farm_power([25], [200], [400], 4.36, 0.21,
CNN_generator, Power_model,
TI_model, device)
power4 = CNNWake.CNNWake_farm_power([-13], [200], [400], 5.87, 0.09,
CNN_generator, Power_model,
TI_model, device)
# Check if CNNWake was able to predict the power generated by every test
# case is within 5 percent to the known true value
assert 100*abs(1695368.64554726849 - abs(power1))/1695368.64554726849 < 5
assert 100*abs(4373591.717498961 - abs(power2))/4373591.717498961 < 5
assert 100*abs(187942.47740620747 - abs(power3)) / 187942.47740620747 < 5
assert 100*abs(624533.4395056335 - abs(power4)) / 624533.4395056335 < 5
def test_CNNWake_farm_power_multiple_turbine():
# Test if CNNWake can predict the power of four example
# wind parks with two or more turbines to within 5% of
# the true power
device = torch.device("cpu")
# Set up all NN by loading pre-trained model
CNN_generator = CNNWake.Generator(3, 30).to(device)
CNN_generator.load_model('./trained_models/CNN_FLOW.pt',
device=device)
CNN_generator.eval()
Power_model = CNNWake.FCNN(42, 300, 1).to(device)
Power_model.load_state_dict(torch.load('./trained_models/FCNN_POWER.pt',
map_location=device))
Power_model.eval()
TI_model = CNNWake.FCNN(42, 300, 1).to(device)
TI_model.load_state_dict(torch.load('./trained_models/FCNN_TI.pt',
map_location=device))
TI_model.eval()
# Calculate the power generated by four example wind
# parks
power1 = CNNWake.CNNWake_farm_power([0, 0], [100, 1100],
[300, 300], 6.1, 0.11,
CNN_generator, Power_model,
TI_model, device)
power2 = CNNWake.CNNWake_farm_power([-25, 25], [100, 1100],
[300, 300], 6.1, 0.11,
CNN_generator, Power_model,
TI_model, device)
power3 = CNNWake.CNNWake_farm_power([-25, 15, 0], [300, 300, 850],
[300, 500, 400], 9.7, 0.19,
CNN_generator, Power_model,
TI_model, device)
power4 = CNNWake.CNNWake_farm_power([0, 13, 19, 16], [50, 600, 1200, 1900],
[400, 350, 450, 400],
11.5, 0.09,
CNN_generator, Power_model,
TI_model, device)
# Check if CNNWake power prediction is within 5% to the known value
assert 100*abs(1178044.7762486674 - abs(power1))/1178044.7762486674 < 5
assert 100*abs(1041185.7702935545 - abs(power2))/1041185.7702935545 < 5
assert 100 * abs(7478873.655768376 - abs(power3)) / 7478873.655768376 < 5
assert 100 * abs(13104825.945751127 - abs(power4)) / 13104825.945751127 < 5
def test_optimization():
# Check if CNNWake is able to optimise example wind farms
device = torch.device("cpu")
# Set up all NN by loading pre-trained model
CNN_generator = CNNWake.Generator(3, 30).to(device)
CNN_generator.load_model('./trained_models/CNN_FLOW.pt',
device=device)
Power_model = CNNWake.FCNN(42, 300, 1).to(device)
Power_model.load_state_dict(torch.load('./trained_models/FCNN_POWER.pt',
map_location=device))
Power_model.eval()
TI_model = CNNWake.FCNN(42, 300, 1).to(device)
TI_model.load_state_dict(torch.load('./trained_models/FCNN_TI.pt',
map_location=device))
TI_model.eval()
# Test if CNNwake can predict the optimal yaw angle of a single turbine
yaw1, power1, timing1 = CNNWake.CNNwake_wake_steering(
[100], [300], [-6], 7.6, 0.06, CNN_generator,
Power_model, TI_model, device, [-30, 30], 1e-06)
yaw1_flor, power1_flor, timing1_flor = CNNWake.FLORIS_wake_steering(
[100], [300], [-6], 7.6, 0.06, [-30, 30], 1e-04)
# For a single turbine, the yaw should be 0 degrees
# A small range of 2 degrees is used to allow for tolerances
assert 1 > yaw1[0] > -1
assert 1 > yaw1_flor[0] > -1
# Test if wake steering bounds work by only allowing a specific
# range of yaw angle in the optimisation
yaw2, power2, timing2 = CNNWake.CNNwake_wake_steering(
[100], [300], [19], 10.4, 0.12, CNN_generator,
Power_model, TI_model, device, [15, 25], 1e-06)
yaw2_flor, power2_flor, timing2_flor = CNNWake.FLORIS_wake_steering(
[100], [300], [19], 10.4, 0.12, [15, 25], 1e-04)
assert 16 > yaw2[0] >= 15
assert 16 > yaw2_flor[0] >= 15
yaw3, power3, timing3 = CNNWake.CNNwake_wake_steering(
[100], [300], [24], 5.4, 0.18, CNN_generator,
Power_model, TI_model, device, [-14, -5], 1e-04)
assert -5 >= yaw3[0] >= -6
# check that the optimiser reaches a 0, 0 angle for a wind park whose
# turbine wakes barely interact, meaning the best yaw angle is 0, 0
yaw4, power4, timing4 = CNNWake.CNNwake_wake_steering(
[100, 600], [300, 300], [20, -15], 5.4, 0.21, CNN_generator,
Power_model, TI_model, device, [-30, 30], 1e-03)
assert -1 < yaw4[0] < 1
assert -1 < yaw4[1] < 1
# check if CNNwake optimisation can get same results as
# FLORIS optimisation for a 1 x 2 wind farm
yaw5, power5, timing5 = CNNWake.CNNwake_wake_steering(
[100, 1100], [300, 300], [0, 0], 7.2, 0.12, CNN_generator,
Power_model, TI_model, device, [-30, 30], 1e-05)
yaw5_flor, power5_flor, timing5_flor = CNNWake.FLORIS_wake_steering(
[100, 1100], [300, 300], [0, 0], 7.2, 0.12, [-30, 30], 1e-05)
assert np.allclose(abs(yaw5), abs(yaw5_flor), atol=5)
if __name__ == '__main__':
test_train_CNN()
test_train_FCNN_power()
test_train_FCNN_ti()
test_CNNWake_farm_power_single_turbine()
test_CNNWake_farm_power_multiple_turbine()
test_optimization()
print('ALL INTEGRATION TESTS HAVE PASSED')
```
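The power assertions above all repeat one relative-error pattern; a small helper like the hypothetical `rel_err_percent` below (not part of CNNWake) makes the 5% tolerance explicit.
```python
def rel_err_percent(true_value, predicted):
    """Absolute relative error in percent between a reference and a prediction."""
    return 100 * abs(true_value - abs(predicted)) / true_value

# e.g. a 1.7 MW prediction against the first hard-coded FLORIS reference:
assert rel_err_percent(1695368.64554726849, 1.7e6) < 5
```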
|
{
"source": "JebBarbas/jebpy",
"score": 4
}
|
#### File: jebpy/jebpy/input_types.py
```python
def __input_x(function_to_decorate):
"""
Decorator to create a new input function.
"""
def input_x(prompt='',validation_function=lambda x: True,exception_function=lambda x: None,success_function=lambda x: None):
value = None
_input_ = ''
try:
_input_ = input(prompt).strip()
value = function_to_decorate(_input_)
if validation_function(value):
success_function(value)
return value
else:
raise ValueError
except Exception:
exception_function(_input_)
return input_x(prompt, validation_function, exception_function, success_function)
return input_x
@__input_x
def input_str(x):
"""
Try to get a string input that matches the validation function.
* If no validation function is given, any string will work
* If there is any error, executes the optional exception_function with the input as parameter
* On success, executes the optional success_function with the input as parameter
Args:
(optional) prompt (Any): The prompt that you'd use in a normal input() function
(optional) validation_function (function): A function used to check if the input meets the conditions; by default it always returns True
(optional) exception_function (function): A function called when there is an error converting the input or when the validation_function returns False
(optional) success_function (function): A function called when the input is successfully converted and returned
Returns:
A string value
Examples:
>>> input_str('Write your name: ', lambda x: len(x) > 0, lambda x: print('An input is required'))
Write your name:
An input is required
Write your name: <NAME>
>>>
"""
return str(x)
@__input_x
def input_str_required(x):
"""
Try to get a string input that matches the validation function and is not an empty string.
* If no validation function is given, any non-empty string will work
* If there is any error, executes the optional exception_function with the input as parameter
* On success, executes the optional success_function with the input as parameter
Args:
(optional) prompt (Any): The prompt that you'd use in a normal input() function
(optional) validation_function (function): A function used to check if the input meets the conditions; by default it always returns True
(optional) exception_function (function): A function called when there is an error converting the input or when the validation_function returns False
(optional) success_function (function): A function called when the input is successfully converted and returned
Returns:
A string value
Examples:
>>> input_str_required('Write your last name: ', exception_function=lambda x: print('An input is required'))
Write your last name:
An input is required
Write your last name: Doe
>>>
"""
if len(x) > 0:
return str(x)
else:
raise ValueError
@__input_x
def input_int(x):
"""
Try to get an integer input that matches the validation function.
* If no validation function is given, any integer will work
* If there is any error, executes the optional exception_function with the input as parameter
* On success, executes the optional success_function with the input as parameter
Args:
(optional) prompt (Any): The prompt that you'd use in a normal input() function
(optional) validation_function (function): A function used to check if the input meets the conditions; by default it always returns True
(optional) exception_function (function): A function called when there is an error converting the input or when the validation_function returns False
(optional) success_function (function): A function called when the input is successfully converted and returned
Returns:
An integer value
Examples:
>>> input_int('Write how many years do you have: ', lambda x: x >= 0, lambda x: print(x + ' is not a valid age'))
Write how many years do you have: lorem ipsum
lorem ipsum is not a valid age
Write how many years do you have: -1
-1 is not a valid age
Write how many years do you have: 10
>>>
"""
return int(x)
@__input_x
def input_float(x):
"""
Try to get a float input that matches the validation function.
* If no validation function is given, any float will work
* If there is any error, executes the optional exception_function with the input as parameter
* On success, executes the optional success_function with the input as parameter
Args:
(optional) prompt (Any): The prompt that you'd use in a normal input() function
(optional) validation_function (function): A function used to check if the input meets the conditions; by default it always returns True
(optional) exception_function (function): A function called when there is an error converting the input or when the validation_function returns False
(optional) success_function (function): A function called when the input is successfully converted and returned
Returns:
A float value
Examples:
>>> input_float('How much costs an apple? ', lambda x: x > 0, lambda x: print('The price must be valid and greater than 0'))
How much costs an apple? lorem ipsum
The price must be valid and greater than 0
How much costs an apple? 0
The price must be valid and greater than 0
How much costs an apple? 1.99
>>>
"""
return float(x)
@__input_x
def input_bool(x):
"""
Try to get a boolean input that matches the validation function.
* Only the literal strings 'True' and 'False' are accepted; if no validation function is given, either will work
* If there is any error, executes the optional exception_function with the input as parameter
* On success, executes the optional success_function with the input as parameter
Args:
(optional) prompt (Any): The prompt that you'd use in a normal input() function
(optional) validation_function (function): A function used to check if the input meets the conditions; by default it always returns True
(optional) exception_function (function): A function called when there is an error converting the input or when the validation_function returns False
(optional) success_function (function): A function called when the input is successfully converted and returned
Returns:
A boolean value
Examples:
>>> input_bool('Do you have sisters? ', exception_function=lambda x: print('Write True or False'))
Do you have sisters? yes
Write True or False
Do you have sisters? True
>>>
"""
boolean_text = x
if boolean_text == 'True':
return True
elif boolean_text == 'False':
return False
else:
raise ValueError
@__input_x
def input_yesno(x):
"""
Try to get a string input that must be 'y', 'yes', 'n' or 'no' and matches the validation function.
* If no validation function is given, any of those four answers will work
* If there is any error, executes the optional exception_function with the input as parameter
* On success, executes the optional success_function with the input as parameter
* 'y' and 'yes' return True
* 'n' and 'no' return False
* The answers are case-insensitive, so the function treats 'yes', 'Yes', 'yEs' and 'YES' the same
Args:
(optional) prompt (Any): The prompt that you'd use in a normal input() function
(optional) validation_function (function): A function used to check if the input meets the conditions; by default it always returns True
(optional) exception_function (function): A function called when there is an error converting the input or when the validation_function returns False
(optional) success_function (function): A function called when the input is successfully converted and returned
Returns:
A boolean value
Examples:
>>> input_yesno('Do you have sisters? (yes/no) ', exception_function=lambda x: print('Type yes or no'))
Do you have sisters? (yes/no) lorem ipsum
Type yes or no
Do you have sisters? (yes/no) YES
>>>
"""
text = x
if text.lower() == 'y' or text.lower() == 'yes':
return True
elif text.lower() == 'n' or text.lower() == 'no':
return False
else:
raise ValueError
if __name__ == '__main__':
print('TESTING FUNCTIONS ...')
print('\tTESTING STRING FUNCTION ...')
input_str('Case 1: Write a string: ')
input_str('Case 2: Write a string (required): ', lambda x: len(x) > 0)
print('\tTESTING INT FUNCTION ...')
input_int('Case 1: Write an integer: ')
input_int('Case 2: Write an integer greater than 0 and less than 10: ', lambda x: x > 0 and x < 10)
print('\tTESTING FLOAT FUNCTION ...')
input_float('Case 1: Write a float: ')
input_float('Case 2: Write a negative float: ', lambda x: x < 0)
print('\tTESTING BOOLEAN FUNCTION ...')
input_bool('Case 1: Write a boolean: ')
input_bool('Case 2: Write a boolean (It has to be False): ', lambda x: not x)
print('\tTESTING YESNO FUNCTION ...')
input_yesno('Case 1: Write y/yes/n/no: ')
input_yesno('Case 2: Write y/yes: ', lambda x: x)
```
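Because each input type above is just a conversion function wrapped by the decorator, new types follow the same pattern. The sketch below (a hypothetical `input_hex`, not part of jebpy) only works if placed inside `input_types.py` itself, since `__input_x` is module-private.
```python
# Hypothetical extra input type built with the module's decorator pattern.
@__input_x
def input_hex(x):
    """Try to read a hexadecimal integer such as 'ff' or '0x1A'."""
    return int(x, 16)

# Usage: keeps asking until the value parses as hex and passes validation.
byte = input_hex('Colour byte (00-ff): ', lambda v: 0 <= v <= 255,
                 lambda raw: print(raw + ' is not a valid hex byte'))
```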
#### File: JebBarbas/jebpy/setup.py
```python
# -*- coding: utf-8 -*-
"""jebpy module can be installed and configured from here"""
import json
from os import path
from setuptools import setup, find_packages
from sys import version_info
VERSION = "v1.0.0"
CURR_PATH = "{}{}".format(path.abspath(path.dirname(__file__)), '/')
def path_format(file_path=None, file_name=None, is_abspath=False,
ignore_raises=False):
"""
Return the joined path, checking first that the path and the full
file path exist; if not, raise an IOError.
If ignore_raises is enabled, the checks are skipped and file_path must end with '/'
"""
path_formatted = "{}{}".format(file_path, file_name)
if ignore_raises:
return path_formatted
if file_path is None or not path.exists(file_path):
raise IOError("Path '{}' doesn't exists".format(file_path))
if file_name is None or not path.exists(path_formatted):
raise IOError(
"File '{}{}' doesn't exists".format(file_path, file_name))
if is_abspath:
return path.abspath(path.join(file_path, file_name))
else:
return path.join(file_path, file_name)
def read_file(is_json=False, file_path=None, encoding='utf-8',
is_encoding=True, ignore_raises=False):
"""Returns file object from file_path,
compatible with all py versiones
optionals:
can be use to return dict from json path
can modify encoding used to obtain file
"""
text = None
try:
if file_path is None:
raise Exception("File path received it's None")
if version_info.major >= 3:
if not is_encoding:
encoding = None
with open(file_path, encoding=encoding) as buff:
text = buff.read()
if version_info.major <= 2:
with open(file_path) as buff:
if is_encoding:
text = buff.read().decode(encoding)
else:
text = buff.read()
if is_json:
return json.loads(text)
except Exception as err:
if not ignore_raises:
raise Exception(err)
return text
def read(file_name=None, is_encoding=True, ignore_raises=False):
"""Read file"""
if file_name is None:
raise Exception("File name not provided")
if ignore_raises:
try:
return read_file(
is_encoding=is_encoding,
file_path=path_format(
file_path=CURR_PATH,
file_name=file_name,
ignore_raises=ignore_raises))
except Exception:
# TODO: don't silence errors like this;
# the README path must come from setup.cfg
return 'NOTFOUND'
return read_file(is_encoding=is_encoding,
file_path=path_format(
file_path=CURR_PATH,
file_name=file_name,
ignore_raises=ignore_raises))
setup(
name='jebpy',
version=VERSION,
license=read("LICENSE", is_encoding=False, ignore_raises=True),
packages=find_packages(),
description='Python module made by jebbarbas',
long_description=read("README.md"),
long_description_content_type="text/markdown",
author='jebbarbas',
author_email='<EMAIL>',
url='https://github.com/jebbarbas/jebpy',
download_url='https://github.com/jebbarbas/jebpy/tarball/v1.0.0',
keywords = ['jebpy','input', 'password', 'percentage','weight','probability','is'],
install_requires=[],
setup_requires=['requests'],
tests_require=[
'pytest',
'pytest-cov',
'pytest-html',
'pytest-dependency',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
```
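For illustration, the helpers above compose as follows; a minimal sketch assuming it runs next to `setup.py` (where `CURR_PATH` is defined), and where `package.json` is only a placeholder file name.
```python
# Read the README via read(), falling back to 'NOTFOUND' instead of raising.
long_description = read("README.md", ignore_raises=True)

# Load a JSON file as a dict via read_file() and path_format().
config = read_file(is_json=True, ignore_raises=True,
                   file_path=path_format(file_path=CURR_PATH,
                                         file_name="package.json",
                                         ignore_raises=True))
```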
|
{
"source": "jebeckford/cloudmesh-common",
"score": 3
}
|
#### File: cloudmesh/common/error.py
```python
import sys
import traceback
from cloudmesh.common.console import Console
#
# TODO: this class seems to replicate some portions of what Console does
#
class Error(object):
"""
A class to print error messages
"""
#
# TODO: this should probably use Console so we can print in color
#
@classmethod
def msg(cls, error=None, debug=True, trace=True):
"""
prints the error message
:param error: the error message
:param debug: only prints it if debug is set to true
:param trace: if true prints the trace
:return:
"""
if debug and error is not None:
print(error)
if debug and trace:
print(traceback.format_exc())
@classmethod
def traceback(cls, error=None, debug=True, trace=True):
"""
prints the trace
:param error: a message preceding the trace
:param debug: prints it if debug is set to true
:param trace:
:return:
"""
if debug and trace:
Error.msg(error=error, debug=debug, trace=trace)
@classmethod
def info(cls, msg, debug=True):
"""
prints an info msg.
:param msg: the message
:return:
"""
if debug:
Console.info(msg)
@classmethod
def warning(cls, msg, debug=True):
"""
prints a warning message.
:param msg:
:return:
"""
if debug:
Console.warning(msg)
@classmethod
def debug(cls, msg, debug=True):
"""
prints a debug message.
:param msg: the message
:return:
"""
if debug:
Console.msg(msg)
@classmethod
def exit(cls, msg):
"""
call a system exit
:param msg:
:return:
"""
Console.error(msg)
sys.exit()
```
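A typical use, sketched below assuming cloudmesh-common is installed, is to report a caught exception together with its traceback:
```python
from cloudmesh.common.error import Error

try:
    1 / 0
except ZeroDivisionError as e:
    # prints the message and, because trace=True, the formatted traceback
    Error.traceback(error=e, debug=True, trace=True)
```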
#### File: cloudmesh/common/Host.py
```python
import os
import subprocess
from multiprocessing import Pool
from sys import platform
from cloudmesh.common.parameter import Parameter
from cloudmesh.common.util import path_expand
import time
from pprint import pprint
class Host(object):
@staticmethod
def _ssh(args):
"""
run a command on a remote host via ssh
:param args: dict of {key, username, host, command, shell}
:return: the completed process; success is True when
returncode == 0
"""
key = args['key']
host = args['host']
username = args['username']
command = args['command']
shell = args['shell'] or False
location = f"{username}@{host}"
command = ['ssh',
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
'-i', key, location, command]
result = subprocess.run(command, capture_output=True, shell=shell)
result.stdout = result.stdout.decode("utf-8")
result.success = result.returncode == 0
return result
@staticmethod
def ssh(hosts=None,
command=None,
username=None,
key="~/.ssh/id_rsa.pub",
shell=False,
processors=3):
#
# BUG: this code has a bug and does not deal with different
# usernames on the host to be checked.
#
"""
:param command: the command to be executed
:param hosts: a list of hosts to be checked
:param username: the usernames for the hosts
:param key: the key for logging in
:param shell: if True, run the remote command through a shell
:param processors: the number of parallel connections
:return: list of results of the ssh commands
"""
if type(hosts) != list:
hosts = Parameter.expand(hosts)
if username is None:
username = os.environ['USER']
key = path_expand(key)
# wrap ip and count into one list to be sent to Pool map
args = [{'command': command,
'key': key,
'shell': shell,
'username': username,
'host': host} for host in hosts]
with Pool(processors) as p:
res = p.map(Host._ssh, args)
return res
@staticmethod
def check(hosts=None,
username=None,
key="~/.ssh/id_rsa.pub",
processors=3):
#
# BUG: this code has a bug and does not deal with different
# usernames on the host to be checked.
#
"""
:param hosts: a list of hosts to be checked
:param username: the usernames for the hosts
:param key: the key for logging in
:param processors: the number of parallel checks
:return: list of results of the hostname check
"""
result = Host.ssh(hosts=hosts,
command='hostname',
username=username,
key=key,
processors=processors)
return result
# noinspection PyBroadException
@staticmethod
def _ping(args):
"""
ping a vm
:param args: dict of {ip address, count}
:return: a dict representing the result; if returncode == 0 the ping
was successful
"""
ip = args['ip']
count = str(args['count'])
count_flag = '-n' if platform.startswith('win') else '-c'
command = ['ping', count_flag, count, ip]
result = subprocess.run(command, capture_output=True)
try:
timers = result.stdout \
.decode("utf-8") \
.split("round-trip min/avg/max/stddev =")[1] \
.replace('ms', '').strip() \
.split("/")
data = {
"host": ip,
"success": result.returncode == 0,
"stdout": result.stdout,
"min": timers[0],
"avg": timers[1],
"max": timers[2],
"stddev": timers[3]
}
except:
data = {
"host": ip,
"success": result.returncode == 0,
"stdout": result.stdout,
}
return data
@staticmethod
def ping(hosts=None, count=1, processors=3):
"""
ping a list of given ip addresses
:param hosts: a list of ip addresses
:param count: number of pings to run per ip
:param processors: number of processors to Pool
:return: list of dicts representing the ping result
"""
# first expand the ips to a list
if type(hosts) != list:
hosts = Parameter.expand(hosts)
# wrap ip and count into one list to be sent to Pool map
args = [{'ip': ip, 'count': count} for ip in hosts]
with Pool(processors) as p:
res = p.map(Host._ping, args)
return res
@staticmethod
def generate_key(hosts=None,
filename="~/.ssh/id_rsa",
username=None,
processors=3,
dryrun=False,
verbose=True):
"""
generates the keys on the specified hosts
:param hosts:
:param filename:
:param username:
:param output:
:param dryrun:
:param verbose:
:return:
"""
command = f' cat /dev/zero | ssh-keygen -t rsa -b 4096 -q -N "" -P "" -f {filename} -q'
print(command)
results = Host.ssh(
hosts=hosts,
command=command,
username=username,
key="~/.ssh/id_rsa.pub",
processors=processors)
keys = []
for result in results:
key = result.stdout.strip()
keys.append(key)
return keys
@staticmethod
def gather_keys(
username=None,
hosts=None,
filename="~/.ssh/id_rsa.pub",
key="~/.ssh/id_rsa",
processors=3,
dryrun=False):
"""
returns in a list the keys of the specified hosts
:param username:
:param hosts:
:param filename:
:param key:
:param processors: the number of parallel connections
:param dryrun:
:return:
"""
command = f"cat {filename}"
results = Host.ssh(
hosts=hosts,
command=command,
username=username,
key=key,
processors=processors)
pprint(results)
keys = []
for result in results:
key = result.stdout.strip()
keys.append(key)
return keys
```
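A usage sketch, assuming cloudmesh-common is installed and that the bracket notation expanded by `Parameter.expand` (e.g. `red[01-03]`) maps to resolvable hostnames:
```python
from cloudmesh.common.Host import Host

# Ping red01, red02 and red03 twice each, three pings in parallel.
results = Host.ping(hosts="red[01-03]", count=2, processors=3)
for r in results:
    print(r["host"], "ok" if r["success"] else "unreachable")
```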
#### File: common/run/subprocess.py
```python
import subprocess
def run(command, shell=True):
"""Run a command and return its decoded output; raises CalledProcessError on failure."""
result = subprocess.check_output(command, shell=shell)
return result.decode("utf-8")
```
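A quick usage sketch, assuming the file is importable as `cloudmesh.common.run.subprocess`:
```python
from cloudmesh.common.run.subprocess import run

# check_output raises CalledProcessError on a non-zero exit status.
print(run("echo hello"))  # prints "hello" followed by a newline
```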
#### File: cloudmesh/common/todo.py
```python
class TODO(object):
"""
class to raise an exception for code that has not yet been implemented.
from cloudmesh.common.todo import TODO
TODO.implement()
"""
@classmethod
def implement(cls, msg="Please implement"):
"""
Raises an exception as not implemented
:param msg: the message to print
:return:
"""
print(msg)
raise NotImplementedError(msg)
```
|
{
"source": "jebentancour/Bandonberry",
"score": 3
}
|
#### File: jebentancour/Bandonberry/botoneras.py
```python
from BDN_MCP23S17 import MCP23S17
import RPi.GPIO as GPIO
import rtmidi_python as rtmidi
import time
#LED_PIN = 17
#GPIO.setmode(GPIO.BCM)
#GPIO.setup(LED_PIN, GPIO.OUT)
#GPIO.output(LED_PIN, GPIO.LOW)
#led_state = False
PIN = 4
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN, GPIO.OUT)
servo = GPIO.PWM(PIN,50)
servo.start(0)
SYNTH_PORT_NAME = "FLUID"
USB_PORT_NAME = "f_midi"
LEFT_VELOCITY = 96
RIGHT_VELOCITY = 64
NOTE_ON = 0x90
NOTE_OFF = 0x80
CONTROL = 0xB0
VOLUME = 0x07
BALANCE = 0x0A
LEFT = 0x00
RIGHT = 0x7F
CENTER = 0x40
COLUMS = 6
ROWS = 8
NUM_OF_READS = 2
DEBOUNCE_DELAY = 0.001
right_notes_matrix = [[[0, 0] for x in range(ROWS)] for y in range(COLUMS)]
# <NAME>
# 0 = Opening, 1 = Closing
right_notes_matrix[0][0] = [81, 80]
right_notes_matrix[0][1] = [85, 88]
right_notes_matrix[0][2] = [79, 75]
right_notes_matrix[0][3] = [78, 80]
right_notes_matrix[0][4] = [75, 76]
right_notes_matrix[0][5] = [77, 77]
right_notes_matrix[0][6] = [63, 63]
right_notes_matrix[0][7] = [57, 58]
right_notes_matrix[1][0] = [74, 76]
right_notes_matrix[1][1] = [80, 81]
right_notes_matrix[1][2] = [83, 85]
right_notes_matrix[1][3] = [71, 73]
right_notes_matrix[1][4] = [68, 69]
right_notes_matrix[1][5] = [70, 64]
right_notes_matrix[1][6] = [65, 65]
right_notes_matrix[1][7] = [57, 57]
right_notes_matrix[2][0] = [69, 71]
right_notes_matrix[2][1] = [72, 74]
right_notes_matrix[2][2] = [76, 79]
right_notes_matrix[2][3] = [66, 67]
right_notes_matrix[2][4] = [73, 78]
right_notes_matrix[2][5] = [64, 66]
right_notes_matrix[2][6] = [59, 59]
right_notes_matrix[3][0] = [82, 70]
right_notes_matrix[3][1] = [84, 72]
right_notes_matrix[3][2] = [86, 87]
right_notes_matrix[3][3] = [67, 68]
right_notes_matrix[3][4] = [62, 61]
right_notes_matrix[3][5] = [60, 62]
right_notes_matrix[4][0] = [90, 82]
right_notes_matrix[4][1] = [88, 84]
right_notes_matrix[4][2] = [87, 86]
right_notes_matrix[4][3] = [93, 91]
right_notes_matrix[4][4] = [61, 60]
right_notes_matrix[5][0] = [92, 92]
right_notes_matrix[5][1] = [91, 90]
right_notes_matrix[5][2] = [89, 89]
right_notes_matrix[5][3] = [94, 93]
left_notes_matrix = [[[0, 0] for x in range(ROWS)] for y in range(COLUMS)]
# <NAME>
# 0 = Opening, 1 = Closing
left_notes_matrix[0][0] = [ 54, 53]
left_notes_matrix[0][1] = [ 39, 37]
left_notes_matrix[0][2] = [ 36, 41]
left_notes_matrix[0][3] = [ 63, 71]
left_notes_matrix[0][4] = [ 69, 68]
left_notes_matrix[0][5] = [ 67, 66]
left_notes_matrix[0][6] = [ 47, 52]
left_notes_matrix[0][7] = [ 38, 40]
left_notes_matrix[1][0] = [ 66, 64]
left_notes_matrix[1][1] = [ 61, 56]
left_notes_matrix[1][2] = [ 42, 47]
left_notes_matrix[1][3] = [ 62, 61]
left_notes_matrix[1][4] = [ 59, 57]
left_notes_matrix[1][5] = [ 56, 52]
left_notes_matrix[1][6] = [ 52, 45]
left_notes_matrix[2][0] = [ 48, 65]
left_notes_matrix[2][1] = [ 43, 54]
left_notes_matrix[2][2] = [ 41, 42]
left_notes_matrix[2][3] = [ 64, 62]
left_notes_matrix[2][4] = [ 60, 59]
left_notes_matrix[2][5] = [ 57, 55]
left_notes_matrix[2][6] = [ 50, 43]
left_notes_matrix[3][0] = [ 51, 60]
left_notes_matrix[3][1] = [ 65, 49]
left_notes_matrix[3][2] = [ 58, 48]
left_notes_matrix[3][3] = [ 55, 58]
left_notes_matrix[3][4] = [ 45, 50]
left_notes_matrix[3][5] = [ 40, 38]
left_notes_matrix[4][0] = [ 49, 51]
left_notes_matrix[4][1] = [ 53, 63]
left_notes_matrix[4][2] = [ 68, 67]
left_notes_matrix[4][3] = [ 46, 46]
left_notes_matrix[4][4] = [ 44, 44]
right_prev_data = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
left_prev_data = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
notes_playing = 0
position = -1
position_timestamp = time.time()
def set_servo_position(new_position):
global position
global position_timestamp
if new_position != position:
if new_position == 0:
# print 'servo close'
servo.ChangeDutyCycle(5)
if new_position == 1:
# print 'servo open'
servo.ChangeDutyCycle(10)
position = new_position
position_timestamp = time.time()
if time.time() > position_timestamp + 1:
#print 'servo stop'
servo.ChangeDutyCycle(0)
try:
midi_out = rtmidi.MidiOut()
port_found = False
while not port_found:
for port_name in midi_out.ports:
if SYNTH_PORT_NAME in port_name:
midi_out.open_port(port_name)
print "Puerto sintetizador encontrado"
port_found = True
for note in range(128):
midi_out.send_message([NOTE_OFF, note, 127])
midi_usb_out = rtmidi.MidiOut()
port_found = False
while not port_found:
for port_name in midi_usb_out.ports:
if USB_PORT_NAME in port_name:
midi_usb_out.open_port(port_name)
print "Puerto USB MIDI encontrado"
port_found = True
# MIDI channel configuration
midi_out.send_message([CONTROL | 0x00, VOLUME, 0x00]) # Right hand, opening
midi_out.send_message([CONTROL | 0x01, VOLUME, 0x00]) # Right hand, closing
midi_out.send_message([CONTROL | 0x02, VOLUME, 0x00]) # Left hand, opening
midi_out.send_message([CONTROL | 0x03, VOLUME, 0x00]) # Left hand, closing
midi_out.send_message([CONTROL | 0x00, BALANCE, RIGHT])
midi_out.send_message([CONTROL | 0x01, BALANCE, RIGHT])
midi_out.send_message([CONTROL | 0x02, BALANCE, LEFT])
midi_out.send_message([CONTROL | 0x03, BALANCE, LEFT])
# Left MCP23S17
mcp1 = MCP23S17(ce=1)
mcp1.open()
mcp1.setDirPORTA(0xFF)
mcp1.setDirPORTB(0xC0)
mcp1.setPullupPORTA(0xFF)
mcp1.setPullupPORTB(0xC0)
# Right MCP23S17
mcp0 = MCP23S17(ce=0)
mcp0.open()
mcp0.setDirPORTA(0xFF)
mcp0.setDirPORTB(0xC0)
mcp0.setPullupPORTA(0xFF)
mcp0.setPullupPORTB(0xC0)
while (True):
#if led_state:
#GPIO.output(LED_PIN, GPIO.HIGH)
#led_state = False
#else:
#GPIO.output(LED_PIN, GPIO.LOW)
#led_state = True
left_new_data = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
right_new_data = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
valve_new_data = 0x00
for current_col in range(COLUMS):
mcp1.writePORTB(~(0x01 << current_col))
mcp0.writePORTB(~(0x01 << current_col))
for i in range(NUM_OF_READS):
left_new_data[current_col] |= mcp1.readPORTA()
time.sleep(DEBOUNCE_DELAY)
right_new_data[current_col] |= mcp0.readPORTA()
time.sleep(DEBOUNCE_DELAY)
if left_prev_data[current_col] != left_new_data[current_col]:
for current_row in range(ROWS):
new_bit = (left_new_data[current_col] >> current_row) & 0x01
prev_bit = (left_prev_data[current_col] >> current_row) & 0x01
if new_bit != prev_bit:
if new_bit:
note = left_notes_matrix[current_col][current_row]
midi_out.send_message([NOTE_OFF | 0x02, note[0], LEFT_VELOCITY])
midi_out.send_message([NOTE_OFF | 0x03, note[1], LEFT_VELOCITY])
midi_usb_out.send_message([NOTE_OFF | 0x02, note[0], 127])
midi_usb_out.send_message([NOTE_OFF | 0x03, note[1], 127])
notes_playing -= 1
#print 'left_OFF [{0}][{1}] {2}'.format(current_col, current_row, note)
else:
note = left_notes_matrix[current_col][current_row]
midi_out.send_message([NOTE_ON | 0x02, note[0], LEFT_VELOCITY])
midi_out.send_message([NOTE_ON | 0x03, note[1], LEFT_VELOCITY])
midi_usb_out.send_message([NOTE_ON | 0x02, note[0], 127])
midi_usb_out.send_message([NOTE_ON | 0x03, note[1], 127])
notes_playing += 1
#print 'left_ON [{0}][{1}] {2}'.format(current_col, current_row, note)
if right_prev_data[current_col] != right_new_data[current_col]:
for current_row in range(ROWS):
new_bit = (right_new_data[current_col] >> current_row) & 0x01
prev_bit = (right_prev_data[current_col] >> current_row) & 0x01
if new_bit != prev_bit:
if new_bit:
note = right_notes_matrix[current_col][current_row]
midi_out.send_message([NOTE_OFF | 0x00, note[0], RIGHT_VELOCITY])
midi_out.send_message([NOTE_OFF | 0x01, note[1], RIGHT_VELOCITY])
midi_usb_out.send_message([NOTE_OFF | 0x00, note[0], 127])
midi_usb_out.send_message([NOTE_OFF | 0x01, note[1], 127])
notes_playing -= 1
#print 'right_OFF [{0}][{1}] {2}'.format(current_col, current_row, note)
else:
note = right_notes_matrix[current_col][current_row]
midi_out.send_message([NOTE_ON | 0x00, note[0], RIGHT_VELOCITY])
midi_out.send_message([NOTE_ON | 0x01, note[1], RIGHT_VELOCITY])
midi_usb_out.send_message([NOTE_ON | 0x00, note[0], 127])
midi_usb_out.send_message([NOTE_ON | 0x01, note[1], 127])
notes_playing += 1
#print 'right_ON [{0}][{1}] {2}'.format(current_col, current_row, note)
if notes_playing > 0:
#print '{0} notes playing'.format(notes_playing)
set_servo_position(1)
else:
#print 'no notes_playing'
set_servo_position(0)
left_prev_data = left_new_data
right_prev_data = right_new_data
finally:
mcp1.close()
mcp0.close()
midi_out.close_port()
midi_usb_out.close_port()
servo.stop()
GPIO.cleanup()
```
|
{
"source": "jebentancour/bandonberry_simple",
"score": 2
}
|
#### File: jebentancour/bandonberry_simple/BDN_MCP23S17.py
```python
import spidev
class MCP23S17(object):
"""This class provides an abstraction of the GPIO expander MCP23S17
for the Raspberry Pi.
It depends on the Python package spidev, which is available
from https://pypi.python.org/pypi/spidev.
"""
DIR_INPUT = 1
DIR_OUTPUT = 0
PULLUP_ENABLED = 1
PULLUP_DISABLED = 0
LEVEL_LOW = 0
LEVEL_HIGH = 1
"""Register addresses (ICON.BANK = 0) as documentined in the technical data sheet at
http://ww1.microchip.com/downloads/en/DeviceDoc/21952b.pdf
"""
MCP23S17_IODIRA = 0x00
MCP23S17_IODIRB = 0x01
MCP23S17_IPOLA = 0x02
MCP23S17_IPOLB = 0x03
MCP23S17_GPIOA = 0x12
MCP23S17_GPIOB = 0x13
MCP23S17_OLATA = 0x14
MCP23S17_OLATB = 0x15
MCP23S17_IOCON = 0x0A
MCP23S17_GPPUA = 0x0C
MCP23S17_GPPUB = 0x0D
"""Bit field flags as documentined in the technical data sheet at
http://ww1.microchip.com/downloads/en/DeviceDoc/21952b.pdf
"""
IOCON_UNUSED = 0x01
IOCON_INTPOL = 0x02
IOCON_ODR = 0x04
IOCON_HAEN = 0x08
IOCON_DISSLW = 0x10
IOCON_SEQOP = 0x20
IOCON_MIRROR = 0x40
IOCON_BANK_MODE = 0x80
IOCON_INIT = 0x00 # IOCON_BANK_MODE = 0, IOCON_HAEN = 0 address pins disabled
MCP23S17_CMD_WRITE = 0x40
MCP23S17_CMD_READ = 0x41
def __init__(self, bus=0, ce=0, deviceID=0x00):
"""Constructor
Initializes all attributes with 0.
Keyword arguments:
bus -- The SPI bus number
ce -- The chip-enable number for the SPI
deviceID -- The device ID of the component, i.e., the hardware address (default 0x00)
"""
self.spi = spidev.SpiDev()
self.bus = bus
self.ce = ce
self.deviceID = deviceID
self._GPIOA = 0x00
self._GPIOB = 0x00
self._IODIRA = 0xFF
self._IODIRB = 0xFF
self._GPPUA = 0x00
self._GPPUB = 0x00
self.isInitialized = False
def open(self):
"""Initializes the MCP23S17 with hardware-address access
and sequential operations mode.
"""
self.spi.open(self.bus, self.ce)
self.spi.max_speed_hz = 10000000
self.isInitialized = True
self._writeRegister(MCP23S17.MCP23S17_IOCON, MCP23S17.IOCON_INIT)
def close(self):
"""Closes the SPI connection that the MCP23S17 component is using.
"""
self.spi.close()
self.isInitialized = False
def setPullupMode(self, pin, mode):
"""Enables or disables the pull-up mode for input pins.
Parameters:
pin -- The pin index (0 - 15)
mode -- The pull-up mode (MCP23S17.PULLUP_ENABLED, MCP23S17.PULLUP_DISABLED)
"""
assert(pin < 16)
assert((mode == MCP23S17.PULLUP_ENABLED)
or (mode == MCP23S17.PULLUP_DISABLED))
assert(self.isInitialized)
if (pin < 8):
register = MCP23S17.MCP23S17_GPPUA
data = self._GPPUA
noshifts = pin
else:
register = MCP23S17.MCP23S17_GPPUB
noshifts = pin & 0x07
data = self._GPPUB
if (mode == MCP23S17.PULLUP_ENABLED):
data |= (1 << noshifts)
else:
data &= (~(1 << noshifts))
self._writeRegister(register, data)
if (pin < 8):
self._GPPUA = data
else:
self._GPPUB = data
def setDirection(self, pin, direction):
"""Sets the direction for a given pin.
Parameters:
pin -- The pin index (0 - 15)
direction -- The direction of the pin (MCP23S17.DIR_INPUT, MCP23S17.DIR_OUTPUT)
"""
assert (pin < 16)
assert ((direction == MCP23S17.DIR_INPUT)
or (direction == MCP23S17.DIR_OUTPUT))
assert(self.isInitialized)
if (pin < 8):
register = MCP23S17.MCP23S17_IODIRA
data = self._IODIRA
noshifts = pin
else:
register = MCP23S17.MCP23S17_IODIRB
noshifts = pin & 0x07
data = self._IODIRB
if (direction == MCP23S17.DIR_INPUT):
data |= (1 << noshifts)
else:
data &= (~(1 << noshifts))
self._writeRegister(register, data)
if (pin < 8):
self._IODIRA = data
else:
self._IODIRB = data
def digitalRead(self, pin):
"""Reads the logical level of a given pin.
Parameters:
pin -- The pin index (0 - 15)
Returns:
- MCP23S17.LEVEL_LOW, if the logical level of the pin is low,
- MCP23S17.LEVEL_HIGH, otherwise.
"""
assert(self.isInitialized)
assert (pin < 16)
if (pin < 8):
self._GPIOA = self._readRegister(MCP23S17.MCP23S17_GPIOA)
if ((self._GPIOA & (1 << pin)) != 0):
return MCP23S17.LEVEL_HIGH
else:
return MCP23S17.LEVEL_LOW
else:
self._GPIOB = self._readRegister(MCP23S17.MCP23S17_GPIOB)
pin &= 0x07
if ((self._GPIOB & (1 << pin)) != 0):
return MCP23S17.LEVEL_HIGH
else:
return MCP23S17.LEVEL_LOW
def digitalWrite(self, pin, level):
"""Sets the level of a given pin.
Parameters:
pin -- The pin index (0 - 15)
level -- The logical level to be set (MCP23S17.LEVEL_LOW, MCP23S17.LEVEL_HIGH)
"""
assert(self.isInitialized)
assert(pin < 16)
assert((level == MCP23S17.LEVEL_HIGH) or (level == MCP23S17.LEVEL_LOW))
if (pin < 8):
register = MCP23S17.MCP23S17_GPIOA
data = self._GPIOA
noshifts = pin
else:
register = MCP23S17.MCP23S17_GPIOB
noshifts = pin & 0x07
data = self._GPIOB
if (level == MCP23S17.LEVEL_HIGH):
data |= (1 << noshifts)
else:
data &= (~(1 << noshifts))
self._writeRegister(register, data)
if (pin < 8):
self._GPIOA = data
else:
self._GPIOB = data
def setDirPORTA(self, data):
assert(self.isInitialized)
self._writeRegister(MCP23S17.MCP23S17_IODIRA, data)
self._IODIRA = data
def setDirPORTB(self, data):
assert(self.isInitialized)
self._writeRegister(MCP23S17.MCP23S17_IODIRB, data)
self._IODIRB = data
def setPullupPORTA(self, data):
assert(self.isInitialized)
self._writeRegister(MCP23S17.MCP23S17_GPPUA, data)
self._GPPUA = data
def setPullupPORTB(self, data):
assert(self.isInitialized)
self._writeRegister(MCP23S17.MCP23S17_GPPUB, data)
self._GPPUB = data
def readPORTA(self):
assert(self.isInitialized)
data = self._readRegister(MCP23S17.MCP23S17_GPIOA)
self._GPIOA = data
return data
def readPORTB(self):
assert(self.isInitialized)
data = self._readRegister(MCP23S17.MCP23S17_GPIOB)
self._GPIOB = data
return data
def writePORTA(self,data):
assert(self.isInitialized)
self._writeRegister(MCP23S17.MCP23S17_GPIOA, data)
self._GPIOA = data
def writePORTB(self,data):
assert(self.isInitialized)
self._writeRegister(MCP23S17.MCP23S17_GPIOB, data)
self._GPIOB = data
def writeGPIO(self, data):
"""Sets the data port value for all pins.
Parameters:
data - The 16-bit value to be set.
"""
assert(self.isInitialized)
self._GPIOA = (data & 0xFF)
self._GPIOB = (data >> 8)
self._writeRegisterWord(MCP23S17.MCP23S17_GPIOA, data)
def readGPIO(self):
"""Reads the data port value of all pins.
Returns:
- The 16-bit data port value
"""
assert(self.isInitialized)
data = self._readRegisterWord(MCP23S17.MCP23S17_GPIOA)
self._GPIOA = (data & 0xFF)
self._GPIOB = (data >> 8)
return data
def _writeRegister(self, register, value):
assert(self.isInitialized)
command = MCP23S17.MCP23S17_CMD_WRITE | ((self.deviceID) << 1)
self.spi.xfer2([command, register, value])
def _readRegister(self, register):
assert(self.isInitialized)
command = MCP23S17.MCP23S17_CMD_READ | ((self.deviceID) << 1)
data = self.spi.xfer2([command, register, 0])
return data[2]
def _readRegisterWord(self, register):
assert(self.isInitialized)
buffer = [0, 0]
buffer[0] = self._readRegister(register)
buffer[1] = self._readRegister(register + 1)
return ((buffer[1] << 8) | buffer[0])
def _writeRegisterWord(self, register, data):
assert(self.isInitialized)
self._writeRegister(register, data & 0xFF)
self._writeRegister(register + 1, data >> 8)
```
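A minimal usage sketch, assuming a Raspberry Pi with SPI enabled, the spidev package installed, and the expander wired to bus 0 / CE0:
```python
mcp = MCP23S17(bus=0, ce=0)
mcp.open()

# Drive pin 0 (GPA0) high and read pin 8 (GPB0) with its pull-up enabled.
mcp.setDirection(0, MCP23S17.DIR_OUTPUT)
mcp.digitalWrite(0, MCP23S17.LEVEL_HIGH)
mcp.setDirection(8, MCP23S17.DIR_INPUT)
mcp.setPullupMode(8, MCP23S17.PULLUP_ENABLED)
print(mcp.digitalRead(8))

mcp.close()
```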
|
{
"source": "jeberhar/DevOpsLearner",
"score": 2
}
|
#### File: jeberhar/DevOpsLearner/AWS-Lambda-EC2AmiBackup.py
```python
import boto3
import collections
import datetime
import sys
import pprint
ec = boto3.client('ec2')
def lambda_handler(event, context):
reservations = ec.describe_instances(
Filters=[
{'Name': 'tag:Backup', 'Values': ['true', 'True']},
]
).get(
'Reservations', []
)
instances = sum(
[
[i for i in r['Instances']]
for r in reservations
], [])
print "Found %d instances that need backing up" % len(instances)
to_tag = collections.defaultdict(list)
for instance in instances:
try:
retention_days = [
int(t.get('Value')) for t in instance['Tags']
if t['Key'] == 'Retention'][0]
print "Retention tag found"
except IndexError:
print "Retention tag NOT found"
retention_days = 1
try:
instance_name = [
t.get('Value') for t in instance['Tags']
if t['Key'] == 'Name'][0]
print instance_name
print "Name tag found"
except IndexError:
print "Name tag NOT found"
instance_name = instance['InstanceId']
create_time = datetime.datetime.now()
create_fmt = create_time.strftime('%Y-%m-%d-%H-%M-%S')
AMIid = ec.create_image(InstanceId=instance['InstanceId'], Name="Lambda - " + instance_name + " from " + create_fmt, Description="Lambda created AMI of instance " + instance['InstanceId'] + " from " + create_fmt, NoReboot=True, DryRun=False)
#pprint.pprint(instance)
to_tag[retention_days].append(AMIid['ImageId'])
print "Retaining AMI %s of instance %s for %d days" % (
AMIid['ImageId'],
instance['InstanceId'],
retention_days,
)
print to_tag.keys()
for retention_days in to_tag.keys():
delete_date = datetime.date.today() + datetime.timedelta(days=retention_days)
delete_fmt = delete_date.strftime('%m-%d-%Y')
print "Will delete %d AMIs on %s" % (len(to_tag[retention_days]), delete_fmt)
ec.create_tags(
Resources=to_tag[retention_days],
Tags=[
{'Key': 'DeleteOn', 'Value': delete_fmt},
]
)
```
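The handler above only tags each AMI with a DeleteOn date; a companion cleanup Lambda, sketched below as an illustration (it is not part of this repository), could deregister the AMIs whose date has arrived.
```python
import datetime
import boto3

ec = boto3.client('ec2')

def cleanup_handler(event, context):
    # Deregister AMIs whose DeleteOn tag matches today's date.
    today = datetime.date.today().strftime('%m-%d-%Y')
    images = ec.describe_images(
        Owners=['self'],
        Filters=[{'Name': 'tag:DeleteOn', 'Values': [today]}])['Images']
    for image in images:
        ec.deregister_image(ImageId=image['ImageId'])
```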
#### File: jeberhar/DevOpsLearner/aws_reporting.py
```python
import boto
import boto.ec2
import sys
from boto.ec2.connection import EC2Connection
import pprint
account_string = "YOUR_ACCOUNT_STRING" #change this for each AWS account
class ansi_color: #unused class due to CSV file limitations
red = '\033[31m'
green = '\033[32m'
reset = '\033[0m'
grey = '\033[1;30m'
def instance_info(i):
groups = ""
volume_info = ""
count = 1
statusDummy = "Status Checks not available"
alarmDummy = "Alarm Status not available"
#logic for instance name
if 'Name' in i.tags:
name = str(i.tags['Name'])
if name == "":
name = '!!! No name specified !!!'
else:
name = '??? Name not in attributes for instance ???'
#n = n.ljust(16)[:16]
#if i.state == 'running':
# n = ansi_color.green + n + ansi_color.reset
#else:
# n = ansi_color.red + n + ansi_color.reset
#logic for public DNS
if i.state == 'running':
pub_name = i.public_dns_name
else:
pub_name = "Machine not running - no public DNS name"
#pub_name = ansi_color.red + pub_name + ansi_color.reset
#logic for private DNS
if i.state == 'running':
priv_name = i.private_dns_name
else:
priv_name = "Machine not running - no private DNS name"
#priv_name = ansi_color.red + priv_name + ansi_color.reset
#logic for instance groups
for group_name in i.groups:
groups = groups + str(group_name.name)
if len(i.groups) > 1:
if count < len(i.groups):
groups = groups + " AND "
count = count + 1
info = account_string
info = info + "," + name
info = info + "," + i.id
info = info + "," + i.instance_type
info = info + "," + i.placement
info = info + ',' + i.state
#info = info + ',' + statusDummy
#info = info + ',' + alarmDummy
info = info + ',' + pub_name
info = info + "," + str(i.ip_address)
info = info + ',' + priv_name
info = info + "," + str(i.key_name)
info = info + "," + str(i.monitored)
info = info + "," + str(i.launch_time)
info = info + ',' + groups
#EBS reporting works but painfully slow.....
for current_volumes in volumes:
#print "\t" + str(current_volumes) + "\n"
if current_volumes.attachment_state() == 'attached':
filter = {'block-device-mapping.volume-id':current_volumes.id}
#print "Starting connection for all instances....\n"
volumesinstance = conn.get_all_instances(filters=filter)
#print "Volumes by instance: " + str(len(volumesinstance))
#print "Ending connection for all instances....\n"
ids = [z for k in volumesinstance for z in k.instances]
for s in ids:
if (i.id == s.id):
#print "SUCCESS!!"
volume_info = volume_info + str(current_volumes.id) + ',' + str(s.id) + ',' + str(current_volumes.attach_data.device) + ',' + str(current_volumes.size) + ','
info = info + ',' + volume_info
volume_info = ""
return info
def print_instance(i):
print instance_info(i)
####main program execution####
regions = sys.argv[1:]
volume_info = ""
if len(regions) == 0:
regions=['us-east-1']
if len(regions) == 1 and regions[0] == "all":
working_regions = boto.ec2.regions()
#print working_regions #DEBUG: uncomment to view all the regions that will be searched for "all"
else:
working_regions = [ boto.ec2.get_region(x) for x in regions ]
for current_working_region in working_regions:
print "\n================"
print current_working_region.name
print "================"
print "Account Name,Instance Name,Instance ID,Instance Type,Availability Zone,Instance State,Public DNS,Public IP,Private DNS,Key Name,Monitoring,Launch Time,Security Groups,Attached Volume ID,Attached Volume Instance ID,Mounted Device Name,Attached Volume Size"
try:
conn = boto.connect_ec2(region = current_working_region)
#conn = EC2Connection() #same as boto.connect_ec2()
reservations = conn.get_all_instances()
volumes = conn.get_all_volumes()
#print "Volumes array has length of: " + str(len(volumes))
instances = [i for r in reservations for i in r.instances]
#pp = pprint.PrettyPrinter(indent=4)
for r in reservations:
for i in r.instances:
#pp.pprint(i.__dict__)
print_instance(i)
#print_ebs_info(i)
except boto.exception.EC2ResponseError:
print "ERROR -- Could not connect to " + current_working_region.name
pass
```
|
{
"source": "jebibault/ProstateCancerSurvival",
"score": 2
}
|
#### File: jebibault/ProstateCancerSurvival/index.py
```python
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
from app import app, server
from tabs import intro, predictCSS
style = {
'maxWidth': '900px',
'margin': 'auto'}
app.title = 'Prostate cancer survival prediction with interpretable AI'
app.layout = html.Div([
html.A([html.Img(src='https://github.com/jebibault/ProstateCancerSurvival/blob/master/figures/logo.png?raw=true', style={'width' : '100%', 'margin-bottom': '15px', 'margin-top': '25px'})], href='http://med.stanford.edu/xinglab.html', target='_blank'),
dcc.Markdown("## Predict prostate cancer survival with interpretable AI"),
html.P([
'This model allows you to predict the risk to die from prostate cancer within 10 years from diagnosis.',
html.Br(),
html.Br(),
html.Br()]),
dcc.Tabs(id='tabs', value='tab-intro', parent_className='custom-tabs', className='custom-tabs-container', children=[
dcc.Tab(label='About', value='tab-intro', className='custom-tab', selected_className='custom-tab--selected'),
dcc.Tab(label='Predict', value='tab-predictCSS', className='custom-tab', selected_className='custom-tab--selected'),
]),
html.Div(id='tabs-content-classes'),
], style=style)
@app.callback(Output('tabs-content-classes', 'children'),
[Input('tabs', 'value')])
def render_content(tab):
if tab == 'tab-intro': return intro.layout
elif tab == 'tab-predictCSS': return predictCSS.layout
if __name__ == '__main__':
app.run_server(debug=True)
```
|
{
"source": "JebKerman86/Atom-Parser",
"score": 3
}
|
#### File: JebKerman86/Atom-Parser/graph.py
```python
"""
>>> n = Node(5)
>>> p = Node(6)
>>> q = Node(7)
>>> n.add_child(p)
>>> n.add_child(q)
>>> n.children
[<__main__.Node object at 0x02877FF0>, <__main__.Node object at 0x02877F90>]
>>> for c in n.children:
...     print(c.data)
6
7
"""
"""
import networkx as nx
import matplotlib.pyplot as plt
def generate_graph(generations):
G = nx.Graph()
cntct_chain = []
for gen in generations:
cntct_chain.append(gen[0])
for bn_idx, bn in enumerate(cntct_chain):
#print("bn" + str(bn))
G.add_node(bn_idx, atom_idx = bn[0].tolist())
#e=[('a','b',0.3),('b','c',0.9),('a','c',0.5),('c','d',1.2)]
#G.add_weighted_edges_from(e)
return G
def plot_graph(G):
nodes = G.nodes()
print(nodes)
pos=nx.spring_layout(G)
nx.draw_networkx(G, pos=pos, node_size=100, font_size = 14, with_labels = True)
labels={}
for node in nodes:
labels[node] = str(G.node[node]["atom_idx"])
for p in pos:
pos[p] = [pos[p][0],pos[p][1]-0.1]
nx.draw_networkx_labels(G,pos,labels,font_size=16, font_color = "b")
plt.axis('off')
plt.savefig("labels_and_colors.png") # save as png
plt.show() # display
"""
```
|
{
"source": "jeblair/kazoo",
"score": 3
}
|
#### File: kazoo/kazoo/hosts.py
```python
from six.moves import urllib_parse
def collect_hosts(hosts):
"""Collect a set of hosts and an optional chroot from a string."""
host_ports, chroot = hosts.partition("/")[::2]
chroot = "/" + chroot if chroot else None
result = []
for host_port in host_ports.split(","):
# put all complexity of dealing with
# IPv4 & IPv6 address:port on the urlsplit
res = urllib_parse.urlsplit("xxx://" + host_port)
host = res.hostname
if host is None:
raise ValueError("bad hostname")
port = int(res.port) if res.port else 2181
result.append((host.strip(), port))
return result, chroot
```
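For example, a typical ZooKeeper connection string splits like this:
```python
# Hosts without an explicit port default to 2181; the chroot gets a leading "/".
hosts, chroot = collect_hosts("zk1.example.com:2181,zk2.example.com/app/config")
print(hosts)   # [('zk1.example.com', 2181), ('zk2.example.com', 2181)]
print(chroot)  # /app/config
```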
|
{
"source": "jeblair/salt",
"score": 3
}
|
#### File: salt/grains/core.py
```python
import os
import socket
import sys
import re
import platform
import salt.utils
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def _kernel():
'''
Return the kernel type
'''
# Provides:
# kernel
grains = {}
grains['kernel'] = __salt__['cmd.run']('uname -s').strip()
if grains['kernel'] == 'AIX':
grains['kernelrelease'] = __salt__['cmd.run']('oslevel -s').strip()
else:
grains['kernelrelease'] = __salt__['cmd.run']('uname -r').strip()
if 'kernel' not in grains:
grains['kernel'] = 'Unknown'
if not grains['kernel']:
grains['kernel'] = 'Unknown'
return grains
def _windows_cpudata():
'''
Return the cpu information for Windows systems architecture
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
grains = {}
grains['cpuarch'] = platform.machine()
if 'NUMBER_OF_PROCESSORS' in os.environ:
grains['num_cpus'] = os.environ['NUMBER_OF_PROCESSORS']
grains['cpu_model'] = platform.processor()
return grains
def _linux_cpudata():
'''
Return the cpu information for Linux systems architecture
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cpuinfo = '/proc/cpuinfo'
# Grab the Arch
arch = __salt__['cmd.run']('uname -m').strip()
grains['cpuarch'] = arch
# Some systems such as Debian don't like uname -m
# so fallback gracefully to the processor type
if not grains['cpuarch'] or grains['cpuarch'] == 'unknown':
arch = __salt__['cmd.run']('uname -p')
grains['cpuarch'] = arch
if not grains['cpuarch'] or grains['cpuarch'] == 'unknown':
arch = __salt__['cmd.run']('uname -i')
grains['cpuarch'] = arch
if not grains['cpuarch'] or grains['cpuarch'] == 'unknown':
grains['cpuarch'] = 'Unknown'
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
for line in open(cpuinfo, 'r').readlines():
comps = line.split(':')
if not len(comps) > 1:
continue
if comps[0].strip() == 'processor':
grains['num_cpus'] = int(comps[1].strip()) + 1
elif comps[0].strip() == 'model name':
grains['cpu_model'] = comps[1].strip()
elif comps[0].strip() == 'flags':
grains['cpu_flags'] = comps[1].split()
if 'num_cpus' not in grains:
grains['num_cpus'] = 0
if 'cpu_model' not in grains:
grains['cpu_model'] = 'Unknown'
if 'cpu_flags' not in grains:
grains['cpu_flags'] = []
return grains
def _freebsd_cpudata():
'''
Return cpu information for FreeBSD systems
'''
grains = {}
sysctl = salt.utils.which('sysctl')
if sysctl:
machine_cmd = '{0} -n hw.machine'.format(sysctl)
ncpu_cmd = '{0} -n hw.ncpu'.format(sysctl)
model_cpu = '{0} -n hw.model'.format(sysctl)
grains['num_cpus'] = __salt__['cmd.run'](ncpu_cmd).strip()
grains['cpu_model'] = __salt__['cmd.run'](model_cpu).strip()
grains['cpuarch'] = __salt__['cmd.run'](machine_cmd).strip()
grains['cpu_flags'] = []
return grains
def _memdata(osdata):
'''
Gather information about the system memory
'''
# Provides:
# mem_total
grains = {'mem_total': 0}
if osdata['kernel'] == 'Linux':
meminfo = '/proc/meminfo'
if os.path.isfile(meminfo):
for line in open(meminfo, 'r').readlines():
comps = line.split(':')
if not len(comps) > 1:
continue
if comps[0].strip() == 'MemTotal':
grains['mem_total'] = int(comps[1].split()[0]) / 1024
elif osdata['kernel'] in ('FreeBSD','OpenBSD'):
sysctl = salt.utils.which('sysctl')
if sysctl:
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl)).strip()
grains['mem_total'] = str(int(mem) / 1024 / 1024)
elif osdata['kernel'] == 'Windows':
for line in __salt__['cmd.run']('SYSTEMINFO /FO LIST').split('\n'):
comps = line.split(':')
if not len(comps) > 1:
continue
if comps[0].strip() == 'Total Physical Memory':
grains['mem_total'] = int(comps[1].split()[0].replace(',', ''))
break
return grains
def _virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
'''
    # This is going to be a monster; if you are running a VM, please test this
    # grain and submit patches!
# Provides:
# virtual
grains = {'virtual': 'physical'}
lspci = salt.utils.which('lspci')
dmidecode = salt.utils.which('dmidecode')
if dmidecode:
output = __salt__['cmd.run']('dmidecode')
# Product Name: VirtualBox
if 'Vendor: QEMU' in output:
# FIXME: Make this detect between kvm or qemu
grains['virtual'] = 'kvm'
elif 'VirtualBox' in output:
grains['virtual'] = 'VirtualBox'
# Product Name: VMware Virtual Platform
elif 'VMware' in output:
grains['virtual'] = 'VMware'
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif 'Manufacturer: Microsoft' in output and 'Virtual Machine' in output:
grains['virtual'] = 'VirtualPC'
# Fall back to lspci if dmidecode isn't available
elif lspci:
model = __salt__['cmd.run']('lspci').lower()
if 'vmware' in model:
grains['virtual'] = 'VMware'
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH VirtualBox Guest Service
elif 'virtualbox' in model:
grains['virtual'] = 'VirtualBox'
elif 'qemu' in model:
grains['virtual'] = 'kvm'
choices = ('Linux', 'OpenBSD', 'SunOS', 'HP-UX')
isdir = os.path.isdir
if osdata['kernel'] in choices:
if isdir('/proc/vz'):
if os.path.isfile('/proc/vz/version'):
grains['virtual'] = 'openvzhn'
else:
grains['virtual'] = 'openvzve'
elif isdir('/proc/sys/xen') or isdir('/sys/bus/xen') or isdir('/proc/xen'):
if os.path.isfile('/proc/xen/xsd_kva'):
# Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
grains['virtual_subtype'] = 'Xen Dom0'
else:
                if osdata.get('productname', '') == 'HVM domU':
# Requires dmidecode!
grains['virtual_subtype'] = 'Xen HVM DomU'
elif os.path.isfile('/proc/xen/capabilities') and os.access('/proc/xen/capabilities', os.R_OK):
caps = open('/proc/xen/capabilities')
if 'control_d' not in caps.read():
# Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
grains['virtual_subtype'] = 'Xen PV DomU'
else:
# Shouldn't get to this, but just in case
grains['virtual_subtype'] = 'Xen Dom0'
caps.close()
# Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
# Tested on Fedora 15 / 2.6.41.4-1 without running xen
elif isdir('/sys/bus/xen'):
if 'xen' in __salt__['cmd.run']('dmesg').lower():
grains['virtual_subtype'] = 'Xen PV DomU'
elif os.listdir('/sys/bus/xen/drivers'):
# An actual DomU will have several drivers
# whereas a paravirt ops kernel will not.
grains['virtual_subtype'] = 'Xen PV DomU'
# If a Dom0 or DomU was detected, obviously this is xen
if 'dom' in grains.get('virtual_subtype', '').lower():
grains['virtual'] = 'xen'
elif isdir('/.SUNWnative'):
grains['virtual'] = 'zone'
elif os.path.isfile('/proc/cpuinfo'):
if 'QEMU Virtual CPU' in open('/proc/cpuinfo', 'r').read():
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'FreeBSD':
sysctl = salt.utils.which('sysctl')
if sysctl:
            model = __salt__['cmd.run']('{0} -n hw.model'.format(sysctl)).strip()
            jail = __salt__['cmd.run']('{0} -n security.jail.jailed'.format(sysctl)).strip()
            # sysctl -n prints only the value; without it the output starts
            # with the key name, so a plain truthiness test always fired
            if jail == '1':
grains['virtual_subtype'] = 'jail'
if 'QEMU Virtual CPU' in model:
grains['virtual'] = 'kvm'
return grains
def _ps(osdata):
'''
Return the ps grain
'''
grains = {}
bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'Darwin')
if osdata['os'] in bsd_choices:
grains['ps'] = 'ps auxwww'
elif osdata['os'] == 'Windows':
grains['ps'] = 'tasklist.exe'
elif osdata.get('virtual', '') == 'openvzhn':
grains['ps'] = 'vzps -E 0 -efH|cut -b 6-'
else:
grains['ps'] = 'ps -efH'
return grains
def _linux_platform_data(osdata):
'''
The platform module is very smart about figuring out linux distro
information. Instead of re-inventing the wheel, lets use it!
'''
# Provides:
# osrelease
# oscodename
grains = {}
(osname, osrelease, oscodename) = platform.dist()
if 'os' not in osdata and osname:
grains['os'] = osname
if osrelease:
grains['osrelease'] = osrelease
if oscodename:
grains['oscodename'] = oscodename
return grains
def _windows_platform_data(osdata):
'''
Use the platform module for as much as we can.
'''
# Provides:
# osrelease
# osversion
# osmanufacturer
# manufacturer
# productname
# biosversion
# osfullname
# inputlocale
# timezone
# windowsdomain
grains = {}
(osname, hostname, osrelease, osversion, machine, processor) = platform.uname()
if 'os' not in osdata and osname:
grains['os'] = osname
if osrelease:
grains['osrelease'] = osrelease
if osversion:
grains['osversion'] = osversion
get_these_grains = {
'OS Manufacturer': 'osmanufacturer',
'System Manufacturer': 'manufacturer',
'System Model': 'productname',
'BIOS Version': 'biosversion',
'OS Name': 'osfullname',
'Input Locale': 'inputlocale',
'Time Zone': 'timezone',
'Domain': 'windowsdomain',
}
systeminfo = __salt__['cmd.run']('SYSTEMINFO')
for line in systeminfo.split('\n'):
comps = line.split(':', 1)
if not len(comps) > 1:
continue
item = comps[0].strip()
value = comps[1].strip()
if item in get_these_grains:
grains[get_these_grains[item]] = value
return grains
def os_data():
'''
Return grains pertaining to the operating system
'''
grains = {}
if 'os' in os.environ:
if os.environ['os'].startswith('Windows'):
grains['os'] = 'Windows'
grains['kernel'] = 'Windows'
grains.update(_memdata(grains))
grains.update(_windows_platform_data(grains))
grains.update(_windows_cpudata())
grains.update(_ps(grains))
return grains
grains.update(_kernel())
if grains['kernel'] == 'Linux':
# Add lsb grains on any distro with lsb-release
if os.path.isfile('/etc/lsb-release'):
for line in open('/etc/lsb-release').readlines():
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
                # raw string, with the hyphen escaped so the character class
                # matches '.', '-' and '_' literally instead of as a range
                regex = re.compile(r'^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?([\w\s.\-_]+)(?:\'|")?')
match = regex.match(line)
if match:
# Adds: lsb_distrib_{id,release,codename,description}
grains['lsb_{0}'.format(match.groups()[0].lower())] = match.groups()[1].rstrip()
if os.path.isfile('/etc/arch-release'):
grains['os'] = 'Arch'
elif os.path.isfile('/etc/debian_version'):
grains['os'] = 'Debian'
if 'lsb_distrib_id' in grains:
if 'Ubuntu' in grains['lsb_distrib_id']:
grains['os'] = 'Ubuntu'
elif os.path.isfile('/etc/issue.net') and \
'Ubuntu' in open('/etc/issue.net').readline():
grains['os'] = 'Ubuntu'
elif os.path.isfile('/etc/gentoo-release'):
grains['os'] = 'Gentoo'
elif os.path.isfile('/etc/fedora-release'):
grains['os'] = 'Fedora'
elif os.path.isfile('/etc/mandriva-version'):
grains['os'] = 'Mandriva'
elif os.path.isfile('/etc/mandrake-version'):
grains['os'] = 'Mandrake'
elif os.path.isfile('/etc/mageia-version'):
grains['os'] = 'Mageia'
elif os.path.isfile('/etc/meego-version'):
grains['os'] = 'MeeGo'
elif os.path.isfile('/etc/vmware-version'):
grains['os'] = 'VMWareESX'
elif os.path.isfile('/etc/bluewhite64-version'):
grains['os'] = 'Bluewhite64'
elif os.path.isfile('/etc/slamd64-version'):
grains['os'] = 'Slamd64'
elif os.path.isfile('/etc/slackware-version'):
grains['os'] = 'Slackware'
elif os.path.isfile('/etc/enterprise-release'):
if os.path.isfile('/etc/ovs-release'):
grains['os'] = 'OVS'
else:
grains['os'] = 'OEL'
elif os.path.isfile('/etc/redhat-release'):
data = open('/etc/redhat-release', 'r').read()
if 'centos' in data.lower():
grains['os'] = 'CentOS'
elif 'scientific' in data.lower():
grains['os'] = 'Scientific'
else:
grains['os'] = 'RedHat'
elif os.path.isfile('/etc/SuSE-release'):
data = open('/etc/SuSE-release', 'r').read()
if 'SUSE LINUX Enterprise Server' in data:
grains['os'] = 'SLES'
elif 'SUSE LINUX Enterprise Desktop' in data:
grains['os'] = 'SLED'
elif 'openSUSE' in data:
grains['os'] = 'openSUSE'
else:
grains['os'] = 'SUSE'
# Use the already intelligent platform module to get distro info
grains.update(_linux_platform_data(grains))
# If the Linux version can not be determined
        if 'os' not in grains:
grains['os'] = 'Unknown {0}'.format(grains['kernel'])
elif grains['kernel'] == 'sunos':
grains['os'] = 'Solaris'
elif grains['kernel'] == 'VMkernel':
grains['os'] = 'ESXi'
elif grains['kernel'] == 'Darwin':
grains['os'] = 'MacOS'
grains.update(_freebsd_cpudata())
else:
grains['os'] = grains['kernel']
if grains['kernel'] == 'Linux':
grains.update(_linux_cpudata())
elif grains['kernel'] in ('FreeBSD', 'OpenBSD'):
# _freebsd_cpudata works on OpenBSD as well.
grains.update(_freebsd_cpudata())
grains.update(_memdata(grains))
# Get the hardware and bios data
grains.update(_hw_data(grains))
# Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_ps(grains))
return grains
def hostname():
'''
Return fqdn, hostname, domainname
'''
# This is going to need some work
# Provides:
# fqdn
# host
# localhost
# domain
grains = {}
grains['fqdn'] = socket.getfqdn()
comps = grains['fqdn'].split('.')
grains['host'] = comps[0]
grains['localhost'] = socket.gethostname()
if len(comps) > 1:
grains['domain'] = '.'.join(comps[1:])
else:
grains['domain'] = ''
return grains
def path():
'''
Return the path
'''
# Provides:
# path
return {'path': os.environ['PATH'].strip()}
def pythonversion():
'''
Return the Python version
'''
# Provides:
# pythonversion
return {'pythonversion': list(sys.version_info)}
def pythonpath():
'''
Return the Python path
'''
# Provides:
# pythonpath
return {'pythonpath': sys.path}
def saltpath():
'''
Return the path of the salt module
'''
# Provides:
# saltpath
path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {'saltpath': os.path.dirname(path)}
def saltversion():
'''
Return the version of salt
'''
# Provides:
# saltversion
from salt import __version__
return {'saltversion': __version__}
# Relatively complex mini-algorithm to iterate over the various
# sections of dmidecode output and return matches for specific
# lines containing data we want, but only in the right section.
def _dmidecode_data(regex_dict):
'''
Parse the output of dmidecode in a generic fashion that can
be used for the multiple system types which have dmidecode.
'''
# NOTE: This function might gain support for smbios instead
# of dmidecode when salt gets working Solaris support
ret = {}
# No use running if dmidecode isn't in the path
if not salt.utils.which('dmidecode'):
return ret
out = __salt__['cmd.run']('dmidecode')
for section in regex_dict:
section_found = False
# Look at every line for the right section
for line in out.split('\n'):
if not line: continue
# We've found it, woohoo!
if re.match(section, line):
section_found = True
continue
if not section_found:
continue
# Now that a section has been found, find the data
for item in regex_dict[section]:
# Examples:
# Product Name: 64639SU
# Version: 7LETC1WW (2.21 )
                regex = re.compile(r'\s+{0}\s+(.*)$'.format(item))
grain = regex_dict[section][item]
# Skip to the next iteration if this grain
# has been found in the dmidecode output.
if grain in ret: continue
match = regex.match(line)
# Finally, add the matched data to the grains returned
if match:
ret[grain] = match.group(1).strip()
return ret
def _hw_data(osdata):
'''
Get system specific hardware data from dmidecode
Provides
biosversion
productname
manufacturer
serialnumber
biosreleasedate
.. versionadded:: 0.9.5
'''
grains = {}
# TODO: *BSD dmidecode output
if osdata['kernel'] == 'Linux':
linux_dmi_regex = {
'BIOS [Ii]nformation': {
'[Vv]ersion:': 'biosversion',
'[Rr]elease [Dd]ate:': 'biosreleasedate',
},
'[Ss]ystem [Ii]nformation': {
'Manufacturer:': 'manufacturer',
'Product(?: Name)?:': 'productname',
'Serial Number:': 'serialnumber',
},
}
grains.update(_dmidecode_data(linux_dmi_regex))
return grains
def get_server_id():
'''
Provides an integer based on the FQDN of a machine.
Useful as server-id in MySQL replication or anywhere else you'll need an ID like this.
'''
# Provides:
# server_id
    return {'server_id': abs(hash(__opts__['id']) % 2**31)}
```
#### File: salt/modules/django.py
```python
import os
def _get_django_admin(bin_env):
if not bin_env:
da = 'django-admin.py'
else:
# try to get pip bin from env
if os.path.exists(os.path.join(bin_env, 'bin', 'django-admin.py')):
da = os.path.join(bin_env, 'bin', 'django-admin.py')
else:
da = bin_env
return da
def command(settings_module,
command,
bin_env=None,
pythonpath=None,
*args, **kwargs):
"""
run arbitrary django management command
"""
da = _get_django_admin(bin_env)
cmd = "{0} {1} --settings={2}".format(da, command, settings_module)
if pythonpath:
cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
for arg in args:
cmd = "{0} --{1}".format(cmd, arg)
    for key, value in kwargs.items():
if not key.startswith("__"):
cmd = '{0} --{1}={2}'.format(cmd, key, value)
return __salt__['cmd.run'](cmd)
def syncdb(settings_module,
bin_env=None,
migrate=False,
database=None,
pythonpath=None):
"""
run syncdb
if you have south installed, you can pass in the optional
``migrate`` kwarg and run the migrations after the syncdb
finishes.
"""
da = _get_django_admin(bin_env)
cmd = "{0} syncdb --settings={1}".format(da, settings_module)
if migrate:
cmd = "{0} --migrate".format(cmd)
if database:
cmd = "{0} --database={1}".format(cmd, database)
if pythonpath:
cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
return __salt__['cmd.run'](cmd)
def createsuperuser(settings_module,
username,
email,
bin_env=None,
database=None,
pythonpath=None):
"""
create a super user for the database.
this defaults to use the ``--noinput`` flag which will
not create a password for the superuser.
"""
da = _get_django_admin(bin_env)
cmd = "{0} createsuperuser --settings={1} --noinput --email='{2}' --username={3}".format(
da, settings_module, email, username)
if database:
cmd = "{0} --database={1}".format(cmd, database)
if pythonpath:
cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
return __salt__['cmd.run'](cmd)
def loaddata(settings_module,
fixtures,
bin_env=None,
database=None,
pythonpath=None):
"""
load fixture data
fixtures:
comma separated list of fixtures to load
"""
da = _get_django_admin(bin_env)
cmd = "{0} loaddata --settings={1} {2}".format(
da, settings_module, " ".join(fixtures.split(",")))
if database:
cmd = "{0} --database={1}".format(cmd, database)
if pythonpath:
cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
return __salt__['cmd.run'](cmd)
def collectstatic(settings_module,
bin_env=None,
no_post_process=False,
ignore=None,
dry_run=False,
clear=False,
link=False,
no_default_ignore=False,
pythonpath=None):
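    """
    run collectstatic with ``--noinput`` for the given settings module
    """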
da = _get_django_admin(bin_env)
cmd = "{0} collectstatic --settings={1} --noinput".format(
da, settings_module)
if no_post_process:
cmd = "{0} --no-post-process".format(cmd)
if ignore:
cmd = "{0} --ignore=".format(cmd, ignore)
if dry_run:
cmd = "{0} --dry-run".format(cmd)
if clear:
cmd = "{0} --clear".format(cmd)
if link:
cmd = "{0} --link".format(cmd)
if no_default_ignore:
cmd = "{0} --no-default-ignore".format(cmd)
if pythonpath:
cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
return __salt__['cmd.run'](cmd)
```
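A hedged sketch of the command string `command()` assembles before handing it to `cmd.run`; `myproj.settings` is a placeholder settings module:

```python
# Illustrative only: reproduce the string command() would build.
da = _get_django_admin(None)  # no bin_env -> plain 'django-admin.py'
cmd = "{0} {1} --settings={2}".format(da, "migrate", "myproj.settings")
assert cmd == "django-admin.py migrate --settings=myproj.settings"
```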
#### File: salt/modules/ssh.py
```python
import os
import re
def _refine_enc(enc):
'''
Return the properly formatted ssh value for the authorized encryption key
    type. ecdsa defaults to 256 bits; the full ecdsa enc schema string must be
    given for larger key sizes. If the type is not found, return ssh-rsa, the
    ssh default.
'''
rsa = ['r', 'rsa', 'ssh-rsa']
dss = ['d', 'dsa', 'dss', 'ssh-dss']
ecdsa = ['e', 'ecdsa', 'ecdsa-sha2-nistp521', 'ecdsa-sha2-nistp384',
'ecdsa-sha2-nistp256']
if enc in rsa:
return 'ssh-rsa'
elif enc in dss:
return 'ssh-dss'
elif enc in ecdsa:
# ecdsa defaults to ecdsa-sha2-nistp256
# otherwise enc string is actual encoding string
if enc in ['e', 'ecdsa']:
return 'ecdsa-sha2-nistp256'
return enc
else:
return 'ssh-rsa'
def _format_auth_line(
key,
enc,
comment,
options):
'''
Properly format user input.
'''
line = ''
if options:
line += '{0} '.format(','.join(options))
line += '{0} {1} {2}\n'.format(enc, key, comment)
return line
def _replace_auth_key(
user,
key,
enc='ssh-rsa',
comment='',
options=[],
config='.ssh/authorized_keys'):
'''
Replace an existing key
'''
auth_line = _format_auth_line(
key,
enc,
comment,
options)
lines = []
uinfo = __salt__['user.info'](user)
full = os.path.join(uinfo['home'], config)
for line in open(full, 'r').readlines():
if line.startswith('#'):
# Commented Line
lines.append(line)
continue
comps = line.split()
if len(comps) < 2:
# Not a valid line
lines.append(line)
continue
key_ind = 1
        if comps[0][:4] not in ['ssh-', 'ecds']:
key_ind = 2
if comps[key_ind] == key:
lines.append(auth_line)
else:
lines.append(line)
open(full, 'w+').writelines(lines)
def _validate_keys(key_file):
'''
Return a dict containing validated keys in the passed file
'''
ret = {}
linere = re.compile(r'^(.*?)\s?((?:ssh\-|ecds).+)$')
try:
for line in open(key_file, 'r').readlines():
if line.startswith('#'):
# Commented Line
continue
# get "{options} key"
ln = re.search(linere, line)
if not ln:
# not an auth ssh key, perhaps a blank line
continue
opts = ln.group(1)
comps = ln.group(2).split()
if len(comps) < 2:
# Not a valid line
continue
if opts:
# It has options, grab them
options = opts.split(',')
else:
options = []
enc = comps[0]
key = comps[1]
comment = ' '.join(comps[2:])
ret[key] = {'enc': enc,
'comment': comment,
'options': options}
except IOError:
return {}
return ret
def host_keys(keydir=None):
'''
Return the minion's host keys
CLI Example::
salt '*' ssh.host_keys
'''
# Set up the default keydir - needs to support sshd_config parsing in the
# future
if not keydir:
if __grains__['kernel'] == 'Linux':
keydir = '/etc/ssh'
keys = {}
for fn_ in os.listdir(keydir):
if fn_.startswith('ssh_host_'):
top = fn_.split('.')
comps = fn_.split('_')
kname = comps[2]
if len(top) > 1:
kname += '.{0}'.format(top[1])
try:
keys[kname] = open(os.path.join(keydir, fn_), 'r').read()
            except (IOError, OSError):
keys[kname] = ''
return keys
def auth_keys(user, config='.ssh/authorized_keys'):
'''
Return the authorized keys for the specified user
CLI Example::
salt '*' ssh.auth_keys root
'''
ret = {}
uinfo = __salt__['user.info'](user)
full = os.path.join(uinfo['home'], config)
if not os.path.isfile(full):
return {}
return _validate_keys(full)
def check_key_file(user, keysource, config='.ssh/authorized_keys'):
'''
Check a keyfile from a source destination against the local keys and
return the keys to change
'''
ret = {}
keyfile = __salt__['cp.cache_file'](keysource)
if not keyfile:
return ret
s_keys = _validate_keys(keyfile)
for key in s_keys:
ret[key] = check_key(
user,
key,
            s_keys[key]['enc'],
            s_keys[key]['comment'],
            s_keys[key]['options'],
config)
return ret
def check_key(user, key, enc, comment, options, config='.ssh/authorized_keys'):
'''
Check to see if a key needs updating, returns "update", "add" or "exists"
CLI Example::
salt '*' ssh.check_key <user> <key>
'''
current = auth_keys(user, config)
nline = _format_auth_line(key, enc, comment, options)
if key in current:
cline = _format_auth_line(
key,
current[key]['enc'],
current[key]['comment'],
current[key]['options'])
if cline != nline:
return 'update'
else:
return 'add'
return 'exists'
def rm_auth_key(user, key, config='.ssh/authorized_keys'):
'''
Remove an authorized key from the specified user's authorized key file
CLI Example::
salt '*' ssh.rm_auth_key <user> <key>
'''
current = auth_keys(user, config)
linere = re.compile(r'^(.*?)\s?((?:ssh\-|ecds).+)$')
if key in current:
# Remove the key
uinfo = __salt__['user.info'](user)
full = os.path.join(uinfo['home'], config)
if not os.path.isfile(full):
return 'User authorized keys file not present'
lines = []
for line in open(full, 'r').readlines():
if line.startswith('#'):
# Commented Line
lines.append(line)
continue
# get "{options} key"
ln = re.search(linere, line)
if not ln:
# not an auth ssh key, perhaps a blank line
continue
opts = ln.group(1)
comps = ln.group(2).split()
if len(comps) < 2:
# Not a valid line
lines.append(line)
continue
if opts:
# It has options, grab them
options = opts.split(',')
else:
options = []
pkey = comps[1]
if pkey == key:
continue
else:
lines.append(line)
open(full, 'w+').writelines(lines)
return 'Key removed'
return 'Key not present'
def set_auth_key_from_file(
user,
source,
config='.ssh/authorized_keys'):
'''
Add a key to the authorized_keys file, using a file as the source.
CLI Example::
salt '*' ssh.set_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub
'''
# TODO: add support for pulling keys from other file sources as well
lfile = __salt__['cp.cache_file'](source)
if not os.path.isfile(lfile):
return 'fail'
    rval = ''
    newkey = _validate_keys(lfile)
for k in newkey.keys():
rval += set_auth_key(user, k, newkey[k]['enc'], newkey[k]['comment'], newkey[k]['options'], config)
# Due to the ability for a single file to have multiple keys, it's possible for a single call
# to this function to have both "replace" and "new" as possible valid returns. I ordered the
# following as I thought best.
if 'fail' in rval:
return 'fail'
elif 'replace' in rval:
return 'replace'
elif 'new' in rval:
return 'new'
else:
return 'no change'
def set_auth_key(
user,
key,
enc='ssh-rsa',
comment='',
options=[],
config='.ssh/authorized_keys'):
'''
Add a key to the authorized_keys file
CLI Example::
salt '*' ssh.set_auth_key <user> <key> dsa 'my key' '[]' .ssh/authorized_keys
'''
if len(key.split()) > 1:
return 'invalid'
enc = _refine_enc(enc)
uinfo = __salt__['user.info'](user)
status = check_key(user, key, enc, comment, options, config)
if status == 'update':
_replace_auth_key(
user,
key,
enc,
comment,
options,
config)
return 'replace'
elif status == 'exists':
return 'no change'
else:
auth_line = _format_auth_line(
key,
enc,
comment,
options)
if not os.path.isdir(uinfo['home']):
return 'fail'
fconfig = os.path.join(uinfo['home'], config)
if not os.path.isdir(os.path.dirname(fconfig)):
dpath = os.path.dirname(fconfig)
os.makedirs(dpath)
os.chown(dpath, uinfo['uid'], uinfo['gid'])
os.chmod(dpath, 448)
if not os.path.isfile(fconfig):
open(fconfig, 'a+').write('{0}'.format(auth_line))
os.chown(fconfig, uinfo['uid'], uinfo['gid'])
os.chmod(fconfig, 384)
else:
open(fconfig, 'a+').write('{0}'.format(auth_line))
return 'new'
```
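A minimal sketch of how an authorized_keys line is assembled from the helpers above; the key material is a placeholder, not a real public key:

```python
enc = _refine_enc("e")  # shorthand expands to 'ecdsa-sha2-nistp256'
line = _format_auth_line("AAAAB3Nz...", enc, "user@host", ["no-pty"])
assert line == "no-pty ecdsa-sha2-nistp256 AAAAB3Nz... user@host\n"
```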
#### File: salt/modules/win_pkg.py
```python
try:
import win32com.client
import pythoncom
except ImportError:  # not on Windows; __virtual__() returns False below
pass
def __virtual__():
'''
Set the virtual pkg module if the os is Windows
'''
return 'pkg' if __grains__['os'] == 'Windows' else False
def _list_removed(old, new):
'''
List the packages which have been removed between the two package objects
'''
pkgs = []
for pkg in old:
if pkg not in new:
pkgs.append(pkg)
return pkgs
def available_version(name):
'''
The available version of the package in the repository
CLI Example::
salt '*' pkg.available_version <package name>
'''
return 'Not implemented on Windows yet'
def upgrade_available(name):
'''
Check whether or not an upgrade is available for a given package
CLI Example::
salt '*' pkg.upgrade_available <package name>
'''
return 'Not implemented on Windows yet'
def list_upgrades():
'''
List all available package upgrades on this system
CLI Example::
salt '*' pkg.list_upgrades
'''
return 'Not implemented on Windows yet'
def version(name):
'''
Returns a version if the package is installed, else returns an empty string
CLI Example::
salt '*' pkg.version <package name>
'''
pkgs = list_pkgs()
if name in pkgs:
return pkgs[name]
else:
return ''
def list_pkgs():
'''
List the packages currently installed as a dict::
{'<package_name>': '<version>'}
CLI Example::
salt '*' pkg.list_pkgs
'''
pythoncom.CoInitialize()
ret = {}
strComputer = "."
objWMIService = win32com.client.Dispatch("WbemScripting.SWbemLocator")
    objSWbemServices = objWMIService.ConnectServer(strComputer, r"root\cimv2")
colItems = objSWbemServices.ExecQuery("Select * from Win32_Product")
for objItem in colItems:
ret[objItem.Name] = objItem.Version
return ret
def refresh_db():
'''
Just recheck the repository and return a dict::
{'<database name>': Bool}
CLI Example::
salt '*' pkg.refresh_db
'''
return 'Not implemented on Windows yet'
def install(name, refresh=False, **kwargs):
'''
Install the passed package
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
                   'new': '<new-version>'}}
CLI Example::
salt '*' pkg.install <package name>
'''
return 'Not implemented on Windows yet'
def upgrade():
'''
Run a full system upgrade
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
                   'new': '<new-version>'}}
CLI Example::
salt '*' pkg.upgrade
'''
return 'Not implemented on Windows yet'
def remove(name):
'''
Remove a single package
Return a list containing the removed packages.
CLI Example::
salt '*' pkg.remove <package name>
'''
return 'Not implemented on Windows yet'
def purge(name):
'''
Recursively remove a package and all dependencies which were installed
with it
Return a list containing the removed packages.
CLI Example::
salt '*' pkg.purge <package name>
'''
return 'Not implemented on Windows yet'
```
#### File: integration/modules/state.py
```python
import integration
class StateModuleTest(integration.ModuleCase):
'''
Validate the test module
'''
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
self.assertTrue(isinstance(high, dict))
self.assertTrue('/testfile' in high)
self.assertEqual(high['/testfile']['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
```
|
{
"source": "jebogaert/networkx",
"score": 3
}
|
#### File: approximation/tests/test_matching.py
```python
import networkx as nx
import networkx.algorithms.approximation as a
def test_min_maximal_matching():
# smoke test
G = nx.Graph()
assert len(a.min_maximal_matching(G)) == 0
```
#### File: algorithms/centrality/voterank_alg.py
```python
__all__ = ["voterank"]
def voterank(G, number_of_nodes=None):
"""Select a list of influential nodes in a graph using VoteRank algorithm
VoteRank [1]_ computes a ranking of the nodes in a graph G based on a
voting scheme. With VoteRank, all nodes vote for each of its in-neighbours
and the node with the highest votes is elected iteratively. The voting
ability of out-neighbors of elected nodes is decreased in subsequent turns.
Note: We treat each edge independently in case of multigraphs.
Parameters
----------
G : graph
A NetworkX graph.
number_of_nodes : integer, optional
Number of ranked nodes to extract (default all nodes).
Returns
-------
voterank : list
Ordered list of computed seeds.
Only nodes with positive number of votes are returned.
References
----------
.. [1] <NAME>. et al. (2016).
Identifying a set of influential spreaders in complex networks.
Sci. Rep. 6, 27823; doi: 10.1038/srep27823.
"""
influential_nodes = []
voterank = {}
if len(G) == 0:
return influential_nodes
if number_of_nodes is None or number_of_nodes > len(G):
number_of_nodes = len(G)
if G.is_directed():
# For directed graphs compute average out-degree
avgDegree = sum(deg for _, deg in G.out_degree()) / len(G)
else:
# For undirected graphs compute average degree
avgDegree = sum(deg for _, deg in G.degree()) / len(G)
    # step 1 - initialize all nodes to (0, 1) (score, voting ability)
for n in G.nodes():
voterank[n] = [0, 1]
# Repeat steps 1b to 4 until num_seeds are elected.
for _ in range(number_of_nodes):
# step 1b - reset rank
for n in G.nodes():
voterank[n][0] = 0
# step 2 - vote
for n, nbr in G.edges():
# In directed graphs nodes only vote for their in-neighbors
voterank[n][0] += voterank[nbr][1]
if not G.is_directed():
voterank[nbr][0] += voterank[n][1]
for n in influential_nodes:
voterank[n][0] = 0
# step 3 - select top node
n = max(G.nodes, key=lambda x: voterank[x][0])
if voterank[n][0] == 0:
return influential_nodes
influential_nodes.append(n)
# weaken the selected node
voterank[n] = [0, 0]
# step 4 - update voterank properties
for _, nbr in G.edges(n):
voterank[nbr][1] -= 1 / avgDegree
voterank[nbr][1] = max(voterank[nbr][1], 0)
return influential_nodes
```
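A minimal usage sketch of `voterank` on a small undirected graph; node 0 has the highest degree, so it is elected first (the second seed depends on tie breaking):

```python
import networkx as nx

G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2)])
seeds = nx.voterank(G, number_of_nodes=2)
assert seeds[0] == 0 and len(seeds) == 2
```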
#### File: coloring/tests/test_coloring.py
```python
import networkx as nx
import pytest
is_coloring = nx.algorithms.coloring.equitable_coloring.is_coloring
is_equitable = nx.algorithms.coloring.equitable_coloring.is_equitable
ALL_STRATEGIES = [
"largest_first",
"random_sequential",
"smallest_last",
"independent_set",
"connected_sequential_bfs",
"connected_sequential_dfs",
"connected_sequential",
"saturation_largest_first",
"DSATUR",
]
# List of strategies where interchange=True results in an error
INTERCHANGE_INVALID = ["independent_set", "saturation_largest_first", "DSATUR"]
class TestColoring:
def test_basic_cases(self):
def check_basic_case(graph_func, n_nodes, strategy, interchange):
graph = graph_func()
coloring = nx.coloring.greedy_color(
graph, strategy=strategy, interchange=interchange
)
assert verify_length(coloring, n_nodes)
assert verify_coloring(graph, coloring)
for graph_func, n_nodes in BASIC_TEST_CASES.items():
            for interchange in [True, False]:
                for strategy in ALL_STRATEGIES:
                    # Strategies in INTERCHANGE_INVALID raise when interchange=True.
                    if interchange and strategy in INTERCHANGE_INVALID:
                        continue
                    check_basic_case(graph_func, n_nodes, strategy, interchange)
def test_special_cases(self):
def check_special_case(strategy, graph_func, interchange, colors):
graph = graph_func()
coloring = nx.coloring.greedy_color(
graph, strategy=strategy, interchange=interchange
)
if not hasattr(colors, "__len__"):
colors = [colors]
assert any(verify_length(coloring, n_colors) for n_colors in colors)
assert verify_coloring(graph, coloring)
for strategy, arglist in SPECIAL_TEST_CASES.items():
for args in arglist:
check_special_case(strategy, args[0], args[1], args[2])
def test_interchange_invalid(self):
graph = one_node_graph()
for strategy in INTERCHANGE_INVALID:
pytest.raises(
nx.NetworkXPointlessConcept,
nx.coloring.greedy_color,
graph,
strategy=strategy,
interchange=True,
)
def test_bad_inputs(self):
graph = one_node_graph()
pytest.raises(
nx.NetworkXError,
nx.coloring.greedy_color,
graph,
strategy="invalid strategy",
)
def test_strategy_as_function(self):
graph = lf_shc()
colors_1 = nx.coloring.greedy_color(graph, "largest_first")
colors_2 = nx.coloring.greedy_color(graph, nx.coloring.strategy_largest_first)
assert colors_1 == colors_2
def test_seed_argument(self):
graph = lf_shc()
rs = nx.coloring.strategy_random_sequential
c1 = nx.coloring.greedy_color(graph, lambda g, c: rs(g, c, seed=1))
for u, v in graph.edges:
assert c1[u] != c1[v]
def test_is_coloring(self):
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2)])
coloring = {0: 0, 1: 1, 2: 0}
assert is_coloring(G, coloring)
coloring[0] = 1
assert not is_coloring(G, coloring)
assert not is_equitable(G, coloring)
def test_is_equitable(self):
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2)])
coloring = {0: 0, 1: 1, 2: 0}
assert is_equitable(G, coloring)
G.add_edges_from([(2, 3), (2, 4), (2, 5)])
coloring[3] = 1
coloring[4] = 1
coloring[5] = 1
assert is_coloring(G, coloring)
assert not is_equitable(G, coloring)
def test_num_colors(self):
G = nx.Graph()
G.add_edges_from([(0, 1), (0, 2), (0, 3)])
pytest.raises(nx.NetworkXAlgorithmError, nx.coloring.equitable_color, G, 2)
def test_equitable_color(self):
G = nx.fast_gnp_random_graph(n=10, p=0.2, seed=42)
coloring = nx.coloring.equitable_color(G, max_degree(G) + 1)
assert is_equitable(G, coloring)
def test_equitable_color_empty(self):
G = nx.empty_graph()
coloring = nx.coloring.equitable_color(G, max_degree(G) + 1)
assert is_equitable(G, coloring)
def test_equitable_color_large(self):
G = nx.fast_gnp_random_graph(100, 0.1, seed=42)
coloring = nx.coloring.equitable_color(G, max_degree(G) + 1)
assert is_equitable(G, coloring, num_colors=max_degree(G) + 1)
def test_case_V_plus_not_in_A_cal(self):
# Hand crafted case to avoid the easy case.
L = {
0: [2, 5],
1: [3, 4],
2: [0, 8],
3: [1, 7],
4: [1, 6],
5: [0, 6],
6: [4, 5],
7: [3],
8: [2],
}
F = {
# Color 0
0: 0,
1: 0,
# Color 1
2: 1,
3: 1,
4: 1,
5: 1,
# Color 2
6: 2,
7: 2,
8: 2,
}
C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F)
N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C)
H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N)
nx.algorithms.coloring.equitable_coloring.procedure_P(
V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L
)
check_state(L=L, N=N, H=H, F=F, C=C)
def test_cast_no_solo(self):
L = {
0: [8, 9],
1: [10, 11],
2: [8],
3: [9],
4: [10, 11],
5: [8],
6: [9],
7: [10, 11],
8: [0, 2, 5],
9: [0, 3, 6],
10: [1, 4, 7],
11: [1, 4, 7],
}
F = {0: 0, 1: 0, 2: 2, 3: 2, 4: 2, 5: 3, 6: 3, 7: 3, 8: 1, 9: 1, 10: 1, 11: 1}
C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F)
N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C)
H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N)
nx.algorithms.coloring.equitable_coloring.procedure_P(
V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L
)
check_state(L=L, N=N, H=H, F=F, C=C)
def test_hard_prob(self):
# Tests for two levels of recursion.
num_colors, s = 5, 5
G = nx.Graph()
G.add_edges_from(
[
(0, 10),
(0, 11),
(0, 12),
(0, 23),
(10, 4),
(10, 9),
(10, 20),
(11, 4),
(11, 8),
(11, 16),
(12, 9),
(12, 22),
(12, 23),
(23, 7),
(1, 17),
(1, 18),
(1, 19),
(1, 24),
(17, 5),
(17, 13),
(17, 22),
(18, 5),
(19, 5),
(19, 6),
(19, 8),
(24, 7),
(24, 16),
(2, 4),
(2, 13),
(2, 14),
(2, 15),
(4, 6),
(13, 5),
(13, 21),
(14, 6),
(14, 15),
(15, 6),
(15, 21),
(3, 16),
(3, 20),
(3, 21),
(3, 22),
(16, 8),
(20, 8),
(21, 9),
(22, 7),
]
)
F = {node: node // s for node in range(num_colors * s)}
F[s - 1] = num_colors - 1
params = make_params_from_graph(G=G, F=F)
nx.algorithms.coloring.equitable_coloring.procedure_P(
V_minus=0, V_plus=num_colors - 1, **params
)
check_state(**params)
def test_hardest_prob(self):
# Tests for two levels of recursion.
num_colors, s = 10, 4
G = nx.Graph()
G.add_edges_from(
[
(0, 19),
(0, 24),
(0, 29),
(0, 30),
(0, 35),
(19, 3),
(19, 7),
(19, 9),
(19, 15),
(19, 21),
(19, 24),
(19, 30),
(19, 38),
(24, 5),
(24, 11),
(24, 13),
(24, 20),
(24, 30),
(24, 37),
(24, 38),
(29, 6),
(29, 10),
(29, 13),
(29, 15),
(29, 16),
(29, 17),
(29, 20),
(29, 26),
(30, 6),
(30, 10),
(30, 15),
(30, 22),
(30, 23),
(30, 39),
(35, 6),
(35, 9),
(35, 14),
(35, 18),
(35, 22),
(35, 23),
(35, 25),
(35, 27),
(1, 20),
(1, 26),
(1, 31),
(1, 34),
(1, 38),
(20, 4),
(20, 8),
(20, 14),
(20, 18),
(20, 28),
(20, 33),
(26, 7),
(26, 10),
(26, 14),
(26, 18),
(26, 21),
(26, 32),
(26, 39),
(31, 5),
(31, 8),
(31, 13),
(31, 16),
(31, 17),
(31, 21),
(31, 25),
(31, 27),
(34, 7),
(34, 8),
(34, 13),
(34, 18),
(34, 22),
(34, 23),
(34, 25),
(34, 27),
(38, 4),
(38, 9),
(38, 12),
(38, 14),
(38, 21),
(38, 27),
(2, 3),
(2, 18),
(2, 21),
(2, 28),
(2, 32),
(2, 33),
(2, 36),
(2, 37),
(2, 39),
(3, 5),
(3, 9),
(3, 13),
(3, 22),
(3, 23),
(3, 25),
(3, 27),
(18, 6),
(18, 11),
(18, 15),
(18, 39),
(21, 4),
(21, 10),
(21, 14),
(21, 36),
(28, 6),
(28, 10),
(28, 14),
(28, 16),
(28, 17),
(28, 25),
(28, 27),
(32, 5),
(32, 10),
(32, 12),
(32, 16),
(32, 17),
(32, 22),
(32, 23),
(33, 7),
(33, 10),
(33, 12),
(33, 16),
(33, 17),
(33, 25),
(33, 27),
(36, 5),
(36, 8),
(36, 15),
(36, 16),
(36, 17),
(36, 25),
(36, 27),
(37, 5),
(37, 11),
(37, 15),
(37, 16),
(37, 17),
(37, 22),
(37, 23),
(39, 7),
(39, 8),
(39, 15),
(39, 22),
(39, 23),
]
)
F = {node: node // s for node in range(num_colors * s)}
F[s - 1] = num_colors - 1 # V- = 0, V+ = num_colors - 1
params = make_params_from_graph(G=G, F=F)
nx.algorithms.coloring.equitable_coloring.procedure_P(
V_minus=0, V_plus=num_colors - 1, **params
)
check_state(**params)
# ############################ Utility functions ############################
def verify_coloring(graph, coloring):
for node in graph.nodes():
if node not in coloring:
return False
color = coloring[node]
for neighbor in graph.neighbors(node):
if coloring[neighbor] == color:
return False
return True
def verify_length(coloring, expected):
coloring = dict_to_sets(coloring)
return len(coloring) == expected
def dict_to_sets(colors):
if len(colors) == 0:
return []
k = max(colors.values()) + 1
sets = [set() for _ in range(k)]
for (node, color) in colors.items():
sets[color].add(node)
return sets
# ############################ Graph Generation ############################
def empty_graph():
return nx.Graph()
def one_node_graph():
graph = nx.Graph()
graph.add_nodes_from([1])
return graph
def two_node_graph():
graph = nx.Graph()
graph.add_nodes_from([1, 2])
graph.add_edges_from([(1, 2)])
return graph
def three_node_clique():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3])
graph.add_edges_from([(1, 2), (1, 3), (2, 3)])
return graph
def disconnected():
graph = nx.Graph()
graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)])
return graph
def rs_shc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4])
graph.add_edges_from([(1, 2), (2, 3), (3, 4)])
return graph
def slf_shc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7])
graph.add_edges_from(
[(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)]
)
return graph
def slf_hc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
graph.add_edges_from(
[
(1, 2),
(1, 3),
(1, 4),
(1, 5),
(2, 3),
(2, 4),
(2, 6),
(5, 7),
(5, 8),
(6, 7),
(6, 8),
(7, 8),
]
)
return graph
def lf_shc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6])
graph.add_edges_from([(6, 1), (1, 4), (4, 3), (3, 2), (2, 5)])
return graph
def lf_hc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7])
graph.add_edges_from(
[
(1, 7),
(1, 6),
(1, 3),
(1, 4),
(7, 2),
(2, 6),
(2, 3),
(2, 5),
(5, 3),
(5, 4),
(4, 3),
]
)
return graph
def sl_shc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6])
graph.add_edges_from(
[(1, 2), (1, 3), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5), (4, 6), (5, 6)]
)
return graph
def sl_hc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
graph.add_edges_from(
[
(1, 2),
(1, 3),
(1, 5),
(1, 7),
(2, 3),
(2, 4),
(2, 8),
(8, 4),
(8, 6),
(8, 7),
(7, 5),
(7, 6),
(3, 4),
(4, 6),
(6, 5),
(5, 3),
]
)
return graph
def gis_shc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4])
graph.add_edges_from([(1, 2), (2, 3), (3, 4)])
return graph
def gis_hc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6])
graph.add_edges_from([(1, 5), (2, 5), (3, 6), (4, 6), (5, 6)])
return graph
def cs_shc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5])
graph.add_edges_from([(1, 2), (1, 5), (2, 3), (2, 4), (2, 5), (3, 4), (4, 5)])
return graph
def rsi_shc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6])
graph.add_edges_from(
[(1, 2), (1, 5), (1, 6), (2, 3), (3, 4), (4, 5), (4, 6), (5, 6)]
)
return graph
def lfi_shc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7])
graph.add_edges_from(
[(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)]
)
return graph
def lfi_hc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9])
graph.add_edges_from(
[
(1, 2),
(1, 5),
(1, 6),
(1, 7),
(2, 3),
(2, 8),
(2, 9),
(3, 4),
(3, 8),
(3, 9),
(4, 5),
(4, 6),
(4, 7),
(5, 6),
]
)
return graph
def sli_shc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7])
graph.add_edges_from(
[
(1, 2),
(1, 3),
(1, 5),
(1, 7),
(2, 3),
(2, 6),
(3, 4),
(4, 5),
(4, 6),
(5, 7),
(6, 7),
]
)
return graph
def sli_hc():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9])
graph.add_edges_from(
[
(1, 2),
(1, 3),
(1, 4),
(1, 5),
(2, 3),
(2, 7),
(2, 8),
(2, 9),
(3, 6),
(3, 7),
(3, 9),
(4, 5),
(4, 6),
(4, 8),
(4, 9),
(5, 6),
(5, 7),
(5, 8),
(6, 7),
(6, 9),
(7, 8),
(8, 9),
]
)
return graph
# --------------------------------------------------------------------------
# Basic tests for all strategies
# For each basic graph function, specify the number of expected colors.
BASIC_TEST_CASES = {
empty_graph: 0,
one_node_graph: 1,
two_node_graph: 2,
disconnected: 2,
three_node_clique: 3,
}
# --------------------------------------------------------------------------
# Special test cases. Each strategy has a list of tuples of the form
# (graph function, interchange, valid # of colors)
SPECIAL_TEST_CASES = {
"random_sequential": [
(rs_shc, False, (2, 3)),
(rs_shc, True, 2),
(rsi_shc, True, (3, 4)),
],
"saturation_largest_first": [(slf_shc, False, (3, 4)), (slf_hc, False, 4)],
"largest_first": [
(lf_shc, False, (2, 3)),
(lf_hc, False, 4),
(lf_shc, True, 2),
(lf_hc, True, 3),
(lfi_shc, True, (3, 4)),
(lfi_hc, True, 4),
],
"smallest_last": [
(sl_shc, False, (3, 4)),
(sl_hc, False, 5),
(sl_shc, True, 3),
(sl_hc, True, 4),
(sli_shc, True, (3, 4)),
(sli_hc, True, 5),
],
"independent_set": [(gis_shc, False, (2, 3)), (gis_hc, False, 3)],
"connected_sequential": [(cs_shc, False, (3, 4)), (cs_shc, True, 3)],
"connected_sequential_dfs": [(cs_shc, False, (3, 4))],
}
# --------------------------------------------------------------------------
# Helper functions used by the tests
def check_state(L, N, H, F, C):
s = len(C[0])
num_colors = len(C.keys())
assert all(u in L[v] for u in L.keys() for v in L[u])
assert all(F[u] != F[v] for u in L.keys() for v in L[u])
assert all(len(L[u]) < num_colors for u in L.keys())
assert all(len(C[x]) == s for x in C)
assert all(H[(c1, c2)] >= 0 for c1 in C.keys() for c2 in C.keys())
assert all(N[(u, F[u])] == 0 for u in F.keys())
def max_degree(G):
"""Get the maximum degree of any node in G."""
return max([G.degree(node) for node in G.nodes]) if len(G.nodes) > 0 else 0
def make_params_from_graph(G, F):
"""Returns {N, L, H, C} from the given graph."""
num_nodes = len(G)
L = {u: [] for u in range(num_nodes)}
for (u, v) in G.edges:
L[u].append(v)
L[v].append(u)
C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F)
N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C)
H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N)
return {"N": N, "F": F, "C": C, "H": H, "L": L}
```
#### File: community/tests/test_lukes.py
```python
from itertools import product
import pytest
import networkx as nx
from networkx.algorithms.community import lukes_partitioning
EWL = "e_weight"
NWL = "n_weight"
# first test from the Lukes original paper
def paper_1_case(float_edge_wt=False, explicit_node_wt=True, directed=False):
# problem-specific constants
limit = 3
# configuration
if float_edge_wt:
shift = 0.001
else:
shift = 0
if directed:
example_1 = nx.DiGraph()
else:
example_1 = nx.Graph()
# graph creation
example_1.add_edge(1, 2, **{EWL: 3 + shift})
example_1.add_edge(1, 4, **{EWL: 2 + shift})
example_1.add_edge(2, 3, **{EWL: 4 + shift})
example_1.add_edge(2, 5, **{EWL: 6 + shift})
# node weights
if explicit_node_wt:
nx.set_node_attributes(example_1, 1, NWL)
wtu = NWL
else:
wtu = None
# partitioning
clusters_1 = {
frozenset(x)
for x in lukes_partitioning(example_1, limit, node_weight=wtu, edge_weight=EWL)
}
return clusters_1
# second test from the Lukes original paper
def paper_2_case(explicit_edge_wt=True, directed=False):
# problem specific constants
byte_block_size = 32
# configuration
if directed:
example_2 = nx.DiGraph()
else:
example_2 = nx.Graph()
if explicit_edge_wt:
edic = {EWL: 1}
wtu = EWL
else:
edic = {}
wtu = None
# graph creation
example_2.add_edge("name", "home_address", **edic)
example_2.add_edge("name", "education", **edic)
example_2.add_edge("education", "bs", **edic)
example_2.add_edge("education", "ms", **edic)
example_2.add_edge("education", "phd", **edic)
example_2.add_edge("name", "telephone", **edic)
example_2.add_edge("telephone", "home", **edic)
example_2.add_edge("telephone", "office", **edic)
example_2.add_edge("office", "no1", **edic)
example_2.add_edge("office", "no2", **edic)
example_2.nodes["name"][NWL] = 20
example_2.nodes["education"][NWL] = 10
example_2.nodes["bs"][NWL] = 1
example_2.nodes["ms"][NWL] = 1
example_2.nodes["phd"][NWL] = 1
example_2.nodes["home_address"][NWL] = 8
example_2.nodes["telephone"][NWL] = 8
example_2.nodes["home"][NWL] = 8
example_2.nodes["office"][NWL] = 4
example_2.nodes["no1"][NWL] = 1
example_2.nodes["no2"][NWL] = 1
# partitioning
clusters_2 = {
frozenset(x)
for x in lukes_partitioning(
example_2, byte_block_size, node_weight=NWL, edge_weight=wtu
)
}
return clusters_2
def test_paper_1_case():
ground_truth = {frozenset([1, 4]), frozenset([2, 3, 5])}
tf = (True, False)
for flt, nwt, drc in product(tf, tf, tf):
part = paper_1_case(flt, nwt, drc)
assert part == ground_truth
def test_paper_2_case():
ground_truth = {
frozenset(["education", "bs", "ms", "phd"]),
frozenset(["name", "home_address"]),
frozenset(["telephone", "home", "office", "no1", "no2"]),
}
tf = (True, False)
for ewt, drc in product(tf, tf):
part = paper_2_case(ewt, drc)
assert part == ground_truth
def test_mandatory_tree():
not_a_tree = nx.complete_graph(4)
with pytest.raises(nx.NotATree):
lukes_partitioning(not_a_tree, 5)
def test_mandatory_integrality():
byte_block_size = 32
ex_1_broken = nx.DiGraph()
ex_1_broken.add_edge(1, 2, **{EWL: 3.2})
ex_1_broken.add_edge(1, 4, **{EWL: 2.4})
ex_1_broken.add_edge(2, 3, **{EWL: 4.0})
ex_1_broken.add_edge(2, 5, **{EWL: 6.3})
ex_1_broken.nodes[1][NWL] = 1.2 # !
ex_1_broken.nodes[2][NWL] = 1
ex_1_broken.nodes[3][NWL] = 1
ex_1_broken.nodes[4][NWL] = 1
ex_1_broken.nodes[5][NWL] = 2
with pytest.raises(TypeError):
lukes_partitioning(
ex_1_broken, byte_block_size, node_weight=NWL, edge_weight=EWL
)
```
#### File: node_classification/tests/test_local_and_global_consistency.py
```python
import pytest
numpy = pytest.importorskip("numpy")
scipy = pytest.importorskip("scipy")
import networkx as nx
from networkx.algorithms import node_classification
class TestLocalAndGlobalConsistency:
def test_path_graph(self):
G = nx.path_graph(4)
label_name = "label"
G.nodes[0][label_name] = "A"
G.nodes[3][label_name] = "B"
predicted = node_classification.local_and_global_consistency(
G, label_name=label_name
)
assert predicted[0] == "A"
assert predicted[1] == "A"
assert predicted[2] == "B"
assert predicted[3] == "B"
def test_no_labels(self):
with pytest.raises(nx.NetworkXError):
G = nx.path_graph(4)
node_classification.local_and_global_consistency(G)
def test_no_nodes(self):
with pytest.raises(nx.NetworkXError):
G = nx.Graph()
node_classification.local_and_global_consistency(G)
def test_no_edges(self):
with pytest.raises(nx.NetworkXError):
G = nx.Graph()
G.add_node(1)
G.add_node(2)
node_classification.local_and_global_consistency(G)
def test_digraph(self):
with pytest.raises(nx.NetworkXNotImplemented):
G = nx.DiGraph()
G.add_edge(0, 1)
G.add_edge(1, 2)
G.add_edge(2, 3)
label_name = "label"
G.nodes[0][label_name] = "A"
G.nodes[3][label_name] = "B"
node_classification.harmonic_function(G)
def test_one_labeled_node(self):
G = nx.path_graph(4)
label_name = "label"
G.nodes[0][label_name] = "A"
predicted = node_classification.local_and_global_consistency(
G, label_name=label_name
)
assert predicted[0] == "A"
assert predicted[1] == "A"
assert predicted[2] == "A"
assert predicted[3] == "A"
def test_nodes_all_labeled(self):
G = nx.karate_club_graph()
label_name = "club"
predicted = node_classification.local_and_global_consistency(
G, alpha=0, label_name=label_name
)
for i in range(len(G)):
assert predicted[i] == G.nodes[i][label_name]
```
#### File: networkx/algorithms/summarization.py
```python
import networkx as nx
__all__ = [
"dedensify",
]
def dedensify(G, threshold, prefix=None, copy=True):
"""Compresses neighborhoods around high-degree nodes
Reduces the number of edges to high-degree nodes by adding compressor nodes
that summarize multiple edges of the same type to high-degree nodes (nodes
with a degree greater than a given threshold). Dedensification also has
the added benefit of reducing the number of edges around high-degree nodes.
The implementation currently supports graphs with a single edge type.
Parameters
----------
G: graph
A networkx graph
threshold: int
Minimum degree threshold of a node to be considered a high degree node.
The threshold must be greater than or equal to 2.
prefix: str or None, optional (default: None)
An optional prefix for denoting compressor nodes
    copy: bool, optional (default: True)
        If True, operate on a copy of ``G``; if False, modify ``G`` in place
Returns
-------
dedensified networkx graph : (graph, set)
2-tuple of the dedensified graph and set of compressor nodes
Notes
-----
    Following the algorithm in [1]_, dedensification removes edges in a graph
    by compressing/decompressing the neighborhoods around high degree nodes,
adding compressor nodes that summarize multiple edges of the same type
to high-degree nodes. Dedensification will only add a compressor node when
doing so will reduce the total number of edges in the given graph. This
implementation currently supports graphs with a single edge type.
Examples
--------
Dedensification will only add compressor nodes when doing so would result
in fewer edges::
>>> original_graph = nx.DiGraph()
>>> original_graph.add_nodes_from(
... ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
... )
>>> original_graph.add_edges_from(
... [
... ("1", "C"), ("1", "B"),
... ("2", "C"), ("2", "B"), ("2", "A"),
... ("3", "B"), ("3", "A"), ("3", "6"),
... ("4", "C"), ("4", "B"), ("4", "A"),
... ("5", "B"), ("5", "A"),
... ("6", "5"),
... ("A", "6")
... ]
... )
>>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
>>> original_graph.number_of_edges()
15
>>> c_graph.number_of_edges()
14
A dedensified, directed graph can be "densified" to reconstruct the
original graph::
>>> original_graph = nx.DiGraph()
>>> original_graph.add_nodes_from(
... ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
... )
>>> original_graph.add_edges_from(
... [
... ("1", "C"), ("1", "B"),
... ("2", "C"), ("2", "B"), ("2", "A"),
... ("3", "B"), ("3", "A"), ("3", "6"),
... ("4", "C"), ("4", "B"), ("4", "A"),
... ("5", "B"), ("5", "A"),
... ("6", "5"),
... ("A", "6")
... ]
... )
>>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
>>> # re-densifies the compressed graph into the original graph
>>> for c_node in c_nodes:
... all_neighbors = set(nx.all_neighbors(c_graph, c_node))
... out_neighbors = set(c_graph.neighbors(c_node))
... for out_neighbor in out_neighbors:
... c_graph.remove_edge(c_node, out_neighbor)
... in_neighbors = all_neighbors - out_neighbors
... for in_neighbor in in_neighbors:
... c_graph.remove_edge(in_neighbor, c_node)
... for out_neighbor in out_neighbors:
... c_graph.add_edge(in_neighbor, out_neighbor)
... c_graph.remove_node(c_node)
...
>>> nx.is_isomorphic(original_graph, c_graph)
True
References
----------
.. [1] <NAME>., & <NAME>. (2016, August).
Scalable pattern matching over compressed graphs via dedensification.
In Proceedings of the 22nd ACM SIGKDD International Conference on
Knowledge Discovery and Data Mining (pp. 1755-1764).
http://www.cs.umd.edu/~abadi/papers/graph-dedense.pdf
"""
if threshold < 2:
raise nx.NetworkXError("The degree threshold must be >= 2")
degrees = G.in_degree if G.is_directed() else G.degree
# Group nodes based on degree threshold
high_degree_nodes = set([n for n, d in degrees if d > threshold])
low_degree_nodes = G.nodes() - high_degree_nodes
auxillary = {}
for node in G:
high_degree_neighbors = frozenset(high_degree_nodes & set(G[node]))
if high_degree_neighbors:
if high_degree_neighbors in auxillary:
auxillary[high_degree_neighbors].add(node)
else:
auxillary[high_degree_neighbors] = {node}
if copy:
G = G.copy()
compressor_nodes = set()
for index, (high_degree_nodes, low_degree_nodes) in enumerate(auxillary.items()):
low_degree_node_count = len(low_degree_nodes)
high_degree_node_count = len(high_degree_nodes)
old_edges = high_degree_node_count * low_degree_node_count
new_edges = high_degree_node_count + low_degree_node_count
if old_edges <= new_edges:
continue
compression_node = "".join(str(node) for node in high_degree_nodes)
if prefix:
compression_node = str(prefix) + compression_node
for node in low_degree_nodes:
for high_node in high_degree_nodes:
if G.has_edge(node, high_node):
G.remove_edge(node, high_node)
G.add_edge(node, compression_node)
for node in high_degree_nodes:
G.add_edge(compression_node, node)
compressor_nodes.add(compression_node)
return G, compressor_nodes
```
#### File: networkx/algorithms/swap.py
```python
import math
from networkx.utils import py_random_state
import networkx as nx
__all__ = ["double_edge_swap", "connected_double_edge_swap"]
@py_random_state(3)
def double_edge_swap(G, nswap=1, max_tries=100, seed=None):
"""Swap two edges in the graph while keeping the node degrees fixed.
A double-edge swap removes two randomly chosen edges u-v and x-y
and creates the new edges u-x and v-y::
u--v u v
becomes | |
x--y x y
If either the edge u-x or v-y already exist no swap is performed
and another attempt is made to find a suitable edge pair.
Parameters
----------
G : graph
An undirected graph
nswap : integer (optional, default=1)
Number of double-edge swaps to perform
max_tries : integer (optional)
Maximum number of attempts to swap edges
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : graph
The graph after double edge swaps.
Notes
-----
Does not enforce any connectivity constraints.
The graph G is modified in place.
"""
if G.is_directed():
raise nx.NetworkXError("double_edge_swap() not defined for directed graphs.")
if nswap > max_tries:
raise nx.NetworkXError("Number of swaps > number of tries allowed.")
if len(G) < 4:
raise nx.NetworkXError("Graph has less than four nodes.")
# Instead of choosing uniformly at random from a generated edge list,
# this algorithm chooses nonuniformly from the set of nodes with
# probability weighted by degree.
n = 0
swapcount = 0
keys, degrees = zip(*G.degree()) # keys, degree
cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree
discrete_sequence = nx.utils.discrete_sequence
while swapcount < nswap:
# if random.random() < 0.5: continue # trick to avoid periodicities?
# pick two random edges without creating edge list
# choose source node indices from discrete distribution
(ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
if ui == xi:
continue # same source, skip
u = keys[ui] # convert index to label
x = keys[xi]
# choose target uniformly from neighbors
v = seed.choice(list(G[u]))
y = seed.choice(list(G[x]))
if v == y:
continue # same target, skip
if (x not in G[u]) and (y not in G[v]): # don't create parallel edges
G.add_edge(u, x)
G.add_edge(v, y)
G.remove_edge(u, v)
G.remove_edge(x, y)
swapcount += 1
if n >= max_tries:
e = (
f"Maximum number of swap attempts ({n}) exceeded "
f"before desired swaps achieved ({nswap})."
)
raise nx.NetworkXAlgorithmError(e)
n += 1
return G
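# --- Illustrative usage (not part of the original module) ---
# A minimal sketch: double_edge_swap rewires edges while keeping every node's
# degree fixed, so the sorted degree sequence is unchanged afterwards.
#
#   >>> G = nx.path_graph(6)
#   >>> before = sorted(d for _, d in G.degree())
#   >>> G = double_edge_swap(G, nswap=2, max_tries=100, seed=1)
#   >>> sorted(d for _, d in G.degree()) == before
#   True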
@py_random_state(3)
def connected_double_edge_swap(G, nswap=1, _window_threshold=3, seed=None):
"""Attempts the specified number of double-edge swaps in the graph `G`.
A double-edge swap removes two randomly chosen edges `(u, v)` and `(x,
y)` and creates the new edges `(u, x)` and `(v, y)`::
u--v u v
becomes | |
x--y x y
If either `(u, x)` or `(v, y)` already exist, then no swap is performed
so the actual number of swapped edges is always *at most* `nswap`.
Parameters
----------
G : graph
An undirected graph
nswap : integer (optional, default=1)
Number of double-edge swaps to perform
_window_threshold : integer
The window size below which connectedness of the graph will be checked
after each swap.
The "window" in this function is a dynamically updated integer that
represents the number of swap attempts to make before checking if the
graph remains connected. It is an optimization used to decrease the
running time of the algorithm in exchange for increased complexity of
implementation.
If the window size is below this threshold, then the algorithm checks
after each swap if the graph remains connected by checking if there is a
path joining the two nodes whose edge was just removed. If the window
        size is above this threshold, then the algorithm performs all the
        swaps in the window and only then checks if the graph is still connected.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
int
The number of successful swaps
Raises
------
NetworkXError
If the input graph is not connected, or if the graph has fewer than four
nodes.
Notes
-----
The initial graph `G` must be connected, and the resulting graph is
connected. The graph `G` is modified in place.
References
----------
.. [1] <NAME> and <NAME> and <NAME>,
The Markov chain simulation method for generating connected
power law random graphs, 2003.
http://citeseer.ist.psu.edu/gkantsidis03markov.html
"""
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected")
if len(G) < 4:
raise nx.NetworkXError("Graph has less than four nodes.")
n = 0
swapcount = 0
deg = G.degree()
# Label key for nodes
dk = list(n for n, d in G.degree())
cdf = nx.utils.cumulative_distribution(list(d for n, d in G.degree()))
discrete_sequence = nx.utils.discrete_sequence
window = 1
while n < nswap:
wcount = 0
swapped = []
# If the window is small, we just check each time whether the graph is
# connected by checking if the nodes that were just separated are still
# connected.
if window < _window_threshold:
# This Boolean keeps track of whether there was a failure or not.
fail = False
while wcount < window and n < nswap:
# Pick two random edges without creating the edge list. Choose
# source nodes from the discrete degree distribution.
(ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
# If the source nodes are the same, skip this pair.
if ui == xi:
continue
# Convert an index to a node label.
u = dk[ui]
x = dk[xi]
# Choose targets uniformly from neighbors.
v = seed.choice(list(G.neighbors(u)))
y = seed.choice(list(G.neighbors(x)))
# If the target nodes are the same, skip this pair.
if v == y:
continue
if x not in G[u] and y not in G[v]:
G.remove_edge(u, v)
G.remove_edge(x, y)
G.add_edge(u, x)
G.add_edge(v, y)
swapped.append((u, v, x, y))
swapcount += 1
n += 1
# If G remains connected...
if nx.has_path(G, u, v):
wcount += 1
# Otherwise, undo the changes.
else:
G.add_edge(u, v)
G.add_edge(x, y)
G.remove_edge(u, x)
G.remove_edge(v, y)
swapcount -= 1
fail = True
# If one of the swaps failed, reduce the window size.
if fail:
window = int(math.ceil(window / 2))
else:
window += 1
# If the window is large, then there is a good chance that a bunch of
# swaps will work. It's quicker to do all those swaps first and then
# check if the graph remains connected.
else:
while wcount < window and n < nswap:
# Pick two random edges without creating the edge list. Choose
# source nodes from the discrete degree distribution.
(ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
# If the source nodes are the same, skip this pair.
if ui == xi:
continue
# Convert an index to a node label.
u = dk[ui]
x = dk[xi]
# Choose targets uniformly from neighbors.
v = seed.choice(list(G.neighbors(u)))
y = seed.choice(list(G.neighbors(x)))
# If the target nodes are the same, skip this pair.
if v == y:
continue
if x not in G[u] and y not in G[v]:
G.remove_edge(u, v)
G.remove_edge(x, y)
G.add_edge(u, x)
G.add_edge(v, y)
swapped.append((u, v, x, y))
swapcount += 1
n += 1
wcount += 1
# If the graph remains connected, increase the window size.
if nx.is_connected(G):
window += 1
# Otherwise, undo the changes from the previous window and decrease
# the window size.
else:
while swapped:
(u, v, x, y) = swapped.pop()
G.add_edge(u, v)
G.add_edge(x, y)
G.remove_edge(u, x)
G.remove_edge(v, y)
swapcount -= 1
window = int(math.ceil(window / 2))
return swapcount
```
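A quick degree-preservation check, shown as a hedged usage sketch (the random graph and seed below are illustrative, not from the repository): every double-edge swap rewires edges without changing any node's degree.
```python
import networkx as nx

G = nx.gnm_random_graph(20, 40, seed=1)
degrees_before = dict(G.degree())
# May raise NetworkXAlgorithmError if max_tries is exhausted first.
nx.double_edge_swap(G, nswap=10, max_tries=1000, seed=1)
assert dict(G.degree()) == degrees_before  # degrees are invariant
```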
#### File: algorithms/tests/test_communicability.py
```python
from collections import defaultdict
import pytest
numpy = pytest.importorskip("numpy")
scipy = pytest.importorskip("scipy")
import networkx as nx
from networkx.testing import almost_equal
from networkx.algorithms.communicability_alg import communicability, communicability_exp
class TestCommunicability:
def test_communicability(self):
answer = {
0: {0: 1.5430806348152435, 1: 1.1752011936438012},
1: {0: 1.1752011936438012, 1: 1.5430806348152435},
}
# answer={(0, 0): 1.5430806348152435,
# (0, 1): 1.1752011936438012,
# (1, 0): 1.1752011936438012,
# (1, 1): 1.5430806348152435}
result = communicability(nx.path_graph(2))
for k1, val in result.items():
for k2 in val:
assert almost_equal(answer[k1][k2], result[k1][k2], places=7)
def test_communicability2(self):
answer_orig = {
("1", "1"): 1.6445956054135658,
("1", "Albert"): 0.7430186221096251,
("1", "Aric"): 0.7430186221096251,
("1", "Dan"): 1.6208126320442937,
("1", "Franck"): 0.42639707170035257,
("Albert", "1"): 0.7430186221096251,
("Albert", "Albert"): 2.4368257358712189,
("Albert", "Aric"): 1.4368257358712191,
("Albert", "Dan"): 2.0472097037446453,
("Albert", "Franck"): 1.8340111678944691,
("Aric", "1"): 0.7430186221096251,
("Aric", "Albert"): 1.4368257358712191,
("Aric", "Aric"): 2.4368257358712193,
("Aric", "Dan"): 2.0472097037446457,
("Aric", "Franck"): 1.8340111678944691,
("Dan", "1"): 1.6208126320442937,
("Dan", "Albert"): 2.0472097037446453,
("Dan", "Aric"): 2.0472097037446457,
("Dan", "Dan"): 3.1306328496328168,
("Dan", "Franck"): 1.4860372442192515,
("Franck", "1"): 0.42639707170035257,
("Franck", "Albert"): 1.8340111678944691,
("Franck", "Aric"): 1.8340111678944691,
("Franck", "Dan"): 1.4860372442192515,
("Franck", "Franck"): 2.3876142275231915,
}
answer = defaultdict(dict)
for (k1, k2), v in answer_orig.items():
answer[k1][k2] = v
G1 = nx.Graph(
[
("Franck", "Aric"),
("Aric", "Dan"),
("Dan", "Albert"),
("Albert", "Franck"),
("Dan", "1"),
("Franck", "Albert"),
]
)
result = communicability(G1)
for k1, val in result.items():
for k2 in val:
assert almost_equal(answer[k1][k2], result[k1][k2], places=7)
result = communicability_exp(G1)
for k1, val in result.items():
for k2 in val:
assert almost_equal(answer[k1][k2], result[k1][k2], places=7)
```
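The expected values in `test_communicability` follow directly from the definition: communicability is the matrix exponential of the adjacency matrix. A minimal cross-check sketch (assuming NumPy and SciPy, as the tests above already do):
```python
import networkx as nx
import numpy as np
from scipy.linalg import expm

A = nx.to_numpy_array(nx.path_graph(2))
C = expm(A)  # C[u, v] = (e^A)_{uv}, the communicability between u and v
assert np.isclose(C[0, 0], 1.5430806348152435)  # cosh(1)
assert np.isclose(C[0, 1], 1.1752011936438012)  # sinh(1)
```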
#### File: algorithms/tests/test_cuts.py
```python
import networkx as nx
class TestCutSize:
"""Unit tests for the :func:`~networkx.cut_size` function."""
def test_symmetric(self):
"""Tests that the cut size is symmetric."""
G = nx.barbell_graph(3, 0)
S = {0, 1, 4}
T = {2, 3, 5}
assert nx.cut_size(G, S, T) == 4
assert nx.cut_size(G, T, S) == 4
def test_single_edge(self):
"""Tests for a cut of a single edge."""
G = nx.barbell_graph(3, 0)
S = {0, 1, 2}
T = {3, 4, 5}
assert nx.cut_size(G, S, T) == 1
assert nx.cut_size(G, T, S) == 1
def test_directed(self):
"""Tests that each directed edge is counted once in the cut."""
G = nx.barbell_graph(3, 0).to_directed()
S = {0, 1, 2}
T = {3, 4, 5}
assert nx.cut_size(G, S, T) == 2
assert nx.cut_size(G, T, S) == 2
def test_directed_symmetric(self):
"""Tests that a cut in a directed graph is symmetric."""
G = nx.barbell_graph(3, 0).to_directed()
S = {0, 1, 4}
T = {2, 3, 5}
assert nx.cut_size(G, S, T) == 8
assert nx.cut_size(G, T, S) == 8
def test_multigraph(self):
"""Tests that parallel edges are each counted for a cut."""
G = nx.MultiGraph(["ab", "ab"])
assert nx.cut_size(G, {"a"}, {"b"}) == 2
class TestVolume:
"""Unit tests for the :func:`~networkx.volume` function."""
def test_graph(self):
G = nx.cycle_graph(4)
assert nx.volume(G, {0, 1}) == 4
def test_digraph(self):
G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0)])
assert nx.volume(G, {0, 1}) == 2
def test_multigraph(self):
edges = list(nx.cycle_graph(4).edges())
G = nx.MultiGraph(edges * 2)
assert nx.volume(G, {0, 1}) == 8
def test_multidigraph(self):
edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
G = nx.MultiDiGraph(edges * 2)
assert nx.volume(G, {0, 1}) == 4
class TestNormalizedCutSize:
"""Unit tests for the :func:`~networkx.normalized_cut_size`
function.
"""
def test_graph(self):
G = nx.path_graph(4)
S = {1, 2}
T = set(G) - S
size = nx.normalized_cut_size(G, S, T)
# The cut looks like this: o-{-o--o-}-o
expected = 2 * ((1 / 4) + (1 / 2))
assert expected == size
def test_directed(self):
G = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
S = {1, 2}
T = set(G) - S
size = nx.normalized_cut_size(G, S, T)
# The cut looks like this: o-{->o-->o-}->o
expected = 2 * ((1 / 2) + (1 / 1))
assert expected == size
class TestConductance:
"""Unit tests for the :func:`~networkx.conductance` function."""
def test_graph(self):
G = nx.barbell_graph(5, 0)
# Consider the singleton sets containing the "bridge" nodes.
# There is only one cut edge, and each set has volume five.
S = {4}
T = {5}
conductance = nx.conductance(G, S, T)
expected = 1 / 5
assert expected == conductance
class TestEdgeExpansion:
"""Unit tests for the :func:`~networkx.edge_expansion` function."""
def test_graph(self):
G = nx.barbell_graph(5, 0)
S = set(range(5))
T = set(G) - S
expansion = nx.edge_expansion(G, S, T)
expected = 1 / 5
assert expected == expansion
class TestNodeExpansion:
"""Unit tests for the :func:`~networkx.node_expansion` function."""
def test_graph(self):
G = nx.path_graph(8)
S = {3, 4, 5}
expansion = nx.node_expansion(G, S)
# The neighborhood of S has cardinality five, and S has
# cardinality three.
expected = 5 / 3
assert expected == expansion
class TestBoundaryExpansion:
"""Unit tests for the :func:`~networkx.boundary_expansion` function."""
def test_graph(self):
G = nx.complete_graph(10)
S = set(range(4))
expansion = nx.boundary_expansion(G, S)
# The node boundary of S has cardinality six, and S has
# cardinality four.
expected = 6 / 4
assert expected == expansion
class TestMixingExpansion:
"""Unit tests for the :func:`~networkx.mixing_expansion` function."""
def test_graph(self):
G = nx.barbell_graph(5, 0)
S = set(range(5))
T = set(G) - S
expansion = nx.mixing_expansion(G, S, T)
# There is one cut edge, and the total number of edges in the
# graph is twice the total number of edges in a clique of size
# five, plus one more for the bridge.
expected = 1 / (2 * (5 * 4 + 1))
assert expected == expansion
```
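These cut quantities are easy to recompute from first principles, which makes the expected values above auditable. A minimal sketch of `conductance` (cut size divided by the smaller volume), reusing the barbell graph from `TestConductance`:
```python
import networkx as nx

G = nx.barbell_graph(5, 0)
S, T = {4}, {5}
cut = sum(1 for u, v in G.edges()
          if (u in S and v in T) or (u in T and v in S))

def vol(X):
    return sum(d for _, d in G.degree(X))

assert cut / min(vol(S), vol(T)) == nx.conductance(G, S, T) == 1 / 5
```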
#### File: algorithms/tests/test_hybrid.py
```python
import networkx as nx
def test_2d_grid_graph():
# FC article claims 2d grid graph of size n is (3,3)-connected
# and (5,9)-connected, but I don't think it is (5,9)-connected
G = nx.grid_2d_graph(8, 8, periodic=True)
assert nx.is_kl_connected(G, 3, 3)
assert not nx.is_kl_connected(G, 5, 9)
(H, graphOK) = nx.kl_connected_subgraph(G, 5, 9, same_as_graph=True)
assert not graphOK
def test_small_graph():
G = nx.Graph()
G.add_edge(1, 2)
G.add_edge(1, 3)
G.add_edge(2, 3)
assert nx.is_kl_connected(G, 2, 2)
H = nx.kl_connected_subgraph(G, 2, 2)
(H, graphOK) = nx.kl_connected_subgraph(
G, 2, 2, low_memory=True, same_as_graph=True
)
assert graphOK
```
#### File: algorithms/tests/test_isolate.py
```python
import networkx as nx
def test_is_isolate():
G = nx.Graph()
G.add_edge(0, 1)
G.add_node(2)
assert not nx.is_isolate(G, 0)
assert not nx.is_isolate(G, 1)
assert nx.is_isolate(G, 2)
def test_isolates():
G = nx.Graph()
G.add_edge(0, 1)
G.add_nodes_from([2, 3])
assert sorted(nx.isolates(G)) == [2, 3]
def test_number_of_isolates():
G = nx.Graph()
G.add_edge(0, 1)
G.add_nodes_from([2, 3])
assert nx.number_of_isolates(G) == 2
```
#### File: algorithms/tests/test_max_weight_clique.py
```python
import networkx as nx
import pytest
class TestMaximumWeightClique:
def test_basic_cases(self):
def check_basic_case(graph_func, expected_weight, weight_accessor):
graph = graph_func()
clique, weight = nx.algorithms.max_weight_clique(graph, weight_accessor)
assert verify_clique(
graph, clique, weight, expected_weight, weight_accessor
)
for graph_func, (expected_weight, expected_size) in TEST_CASES.items():
check_basic_case(graph_func, expected_weight, "weight")
check_basic_case(graph_func, expected_size, None)
def test_key_error(self):
graph = two_node_graph()
with pytest.raises(KeyError):
nx.algorithms.max_weight_clique(graph, "non-existent-key")
def test_error_on_non_integer_weight(self):
graph = two_node_graph()
graph.nodes[2]["weight"] = 1.5
with pytest.raises(ValueError):
nx.algorithms.max_weight_clique(graph)
def test_unaffected_by_self_loops(self):
graph = two_node_graph()
graph.add_edge(1, 1)
graph.add_edge(2, 2)
clique, weight = nx.algorithms.max_weight_clique(graph, "weight")
assert verify_clique(graph, clique, weight, 30, "weight")
graph = three_node_independent_set()
graph.add_edge(1, 1)
clique, weight = nx.algorithms.max_weight_clique(graph, "weight")
assert verify_clique(graph, clique, weight, 20, "weight")
def test_30_node_prob(self):
G = nx.Graph()
G.add_nodes_from(range(1, 31))
for i in range(1, 31):
G.nodes[i]["weight"] = i + 1
# fmt: off
G.add_edges_from(
[
(1, 12), (1, 13), (1, 15), (1, 16), (1, 18), (1, 19), (1, 20),
(1, 23), (1, 26), (1, 28), (1, 29), (1, 30), (2, 3), (2, 4),
(2, 5), (2, 8), (2, 9), (2, 10), (2, 14), (2, 17), (2, 18),
(2, 21), (2, 22), (2, 23), (2, 27), (3, 9), (3, 15), (3, 21),
(3, 22), (3, 23), (3, 24), (3, 27), (3, 28), (3, 29), (4, 5),
(4, 6), (4, 8), (4, 21), (4, 22), (4, 23), (4, 26), (4, 28),
(4, 30), (5, 6), (5, 8), (5, 9), (5, 13), (5, 14), (5, 15),
(5, 16), (5, 20), (5, 21), (5, 22), (5, 25), (5, 28), (5, 29),
(6, 7), (6, 8), (6, 13), (6, 17), (6, 18), (6, 19), (6, 24),
(6, 26), (6, 27), (6, 28), (6, 29), (7, 12), (7, 14), (7, 15),
(7, 16), (7, 17), (7, 20), (7, 25), (7, 27), (7, 29), (7, 30),
(8, 10), (8, 15), (8, 16), (8, 18), (8, 20), (8, 22), (8, 24),
(8, 26), (8, 27), (8, 28), (8, 30), (9, 11), (9, 12), (9, 13),
(9, 14), (9, 15), (9, 16), (9, 19), (9, 20), (9, 21), (9, 24),
(9, 30), (10, 12), (10, 15), (10, 18), (10, 19), (10, 20),
(10, 22), (10, 23), (10, 24), (10, 26), (10, 27), (10, 29),
(10, 30), (11, 13), (11, 15), (11, 16), (11, 17), (11, 18),
(11, 19), (11, 20), (11, 22), (11, 29), (11, 30), (12, 14),
(12, 17), (12, 18), (12, 19), (12, 20), (12, 21), (12, 23),
(12, 25), (12, 26), (12, 30), (13, 20), (13, 22), (13, 23),
(13, 24), (13, 30), (14, 16), (14, 20), (14, 21), (14, 22),
(14, 23), (14, 25), (14, 26), (14, 27), (14, 29), (14, 30),
(15, 17), (15, 18), (15, 20), (15, 21), (15, 26), (15, 27),
(15, 28), (16, 17), (16, 18), (16, 19), (16, 20), (16, 21),
(16, 29), (16, 30), (17, 18), (17, 21), (17, 22), (17, 25),
(17, 27), (17, 28), (17, 30), (18, 19), (18, 20), (18, 21),
(18, 22), (18, 23), (18, 24), (19, 20), (19, 22), (19, 23),
(19, 24), (19, 25), (19, 27), (19, 30), (20, 21), (20, 23),
(20, 24), (20, 26), (20, 28), (20, 29), (21, 23), (21, 26),
(21, 27), (21, 29), (22, 24), (22, 25), (22, 26), (22, 29),
(23, 25), (23, 30), (24, 25), (24, 26), (25, 27), (25, 29),
(26, 27), (26, 28), (26, 30), (28, 29), (29, 30),
]
)
# fmt: on
clique, weight = nx.algorithms.max_weight_clique(G)
assert verify_clique(G, clique, weight, 111, "weight")
# ############################ Utility functions ############################
def verify_clique(
graph, clique, reported_clique_weight, expected_clique_weight, weight_accessor
):
for node1 in clique:
for node2 in clique:
if node1 == node2:
continue
if not graph.has_edge(node1, node2):
return False
if weight_accessor is None:
clique_weight = len(clique)
else:
clique_weight = sum(graph.nodes[v]["weight"] for v in clique)
if clique_weight != expected_clique_weight:
return False
if clique_weight != reported_clique_weight:
return False
return True
# ############################ Graph Generation ############################
def empty_graph():
return nx.Graph()
def one_node_graph():
graph = nx.Graph()
graph.add_nodes_from([1])
graph.nodes[1]["weight"] = 10
return graph
def two_node_graph():
graph = nx.Graph()
graph.add_nodes_from([1, 2])
graph.add_edges_from([(1, 2)])
graph.nodes[1]["weight"] = 10
graph.nodes[2]["weight"] = 20
return graph
def three_node_clique():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3])
graph.add_edges_from([(1, 2), (1, 3), (2, 3)])
graph.nodes[1]["weight"] = 10
graph.nodes[2]["weight"] = 20
graph.nodes[3]["weight"] = 5
return graph
def three_node_independent_set():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3])
graph.nodes[1]["weight"] = 10
graph.nodes[2]["weight"] = 20
graph.nodes[3]["weight"] = 5
return graph
def disconnected():
graph = nx.Graph()
graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)])
graph.nodes[1]["weight"] = 10
graph.nodes[2]["weight"] = 20
graph.nodes[3]["weight"] = 5
graph.nodes[4]["weight"] = 100
graph.nodes[5]["weight"] = 200
graph.nodes[6]["weight"] = 50
return graph
# --------------------------------------------------------------------------
# Basic tests for all strategies
# For each basic graph function, specify expected weight of max weight clique
# and expected size of maximum clique
TEST_CASES = {
empty_graph: (0, 0),
one_node_graph: (10, 1),
two_node_graph: (30, 2),
three_node_clique: (35, 3),
three_node_independent_set: (20, 1),
disconnected: (300, 2),
}
```
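A short usage sketch of the function under test, mirroring the `three_node_clique` case above: `max_weight_clique` returns the clique and its total node weight, and counts each node as weight 1 when `weight=None`.
```python
import networkx as nx

G = nx.Graph([(1, 2), (1, 3), (2, 3)])
for node, w in [(1, 10), (2, 20), (3, 5)]:
    G.nodes[node]["weight"] = w
clique, weight = nx.max_weight_clique(G, weight="weight")
assert set(clique) == {1, 2, 3} and weight == 35
clique, size = nx.max_weight_clique(G, weight=None)
assert size == 3  # with weight=None, every node counts as 1
```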
#### File: traversal/tests/test_beamsearch.py
```python
import networkx as nx
def identity(x):
return x
class TestBeamSearch:
"""Unit tests for the beam search function."""
def test_narrow(self):
"""Tests that a narrow beam width may cause an incomplete search."""
# In this search, we enqueue only the neighbor 3 at the first
# step, then only the neighbor 2 at the second step. Once at
# node 2, the search chooses node 3, since it has a higher value
# that node 1, but node 3 has already been visited, so the
# search terminates.
G = nx.cycle_graph(4)
edges = nx.bfs_beam_edges(G, 0, identity, width=1)
assert list(edges) == [(0, 3), (3, 2)]
def test_wide(self):
G = nx.cycle_graph(4)
edges = nx.bfs_beam_edges(G, 0, identity, width=2)
assert list(edges) == [(0, 3), (0, 1), (3, 2)]
```
#### File: classes/tests/test_subgraphviews.py
```python
import pytest
import networkx as nx
class TestSubGraphView:
gview = staticmethod(nx.graphviews.subgraph_view)
graph = nx.Graph
hide_edges_filter = staticmethod(nx.filters.hide_edges)
show_edges_filter = staticmethod(nx.filters.show_edges)
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=cls.graph())
cls.hide_edges_w_hide_nodes = {(3, 4), (4, 5), (5, 6)}
def test_hidden_nodes(self):
hide_nodes = [4, 5, 111]
nodes_gone = nx.filters.hide_nodes(hide_nodes)
gview = self.gview
print(gview)
G = gview(self.G, filter_node=nodes_gone)
assert self.G.nodes - G.nodes == {4, 5}
assert self.G.edges - G.edges == self.hide_edges_w_hide_nodes
if G.is_directed():
assert list(G[3]) == []
assert list(G[2]) == [3]
else:
assert list(G[3]) == [2]
assert set(G[2]) == {1, 3}
pytest.raises(KeyError, G.__getitem__, 4)
pytest.raises(KeyError, G.__getitem__, 112)
pytest.raises(KeyError, G.__getitem__, 111)
assert G.degree(3) == (3 if G.is_multigraph() else 1)
assert G.size() == (7 if G.is_multigraph() else 5)
def test_hidden_edges(self):
hide_edges = [(2, 3), (8, 7), (222, 223)]
edges_gone = self.hide_edges_filter(hide_edges)
gview = self.gview
G = gview(self.G, filter_edge=edges_gone)
assert self.G.nodes == G.nodes
if G.is_directed():
assert self.G.edges - G.edges == {(2, 3)}
assert list(G[2]) == []
assert list(G.pred[3]) == []
assert list(G.pred[2]) == [1]
assert G.size() == 7
else:
assert self.G.edges - G.edges == {(2, 3), (7, 8)}
assert list(G[2]) == [1]
assert G.size() == 6
assert list(G[3]) == [4]
pytest.raises(KeyError, G.__getitem__, 221)
pytest.raises(KeyError, G.__getitem__, 222)
assert G.degree(3) == 1
def test_shown_node(self):
induced_subgraph = nx.filters.show_nodes([2, 3, 111])
gview = self.gview
G = gview(self.G, filter_node=induced_subgraph)
assert set(G.nodes) == {2, 3}
if G.is_directed():
assert list(G[3]) == []
else:
assert list(G[3]) == [2]
assert list(G[2]) == [3]
pytest.raises(KeyError, G.__getitem__, 4)
pytest.raises(KeyError, G.__getitem__, 112)
pytest.raises(KeyError, G.__getitem__, 111)
assert G.degree(3) == (3 if G.is_multigraph() else 1)
assert G.size() == (3 if G.is_multigraph() else 1)
def test_shown_edges(self):
show_edges = [(2, 3), (8, 7), (222, 223)]
edge_subgraph = self.show_edges_filter(show_edges)
G = self.gview(self.G, filter_edge=edge_subgraph)
assert self.G.nodes == G.nodes
if G.is_directed():
assert G.edges == {(2, 3)}
assert list(G[3]) == []
assert list(G[2]) == [3]
assert list(G.pred[3]) == [2]
assert list(G.pred[2]) == []
assert G.size() == 1
else:
assert G.edges == {(2, 3), (7, 8)}
assert list(G[3]) == [2]
assert list(G[2]) == [3]
assert G.size() == 2
pytest.raises(KeyError, G.__getitem__, 221)
pytest.raises(KeyError, G.__getitem__, 222)
assert G.degree(3) == 1
class TestSubDiGraphView(TestSubGraphView):
gview = staticmethod(nx.graphviews.subgraph_view)
graph = nx.DiGraph
hide_edges_filter = staticmethod(nx.filters.hide_diedges)
show_edges_filter = staticmethod(nx.filters.show_diedges)
hide_edges = [(2, 3), (8, 7), (222, 223)]
excluded = {(2, 3), (3, 4), (4, 5), (5, 6)}
def test_inoutedges(self):
edges_gone = self.hide_edges_filter(self.hide_edges)
hide_nodes = [4, 5, 111]
nodes_gone = nx.filters.hide_nodes(hide_nodes)
G = self.gview(self.G, nodes_gone, edges_gone)
assert self.G.in_edges - G.in_edges == self.excluded
assert self.G.out_edges - G.out_edges == self.excluded
def test_pred(self):
edges_gone = self.hide_edges_filter(self.hide_edges)
hide_nodes = [4, 5, 111]
nodes_gone = nx.filters.hide_nodes(hide_nodes)
G = self.gview(self.G, nodes_gone, edges_gone)
assert list(G.pred[2]) == [1]
assert list(G.pred[6]) == []
def test_inout_degree(self):
edges_gone = self.hide_edges_filter(self.hide_edges)
hide_nodes = [4, 5, 111]
nodes_gone = nx.filters.hide_nodes(hide_nodes)
G = self.gview(self.G, nodes_gone, edges_gone)
assert G.degree(2) == 1
assert G.out_degree(2) == 0
assert G.in_degree(2) == 1
assert G.size() == 4
# multigraph
class TestMultiGraphView(TestSubGraphView):
gview = staticmethod(nx.graphviews.subgraph_view)
graph = nx.MultiGraph
hide_edges_filter = staticmethod(nx.filters.hide_multiedges)
show_edges_filter = staticmethod(nx.filters.show_multiedges)
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=cls.graph())
multiedges = {(2, 3, 4), (2, 3, 5)}
cls.G.add_edges_from(multiedges)
cls.hide_edges_w_hide_nodes = {(3, 4, 0), (4, 5, 0), (5, 6, 0)}
def test_hidden_edges(self):
hide_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)]
edges_gone = self.hide_edges_filter(hide_edges)
G = self.gview(self.G, filter_edge=edges_gone)
assert self.G.nodes == G.nodes
if G.is_directed():
assert self.G.edges - G.edges == {(2, 3, 4)}
assert list(G[3]) == [4]
assert list(G[2]) == [3]
assert list(G.pred[3]) == [2] # only one 2 but two edges
assert list(G.pred[2]) == [1]
assert G.size() == 9
else:
assert self.G.edges - G.edges == {(2, 3, 4), (7, 8, 0)}
assert list(G[3]) == [2, 4]
assert list(G[2]) == [1, 3]
assert G.size() == 8
assert G.degree(3) == 3
pytest.raises(KeyError, G.__getitem__, 221)
pytest.raises(KeyError, G.__getitem__, 222)
def test_shown_edges(self):
show_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)]
edge_subgraph = self.show_edges_filter(show_edges)
G = self.gview(self.G, filter_edge=edge_subgraph)
assert self.G.nodes == G.nodes
if G.is_directed():
assert G.edges == {(2, 3, 4)}
assert list(G[3]) == []
assert list(G.pred[3]) == [2]
assert list(G.pred[2]) == []
assert G.size() == 1
else:
assert G.edges == {(2, 3, 4), (7, 8, 0)}
assert G.size() == 2
assert list(G[3]) == [2]
assert G.degree(3) == 1
assert list(G[2]) == [3]
pytest.raises(KeyError, G.__getitem__, 221)
pytest.raises(KeyError, G.__getitem__, 222)
# multidigraph
class TestMultiDiGraphView(TestMultiGraphView, TestSubDiGraphView):
gview = staticmethod(nx.graphviews.subgraph_view)
graph = nx.MultiDiGraph
hide_edges_filter = staticmethod(nx.filters.hide_multidiedges)
show_edges_filter = staticmethod(nx.filters.show_multidiedges)
hide_edges = [(2, 3, 0), (8, 7, 0), (222, 223, 0)]
excluded = {(2, 3, 0), (3, 4, 0), (4, 5, 0), (5, 6, 0)}
def test_inout_degree(self):
edges_gone = self.hide_edges_filter(self.hide_edges)
hide_nodes = [4, 5, 111]
nodes_gone = nx.filters.hide_nodes(hide_nodes)
G = self.gview(self.G, nodes_gone, edges_gone)
assert G.degree(2) == 3
assert G.out_degree(2) == 2
assert G.in_degree(2) == 1
assert G.size() == 6
# induced_subgraph
class TestInducedSubGraph:
@classmethod
def setup_class(cls):
cls.K3 = G = nx.complete_graph(3)
G.graph["foo"] = []
G.nodes[0]["foo"] = []
G.remove_edge(1, 2)
ll = []
G.add_edge(1, 2, foo=ll)
G.add_edge(2, 1, foo=ll)
def test_full_graph(self):
G = self.K3
H = nx.induced_subgraph(G, [0, 1, 2, 5])
assert H.name == G.name
self.graphs_equal(H, G)
self.same_attrdict(H, G)
def test_partial_subgraph(self):
G = self.K3
H = nx.induced_subgraph(G, 0)
assert dict(H.adj) == {0: {}}
assert dict(G.adj) != {0: {}}
H = nx.induced_subgraph(G, [0, 1])
assert dict(H.adj) == {0: {1: {}}, 1: {0: {}}}
def same_attrdict(self, H, G):
old_foo = H[1][2]["foo"]
H.edges[1, 2]["foo"] = "baz"
assert G.edges == H.edges
H.edges[1, 2]["foo"] = old_foo
assert G.edges == H.edges
old_foo = H.nodes[0]["foo"]
H.nodes[0]["foo"] = "baz"
assert G.nodes == H.nodes
H.nodes[0]["foo"] = old_foo
assert G.nodes == H.nodes
def graphs_equal(self, H, G):
assert G._adj == H._adj
assert G._node == H._node
assert G.graph == H.graph
assert G.name == H.name
if not G.is_directed() and not H.is_directed():
assert H._adj[1][2] is H._adj[2][1]
assert G._adj[1][2] is G._adj[2][1]
else: # at least one is directed
if not G.is_directed():
G._pred = G._adj
G._succ = G._adj
if not H.is_directed():
H._pred = H._adj
H._succ = H._adj
assert G._pred == H._pred
assert G._succ == H._succ
assert H._succ[1][2] is H._pred[2][1]
assert G._succ[1][2] is G._pred[2][1]
# edge_subgraph
class TestEdgeSubGraph:
@classmethod
def setup_class(cls):
# Create a path graph on five nodes.
cls.G = G = nx.path_graph(5)
# Add some node, edge, and graph attributes.
for i in range(5):
G.nodes[i]["name"] = f"node{i}"
G.edges[0, 1]["name"] = "edge01"
G.edges[3, 4]["name"] = "edge34"
G.graph["name"] = "graph"
# Get the subgraph induced by the first and last edges.
cls.H = nx.edge_subgraph(G, [(0, 1), (3, 4)])
def test_correct_nodes(self):
"""Tests that the subgraph has the correct nodes."""
assert [0, 1, 3, 4] == sorted(self.H.nodes)
def test_correct_edges(self):
"""Tests that the subgraph has the correct edges."""
assert [(0, 1, "edge01"), (3, 4, "edge34")] == sorted(self.H.edges(data="name"))
def test_add_node(self):
"""Tests that adding a node to the original graph does not
affect the nodes of the subgraph.
"""
self.G.add_node(5)
assert [0, 1, 3, 4] == sorted(self.H.nodes)
self.G.remove_node(5)
def test_remove_node(self):
"""Tests that removing a node in the original graph
removes the nodes of the subgraph.
"""
self.G.remove_node(0)
assert [1, 3, 4] == sorted(self.H.nodes)
self.G.add_edge(0, 1)
def test_node_attr_dict(self):
"""Tests that the node attribute dictionary of the two graphs is
the same object.
"""
for v in self.H:
assert self.G.nodes[v] == self.H.nodes[v]
# Making a change to G should make a change in H and vice versa.
self.G.nodes[0]["name"] = "foo"
assert self.G.nodes[0] == self.H.nodes[0]
self.H.nodes[1]["name"] = "bar"
assert self.G.nodes[1] == self.H.nodes[1]
def test_edge_attr_dict(self):
"""Tests that the edge attribute dictionary of the two graphs is
the same object.
"""
for u, v in self.H.edges():
assert self.G.edges[u, v] == self.H.edges[u, v]
# Making a change to G should make a change in H and vice versa.
self.G.edges[0, 1]["name"] = "foo"
assert self.G.edges[0, 1]["name"] == self.H.edges[0, 1]["name"]
self.H.edges[3, 4]["name"] = "bar"
assert self.G.edges[3, 4]["name"] == self.H.edges[3, 4]["name"]
def test_graph_attr_dict(self):
"""Tests that the graph attribute dictionary of the two graphs
is the same object.
"""
assert self.G.graph is self.H.graph
def test_readonly(self):
"""Tests that the subgraph cannot change the graph structure"""
pytest.raises(nx.NetworkXError, self.H.add_node, 5)
pytest.raises(nx.NetworkXError, self.H.remove_node, 0)
pytest.raises(nx.NetworkXError, self.H.add_edge, 5, 6)
pytest.raises(nx.NetworkXError, self.H.remove_edge, 0, 1)
```
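A brief usage sketch of the mechanism these tests exercise: `subgraph_view` is a read-only view that stays in sync with the underlying graph (the path graph below is illustrative).
```python
import networkx as nx

G = nx.path_graph(9)
view = nx.subgraph_view(G, filter_node=nx.filters.hide_nodes([4, 5]))
assert set(G.nodes) - set(view.nodes) == {4, 5}
G.remove_node(8)  # mutations to G are reflected in the view
assert 8 not in view
# The view itself is immutable: view.add_node(99) raises NetworkXError.
```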
#### File: networkx/generators/stochastic.py
```python
from networkx.classes import DiGraph
from networkx.classes import MultiDiGraph
from networkx.utils import not_implemented_for
__all__ = ["stochastic_graph"]
@not_implemented_for("undirected")
def stochastic_graph(G, copy=True, weight="weight"):
"""Returns a right-stochastic representation of directed graph `G`.
A right-stochastic graph is a weighted digraph in which for each
node, the sum of the weights of all the out-edges of that node is
1. If the graph is already weighted (for example, via a 'weight'
edge attribute), the reweighting takes that into account.
Parameters
----------
G : directed graph
A :class:`~networkx.DiGraph` or :class:`~networkx.MultiDiGraph`.
copy : boolean, optional
If this is True, then this function returns a new graph with
the stochastic reweighting. Otherwise, the original graph is
modified in-place (and also returned, for convenience).
weight : edge attribute key (optional, default='weight')
Edge attribute key used for reading the existing weight and
setting the new weight. If no attribute with this key is found
for an edge, then the edge weight is assumed to be 1. If an edge
has a weight, it must be a positive number.
"""
if copy:
G = MultiDiGraph(G) if G.is_multigraph() else DiGraph(G)
# There is a tradeoff here: the dictionary of node degrees may
# require a lot of memory, whereas making a call to `G.out_degree`
# inside the loop may be costly in computation time.
degree = dict(G.out_degree(weight=weight))
for u, v, d in G.edges(data=True):
if degree[u] == 0:
d[weight] = 0
else:
d[weight] = d.get(weight, 1) / degree[u]
return G
```
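A minimal sketch of the normalization this performs (the three-edge digraph below is illustrative): after the call, each node's out-edge weights sum to 1.
```python
import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from([(0, 1, 1.0), (0, 2, 3.0), (1, 2, 2.0)])
S = nx.stochastic_graph(G, copy=True)
assert S[0][1]["weight"] == 0.25 and S[0][2]["weight"] == 0.75
assert S[1][2]["weight"] == 1.0  # a single out-edge gets weight 1
```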
|
{
"source": "Jebq/causalnex",
"score": 2
}
|
#### File: tests/structure/test_data_generators.py
```python
import operator
import string
from itertools import product
from typing import Hashable, Tuple, Union
import networkx as nx
import numpy as np
import pandas as pd
import pytest
from networkx.algorithms.dag import is_directed_acyclic_graph
from scipy.stats import anderson, stats
from causalnex.structure.data_generators import (
generate_binary_data,
generate_binary_dataframe,
generate_categorical_dataframe,
generate_continuous_data,
generate_continuous_dataframe,
generate_structure,
sem_generator,
)
from causalnex.structure.structuremodel import StructureModel
@pytest.fixture
def graph():
graph = StructureModel()
edges = [(n, n + 1, 1) for n in range(5)]
graph.add_weighted_edges_from(edges)
return graph
@pytest.fixture
def schema():
# node 3 is omitted, so it falls back to the default data type
schema = {
0: "binary",
1: "categorical:3",
2: "binary",
4: "continuous",
5: "categorical:5",
}
return schema
@pytest.fixture()
def graph_gen():
def generator(num_nodes, seed, weight=None):
np.random.seed(seed)
sm = StructureModel()
nodes = list(
"".join(x) for x in product(string.ascii_lowercase, string.ascii_lowercase)
)[:num_nodes]
np.random.shuffle(nodes)
sm.add_nodes_from(nodes)
# one edge:
sm.add_weighted_edges_from([("aa", "ab", weight)])
return sm
return generator
class TestGenerateStructure:
@pytest.mark.parametrize("graph_type", ["erdos-renyi", "barabasi-albert", "full"])
def test_is_dag_graph_type(self, graph_type):
""" Tests that the generated graph is a dag for all graph types. """
degree, d_nodes = 4, 10
sm = generate_structure(d_nodes, degree, graph_type)
assert is_directed_acyclic_graph(sm)
@pytest.mark.parametrize("num_nodes,degree", [(5, 2), (10, 3), (15, 5)])
def test_is_dag_nodes_degrees(self, num_nodes, degree):
""" Tests that generated graph is dag for different numbers of nodes and degrees
"""
sm = generate_structure(num_nodes, degree)
assert nx.is_directed_acyclic_graph(sm)
def test_bad_graph_type(self):
""" Test that a value other than "erdos-renyi", "barabasi-albert", "full" throws ValueError """
graph_type = "invalid"
degree, d_nodes = 4, 10
with pytest.raises(ValueError, match="unknown graph type"):
generate_structure(d_nodes, degree, graph_type)
@pytest.mark.parametrize("num_nodes,degree", [(5, 2), (10, 3), (15, 5)])
def test_expected_num_nodes(self, num_nodes, degree):
""" Test that generated structure has expected number of nodes = num_nodes """
sm = generate_structure(num_nodes, degree)
assert len(sm.nodes) == num_nodes
@pytest.mark.parametrize(
"num_nodes,degree,w_range",
[(5, 2, (1, 2)), (10, 3, (100, 200)), (15, 5, (1.0, 1.0))],
)
def test_weight_range(self, num_nodes, degree, w_range):
""" Test that w_range is respected in output """
w_min = w_range[0]
w_max = w_range[1]
sm = generate_structure(num_nodes, degree, w_min=w_min, w_max=w_max)
assert all(abs(sm[u][v]["weight"]) >= w_min for u, v in sm.edges)
assert all(abs(sm[u][v]["weight"]) <= w_max for u, v in sm.edges)
@pytest.mark.parametrize("num_nodes", [-1, 0, 1])
def test_num_nodes_exception(self, num_nodes):
""" Check a single node graph can't be generated """
with pytest.raises(ValueError, match="DAG must have at least 2 nodes"):
generate_structure(num_nodes, 1)
def test_min_max_weights_exception(self):
""" Check that w_range is valid """
with pytest.raises(
ValueError,
match="Absolute minimum weight must be less than or equal to maximum weight",
):
generate_structure(4, 1, w_min=0.5, w_max=0)
def test_min_max_weights_equal(self):
""" If w_range (w, w) has w=w, check abs value of all weights respect this """
w = 1
sm = generate_structure(4, 1, w_min=w, w_max=w)
w_mat = nx.to_numpy_array(sm)
assert np.all((w_mat == 0) | (w_mat == w) | (w_mat == -w))
def test_erdos_renyi_degree_increases_edges(self):
""" Erdos-Renyi degree increases edges """
edge_counts = [
max(
[
len(generate_structure(100, degree, "erdos-renyi").edges)
for _ in range(10)
]
)
for degree in [10, 90]
]
assert edge_counts == sorted(edge_counts)
def test_barabasi_albert_degree_increases_edges(self):
""" Barabasi-Albert degree increases edges """
edge_counts = [
max(
[
len(generate_structure(100, degree, "barabasi-albert").edges)
for _ in range(10)
]
)
for degree in [10, 90]
]
assert edge_counts == sorted(edge_counts)
def test_full_network(self):
""" Fully connected network has expected edge counts """
sm = generate_structure(40, degree=0, graph_type="full")
assert len(sm.edges) == (40 * 39) / 2
class TestGenerateContinuousData:
@pytest.mark.parametrize(
"distribution", ["gaussian", "normal", "student-t", "exponential", "gumbel"]
)
def test_returns_ndarray(self, distribution):
""" Return value is an ndarray - test over all sem_types """
graph_type, degree, d_nodes = "erdos-renyi", 4, 10
sm = generate_structure(d_nodes, degree, graph_type)
ndarray = generate_continuous_data(sm, distribution=distribution, n_samples=10)
assert isinstance(ndarray, np.ndarray)
def test_bad_distribution_type(self):
""" Test that invalid sem-type other than "gaussian", "normal", "student-t",
"exponential", "gumbel" is not accepted """
graph_type, degree, d_nodes = "erdos-renyi", 4, 10
sm = generate_structure(d_nodes, degree, graph_type)
with pytest.raises(ValueError, match="Unknown continuous distribution"):
generate_continuous_data(sm, distribution="invalid", n_samples=10, seed=10)
@pytest.mark.parametrize("num_nodes", [5, 10, 15])
def test_number_of_nodes(self, num_nodes):
""" Length of each row in generated data equals num_nodes """
graph = StructureModel()
edges = [(n, n + 1, 1) for n in range(num_nodes - 1)]
graph.add_weighted_edges_from(edges)
data = generate_continuous_data(graph, 100, seed=10)
assert all(len(sample) == num_nodes for sample in data)
@pytest.mark.parametrize("num_samples", [5, 10, 15])
def test_number_of_samples(self, num_samples, graph):
""" Assert number of samples generated (rows) = num_samples """
data = generate_continuous_data(graph, num_samples, "gaussian", 1, seed=10)
assert len(data) == num_samples
def test_linear_gauss_parent_dist(self, graph):
""" Anderson-Darling test for data coming from a particular distribution, for gaussian."""
data = generate_continuous_data(graph, 1000000, "gaussian", 1, seed=10)
stat, crit, sig = anderson(data[:, 0], "norm")
assert stat < crit[list(sig).index(5)]
def test_linear_normal_parent_dist(self, graph):
""" Anderson-Darling test for data coming from a particular distribution, for normal."""
data = generate_continuous_data(
graph, distribution="normal", n_samples=1000000, noise_scale=1, seed=10
)
stat, crit, sig = anderson(data[:, 0], "norm")
assert stat < crit[list(sig).index(5)]
def test_linear_studentt_parent_dist(self, graph):
"""
Kolmogorov-Smirnov test for data coming from a student-t (degree of freedom = 3).
"""
np.random.seed(10)
data = generate_continuous_data(
graph, distribution="student-t", noise_scale=1, n_samples=100000, seed=10
)
x = data[:, 0]
_, p_val = stats.kstest(x, "t", args=[3])
assert p_val < 0.01
def test_linear_exp_parent_dist(self, graph):
""" Anderson-Darling test for data coming from a particular distribution, for exponential."""
data = generate_continuous_data(
graph, distribution="exponential", noise_scale=1, n_samples=100000, seed=10
)
stat, crit, sig = anderson(data[:, 0], "expon")
assert stat < crit[list(sig).index(5)]
def test_linear_gumbel_parent_dist(self, graph):
""" Anderson-Darling test for data coming from a particular distribution, for gumbel."""
data = generate_continuous_data(
graph, distribution="gumbel", noise_scale=1, n_samples=100000, seed=10
)
stat, crit, sig = anderson(data[:, 0], "gumbel_r")
assert stat < crit[list(sig).index(5)]
@pytest.mark.parametrize(
"distribution", ["gaussian", "normal", "student-t", "exponential", "gumbel"]
)
def test_intercept(self, distribution):
graph = StructureModel()
graph.add_node("123")
data_noint = generate_continuous_data(
graph,
n_samples=100000,
distribution=distribution,
noise_scale=0,
seed=10,
intercept=False,
)
data_intercept = generate_continuous_data(
graph,
n_samples=100000,
distribution=distribution,
noise_scale=0,
seed=10,
intercept=True,
)
assert not np.isclose(data_noint[:, 0].mean(), data_intercept[:, 0].mean())
assert np.isclose(data_noint[:, 0].std(), data_intercept[:, 0].std())
@pytest.mark.parametrize("num_nodes", (10, 20, 30))
@pytest.mark.parametrize("seed", (10, 20, 30))
def test_order_is_correct(self, graph_gen, num_nodes, seed):
"""
Check if the order of the nodes is the same order as `sm.nodes`, which in turn is the same order as the
adjacency matrix.
To do so, we create graphs with degree in {0,1} by doing permutations on identity.
The edge values are always 100 and the noise is 1, so we expect `edge_from` < `edge_to` in absolute value
almost every time.
"""
sm = graph_gen(num_nodes=num_nodes, seed=seed, weight=100)
nodes = sm.nodes()
node_seq = {node: ix for ix, node in enumerate(sm.nodes())}
data = generate_continuous_data(
sm,
n_samples=10000,
distribution="normal",
seed=seed,
noise_scale=1.0,
intercept=False,
)
assert data[:, node_seq["aa"]].std() < data[:, node_seq["ab"]].std()
tol = 0.15
# for jointly gaussian variables: zero correlation iff independent:
for node in nodes:
if node == "aa":
continue
if node == "ab":
assert not np.isclose(
np.corrcoef(data[:, [node_seq["aa"], node_seq["ab"]]].T)[0, 1],
0,
atol=tol,
)
else:
assert np.isclose(
np.corrcoef(data[:, [node_seq["aa"], node_seq[node]]].T)[0, 1],
0,
atol=tol,
)
@pytest.mark.parametrize(
"distribution", ["gaussian", "normal", "student-t", "exponential", "gumbel"]
)
@pytest.mark.parametrize("noise_std", [0.1, 1, 2])
@pytest.mark.parametrize("intercept", [True, False])
@pytest.mark.parametrize("seed", [10, 12])
def test_dataframe(self, graph, distribution, noise_std, intercept, seed):
"""
Tests equivalence of dataframe wrapper
"""
data = generate_continuous_data(
graph,
1000,
distribution,
noise_scale=noise_std,
seed=seed,
intercept=intercept,
)
df = generate_continuous_dataframe(
graph,
1000,
distribution,
noise_scale=noise_std,
seed=seed,
intercept=intercept,
)
assert np.array_equal(data, df[list(graph.nodes())].values)
class TestGenerateBinaryData:
@pytest.mark.parametrize("distribution", ["probit", "normal", "logit"])
def test_returns_ndarray(self, distribution):
""" Return value is an ndarray - test over all sem_types """
graph_type, degree, d_nodes = "erdos-renyi", 4, 10
sm = generate_structure(d_nodes, degree, graph_type)
ndarray = generate_binary_data(sm, distribution=distribution, n_samples=10)
assert isinstance(ndarray, np.ndarray)
def test_bad_distribution_type(self):
""" Test that invalid sem-type other than "probit", "normal", "logit" is not accepted """
graph_type, degree, d_nodes = "erdos-renyi", 4, 10
sm = generate_structure(d_nodes, degree, graph_type)
with pytest.raises(ValueError, match="Unknown binary distribution"):
generate_binary_data(sm, distribution="invalid", n_samples=10, seed=10)
@pytest.mark.parametrize("num_nodes", [5, 10, 15])
def test_number_of_nodes(self, num_nodes):
""" Length of each row in generated data equals num_nodes """
graph = StructureModel()
edges = [(n, n + 1, 1) for n in range(num_nodes - 1)]
graph.add_weighted_edges_from(edges)
data = generate_binary_data(graph, 100, seed=10)
assert all(len(sample) == num_nodes for sample in data)
@pytest.mark.parametrize("num_samples", [5, 10, 15])
def test_number_of_samples(self, num_samples, graph):
""" Assert number of samples generated (rows) = num_samples """
data = generate_binary_data(graph, num_samples, "logit", 1, seed=10)
assert len(data) == num_samples
@pytest.mark.parametrize("distribution", ["logit", "probit", "normal"])
def test_baseline_probability_probit(self, graph, distribution):
""" Test whether probability centered around 50% if no intercept given"""
graph = StructureModel()
graph.add_nodes_from(["A"])
data = generate_binary_data(
graph,
1000000,
distribution=distribution,
noise_scale=0.1,
seed=10,
intercept=False,
)
assert 0.45 < data[:, 0].mean() < 0.55
@pytest.mark.parametrize("distribution", ["logit", "probit", "normal"])
def test_intercept_probability_logit(self, graph, distribution):
""" Test whether probability is not centered around 50% when using an intercept"""
graph = StructureModel()
graph.add_nodes_from(["A"])
data = generate_binary_data(
graph,
1000000,
distribution=distribution,
noise_scale=0.1,
seed=10,
intercept=True,
)
mean_prob = data[:, 0].mean()
assert not np.isclose(mean_prob, 0.5, atol=0.05)
@pytest.mark.parametrize("distribution", ["logit", "probit", "normal"])
def test_intercept(self, distribution):
graph = StructureModel()
graph.add_node("123")
data_noint = generate_binary_data(
graph, 100000, distribution, noise_scale=0, seed=10, intercept=False
)
data_intercept = generate_binary_data(
graph, 100000, distribution, noise_scale=0, seed=10, intercept=True
)
assert not np.isclose(data_noint[:, 0].mean(), data_intercept[:, 0].mean())
@pytest.mark.parametrize("num_nodes", (10, 20, 30))
@pytest.mark.parametrize("seed", (10, 20, 30))
def test_order_is_correct(self, graph_gen, num_nodes, seed):
"""
Check if the order of the nodes is the same order as `sm.nodes`, which in turn is the same order as the
adjacency matrix.
To do so, we create graphs with degree in {0,1} by doing permutations on identity.
The edge values are always 100 and the noise is 1, so we expect `edge_from` < `edge_to` in absolute value
almost every time.
"""
sm = graph_gen(num_nodes=num_nodes, seed=seed, weight=None)
nodes = sm.nodes()
node_seq = {node: ix for ix, node in enumerate(sm.nodes())}
data = generate_binary_data(
sm,
n_samples=10000,
distribution="normal",
seed=seed,
noise_scale=0.1,
intercept=False,
)
tol = 0.15
# since we don't have an intercept, the mean probability for the parent is 0.5,
# which maximizes the variance p(1-p) of a binary feature,
# hence any child necessarily has a smaller standard deviation.
assert data[:, node_seq["aa"]].std() > data[:, node_seq["ab"]].std()
for node in nodes:
if node == "aa":
continue
joint_proba, factored_proba = calculate_proba(
data, node_seq["aa"], node_seq[node]
)
if node == "ab":
# this is the only link
assert not np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
else:
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
@pytest.mark.parametrize("distribution", ["probit", "normal", "logit"])
@pytest.mark.parametrize("noise_std", [0.1, 1, 2])
@pytest.mark.parametrize("intercept", [True, False])
@pytest.mark.parametrize("seed", [10, 12])
def test_dataframe(self, graph, distribution, noise_std, intercept, seed):
"""
Tests equivalence of dataframe wrapper
"""
data = generate_binary_data(
graph,
100,
distribution,
noise_scale=noise_std,
seed=seed,
intercept=intercept,
)
df = generate_binary_dataframe(
graph,
100,
distribution,
noise_scale=noise_std,
seed=seed,
intercept=intercept,
)
assert np.array_equal(data, df[list(graph.nodes())].values)
@pytest.mark.parametrize("num_nodes", (2, 3, 10, 20, 30))
@pytest.mark.parametrize("seed", (10, 20, 30))
def test_independence(self, graph_gen, seed, num_nodes):
"""
Test whether the relation is accurate; implicitly tests the sequence of
nodes.
"""
sm = graph_gen(num_nodes=num_nodes, seed=seed, weight=None)
nodes = sm.nodes()
df = generate_binary_dataframe(
sm,
n_samples=100000,
distribution="normal",
seed=seed,
noise_scale=0.5,
intercept=False,
)
tol = 0.05
for node in nodes:
if node == "aa":
continue
joint_proba, factored_proba = calculate_proba(df, "aa", node)
if node == "ab":
# this is the only link
assert not np.isclose(
joint_proba, factored_proba, atol=tol, rtol=0
), df.mean()
else:
assert np.isclose(joint_proba, factored_proba, atol=tol, rtol=0)
class TestGenerateCategoricalData:
@pytest.mark.parametrize("distribution", ["probit", "normal", "logit", "gumbel"])
def test_returns_dataframe(self, distribution):
""" Return value is an ndarray - test over all sem_types """
graph_type, degree, d_nodes = "erdos-renyi", 4, 10
sm = generate_structure(d_nodes, degree, graph_type)
ndarray = generate_categorical_dataframe(
sm, distribution=distribution, n_samples=10
)
assert isinstance(ndarray, pd.DataFrame)
def test_bad_distribution_type(self):
""" Test that invalid sem-type other than "probit", "normal", "logit", "gumbel" is not accepted """
graph_type, degree, d_nodes = "erdos-renyi", 4, 10
sm = generate_structure(d_nodes, degree, graph_type)
with pytest.raises(ValueError, match="Unknown categorical distribution"):
generate_categorical_dataframe(
sm, distribution="invalid", n_samples=10, seed=10
)
@pytest.mark.parametrize(
"num_nodes,n_categories", list(product([5, 10, 15], [3, 5, 7]))
)
def test_number_of_columns(self, num_nodes, n_categories):
""" Length of dataframe is in the correct shape"""
graph = StructureModel()
edges = [(n, n + 1, 1) for n in range(num_nodes - 1)]
graph.add_weighted_edges_from(edges)
data = generate_categorical_dataframe(
graph, 100, seed=10, n_categories=n_categories
)
assert data.shape[1] == (num_nodes * n_categories)
@pytest.mark.parametrize("num_samples", [5, 10, 15])
def test_number_of_samples(self, num_samples, graph):
""" Assert number of samples generated (rows) = num_samples """
data = generate_categorical_dataframe(graph, num_samples, "logit", 1, seed=10)
assert len(data) == num_samples
@pytest.mark.parametrize(
"distribution, n_categories",
list(product(["logit", "probit", "normal", "gumbel"], [3, 5, 7])),
)
def test_baseline_probability(self, graph, distribution, n_categories):
""" Test whether probability centered around 50% if no intercept given"""
graph = StructureModel()
graph.add_nodes_from(["A"])
data = generate_categorical_dataframe(
graph,
10000,
distribution=distribution,
n_categories=n_categories,
noise_scale=1.0,
seed=10,
intercept=False,
)
# without intercept, the probabilities should be fairly uniform
assert np.allclose(data.mean(axis=0), 1 / n_categories, atol=0.01, rtol=0)
@pytest.mark.parametrize(
"distribution,n_categories",
list(product(["logit", "probit", "normal", "gumbel"], [3, 5, 7])),
)
def test_intercept_probability(self, graph, distribution, n_categories):
""" Test whether probability is not centered around 50% when using an intercept"""
graph = StructureModel()
graph.add_nodes_from(["A"])
data = generate_categorical_dataframe(
graph,
1000000,
distribution=distribution,
n_categories=n_categories,
noise_scale=0.1,
seed=10,
intercept=True,
)
assert not np.allclose(data.mean(axis=0), 1 / n_categories, atol=0.01, rtol=0)
@pytest.mark.parametrize("n_categories", (2, 10,))
@pytest.mark.parametrize("distribution", ["probit", "logit"])
def test_intercept(self, distribution, n_categories):
graph = StructureModel()
graph.add_node("A")
data_noint = generate_categorical_dataframe(
graph,
100000,
distribution,
noise_scale=0.1,
n_categories=n_categories,
seed=10,
intercept=False,
)
data_intercept = generate_categorical_dataframe(
graph,
100000,
distribution,
noise_scale=0.1,
n_categories=n_categories,
seed=10,
intercept=True,
)
assert np.all(
~np.isclose(
data_intercept.mean(axis=0), data_noint.mean(axis=0), atol=0.05, rtol=0
)
)
@pytest.mark.parametrize("num_nodes", (3, 6))
@pytest.mark.parametrize("seed", (10, 20))
@pytest.mark.parametrize("n_categories", (2, 6,))
@pytest.mark.parametrize("distribution", ["probit", "logit"])
def test_independence(self, graph_gen, seed, num_nodes, n_categories, distribution):
"""
Test whether the relation is accurate; implicitly tests the sequence of
nodes.
"""
sm = graph_gen(num_nodes=num_nodes, seed=seed, weight=None)
nodes = sm.nodes()
df = generate_categorical_dataframe(
sm,
n_samples=100000,
distribution=distribution,
n_categories=n_categories,
seed=seed,
noise_scale=1,
intercept=False,
)
tol = 0.05
# independent links
for node in nodes:
if node == "aa":
continue
joint_proba, factored_proba = calculate_proba(df, "aa_0", node + "_0")
if node == "ab":
assert not np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
else:
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
class TestMixedDataGen:
def test_run(self, graph, schema):
df = sem_generator(
graph=graph,
schema=schema,
default_type="continuous",
noise_std=1.0,
n_samples=1000,
intercept=False,
seed=12,
)
# test binary:
assert df[0].nunique() == 2
assert df[2].nunique() == 2
# test categorical:
for col in ["1_{}".format(i) for i in range(3)]:
assert df[col].nunique() == 2
assert len([x for x in df.columns if isinstance(x, str) and "1_" in x]) == 3
for col in ["5_{}".format(i) for i in range(5)]:
assert df[col].nunique() == 2
assert len([x for x in df.columns if isinstance(x, str) and "5_" in x]) == 5
# test continuous
assert df[3].nunique() == 1000
assert df[4].nunique() == 1000
def test_graph_not_a_dag(self):
graph = StructureModel()
graph.add_edges_from([(0, 1), (1, 2), (2, 0)])
with pytest.raises(ValueError, match="Provided graph is not a DAG"):
_ = sem_generator(graph=graph)
def test_not_permissible_type(self, graph):
schema = {
0: "unknown data type",
}
with pytest.raises(ValueError, match="Unknown data type"):
_ = sem_generator(
graph=graph,
schema=schema,
default_type="continuous",
noise_std=1.0,
n_samples=1000,
intercept=False,
seed=12,
)
def test_missing_cardinality(self, graph):
schema = {
0: "categorical",
1: "categorical:3",
5: "categorical:5",
}
with pytest.raises(ValueError, match="Missing cardinality for categorical"):
_ = sem_generator(
graph=graph,
schema=schema,
default_type="continuous",
noise_std=1.0,
n_samples=1000,
intercept=False,
seed=12,
)
def test_missing_default_type(self, graph):
with pytest.raises(ValueError, match="Unknown default data type"):
_ = sem_generator(
graph=graph,
schema=schema,
default_type="unknown",
noise_std=1.0,
n_samples=1000,
intercept=False,
seed=12,
)
def test_incorrect_weight_dist(self):
sm = StructureModel()
nodes = list(str(x) for x in range(6))
np.random.shuffle(nodes)
sm.add_nodes_from(nodes)
sm.add_weighted_edges_from([("0", "1", None), ("2", "4", None)])
with pytest.raises(ValueError, match="Unknown weight distribution"):
_ = sem_generator(
graph=sm,
schema=None,
default_type="continuous",
distributions={"weight": "unknown"},
noise_std=2.0,
n_samples=1000,
intercept=False,
seed=10,
)
def test_incorrect_intercept_dist(self, graph):
with pytest.raises(ValueError, match="Unknown intercept distribution"):
_ = sem_generator(
graph=graph,
schema=None,
default_type="continuous",
distributions={"intercept": "unknown"},
noise_std=2.0,
n_samples=10,
intercept=True,
seed=10,
)
# def test_mixed_type_independence(self):
@pytest.mark.parametrize("seed", (10, 20))
@pytest.mark.parametrize("n_categories", (2, 5,))
@pytest.mark.parametrize("weight_distribution", ["uniform", "gaussian"])
@pytest.mark.parametrize("intercept_distribution", ["uniform", "gaussian"])
def test_mixed_type_independence(
self, seed, n_categories, weight_distribution, intercept_distribution
):
"""
Test whether the relation is accurate; implicitly tests the sequence of
nodes.
"""
np.random.seed(seed)
sm = StructureModel()
nodes = list(str(x) for x in range(6))
np.random.shuffle(nodes)
sm.add_nodes_from(nodes)
# binary -> categorical
sm.add_weighted_edges_from([("0", "1", 10)])
# binary -> continuous
sm.add_weighted_edges_from([("2", "4", None)])
schema = {
"0": "binary",
"1": "categorical:{}".format(n_categories),
"2": "binary",
"4": "continuous",
"5": "categorical:{}".format(n_categories),
}
df = sem_generator(
graph=sm,
schema=schema,
default_type="continuous",
distributions={
"weight": weight_distribution,
"intercept": intercept_distribution,
},
noise_std=2,
n_samples=100000,
intercept=True,
seed=seed,
)
atol = 0.05  # 5% difference between joint & factored!
# 1. dependent links
# 0 -> 1 (we look at the class with the highest deviation from uniform
# to avoid small values)
c, _ = max(
[
(c, np.abs(df["1_{}".format(c)].mean() - 1 / n_categories))
for c in range(n_categories)
],
key=operator.itemgetter(1),
)
joint_proba, factored_proba = calculate_proba(df, "0", "1_{}".format(c))
assert not np.isclose(joint_proba, factored_proba, rtol=0, atol=atol)
# 2 -> 4
assert not np.isclose(
df["4"].mean(), df["4"][df["2"] == 1].mean(), rtol=0, atol=atol
)
tol = 0.15  # relative tolerance of +/- 15% between joint and factored probabilities
# 2. independent links
# categorical
c, _ = max(
[
(c, np.abs(df["1_{}".format(c)].mean() - 1 / n_categories))
for c in range(n_categories)
],
key=operator.itemgetter(1),
)
joint_proba, factored_proba = calculate_proba(df, "0", "5_{}".format(c))
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
# binary
joint_proba, factored_proba = calculate_proba(df, "0", "2")
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
# categorical
c, _ = max(
[
(c, np.abs(df["1_{}".format(c)].mean() - 1 / n_categories))
for c in range(n_categories)
],
key=operator.itemgetter(1),
)
d, _ = max(
[
(d, np.abs(df["5_{}".format(d)].mean() - 1 / n_categories))
for d in range(n_categories)
],
key=operator.itemgetter(1),
)
joint_proba, factored_proba = calculate_proba(
df, "1_{}".format(d), "5_{}".format(c)
)
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
# continuous
# for gaussian distributions, zero correlation is equivalent to independence
assert np.isclose(df[["3", "4"]].corr().values[0, 1], 0, atol=tol)
def calculate_proba(
df: Union[pd.DataFrame, np.ndarray], col_0: Hashable, col_1: Hashable
) -> Tuple[float, float]:
if isinstance(df, pd.DataFrame):
marginal_0 = df[col_0].mean()
marginal_1 = df[col_1].mean()
joint_proba = (df[col_0] * df[col_1]).mean()
else:
marginal_0 = df[:, col_0].mean()
marginal_1 = df[:, col_1].mean()
joint_proba = (df[:, col_0] * df[:, col_1]).mean()
factored_proba = marginal_0 * marginal_1
return joint_proba, factored_proba
```
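As a sanity check on the helper itself: for two independent binary columns, the joint probability E[XY] should be close to the factored probability E[X]E[Y]. A minimal sketch with synthetic data (assuming `calculate_proba` from the module above is in scope):
```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({"x": rng.integers(0, 2, 100000),
                   "y": rng.integers(0, 2, 100000)})
joint, factored = calculate_proba(df, "x", "y")
assert np.isclose(joint, factored, atol=0.01)  # both close to 0.25
```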
|
{
"source": "Jebrayam/systemsbiology",
"score": 3
}
|
#### File: systemsbiology/GUI/model2moment.py
```python
import numpy as np
from numba import jit
@jit
def ODEmodel(y,t,hog,vPars):
if t > len(hog):
t = len(hog)-1
u = hog[int(t)]
c1 = vPars[0]
c2 = vPars[1]
c3 = vPars[2]
c4 = vPars[3]
dx1 = y[0]
dx2 = y[1]
dx3 = y[2]
dx4 = y[3]
dx5 = y[4]
ddx1 = c1*u - c2*dx1
ddx2 = c3*dx1 - c4*dx2
ddx3 = 2*c1*dx1*u + c1*u + c2*dx1 - 2*c2*dx3
ddx4 = c1*dx2*u - c2*dx4 + c3*dx3 - c4*dx4
ddx5 = c3*dx1 + 2*c3*dx4 + c4*dx2 - 2*c4*dx5
return np.array([ddx1, ddx2, ddx3, ddx4, ddx5])
```
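`ODEmodel` packs the means (`dx1`, `dx2`) and second-order moments (`dx3`–`dx5`) of a two-stage cascade driven by the sampled input signal `hog`. A hypothetical integration sketch; the constant input, rate constants, and zero initial condition below are illustrative assumptions:
```python
import numpy as np
from scipy.integrate import odeint
from model2moment import ODEmodel  # assumed import path

t = np.arange(0.0, 50.0)
hog = np.ones(50)             # constant unit input, one sample per time unit
vPars = [0.5, 0.1, 0.3, 0.2]  # [c1, c2, c3, c4]
y0 = np.zeros(5)
sol = odeint(ODEmodel, y0, t, args=(hog, vPars))
print(sol[-1])                # moments at the final time point
```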
#### File: systemsbiology/GUI/mplwidget.py
```python
from PyQt5.QtWidgets import QWidget, QVBoxLayout
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
class MplWidget(QWidget):
def __init__(self, parent = None):
QWidget.__init__(self, parent)
self.canvas = FigureCanvas(Figure())
vertical_layout = QVBoxLayout()
vertical_layout.addWidget(self.canvas)
self.canvas.axes = self.canvas.figure.add_subplot(111)
self.canvas.axes.tick_params(axis='both', direction='in', labelsize=8)
self.setLayout(vertical_layout)
```
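A hypothetical embedding sketch (the import path and data are assumptions): the widget exposes a Matplotlib `Axes` at `canvas.axes`, so plotting works as usual once a Qt application is running.
```python
import sys
from PyQt5.QtWidgets import QApplication
from mplwidget import MplWidget  # assumed module path

app = QApplication(sys.argv)
w = MplWidget()
w.canvas.axes.plot([0, 1, 2, 3], [0, 1, 4, 9])
w.canvas.draw()
w.show()
sys.exit(app.exec_())
```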
|
{
"source": "jebreimo/Advent2020",
"score": 3
}
|
#### File: Advent2020/src/december22.py
```python
INPUT = """Player 1:
25
37
35
16
9
26
17
5
47
32
11
43
40
15
7
19
36
20
50
3
21
34
44
18
22
Player 2:
12
1
27
41
4
39
13
29
38
2
33
28
10
6
24
31
42
8
23
45
46
48
49
30
14""".split("\n")
def make_list(values):
head = [None, None]
tail = head
for value in values:
tail[1] = [value, None]
tail = tail[1]
return head, tail
def get_list_length(head):
n = 0
while head[1]:
n += 1
head = head[1]
return n
CHARS = "".join(chr(ord("A") + n) for n in range(0, 26)) + "".join(
chr(ord("a") + n) for n in range(0, 26))
class Player:
def __init__(self, name, cards):
self.name = name
self.head, self.tail = make_list(cards)
self.card_count = get_list_length(self.head)
def is_done(self):
return self.head == self.tail
def draw(self):
if self.tail is self.head:
return None
card = self.head[1]
self.head[1] = card[1]
if self.tail is card:
self.tail = self.head
self.card_count -= 1
return card[0]
def add(self, card):
self.tail[1] = [card, None]
self.tail = self.tail[1]
self.card_count += 1
def cards(self):
cards = []
card = self.head[1]
while card:
cards.append(card[0])
card = card[1]
return cards
def score(self):
cards = self.cards()
factor = len(cards)
score = 0
for card in cards:
score += card * factor
factor -= 1
return score
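    # Scoring example: a final deck [3, 2, 10] scores 3*3 + 2*2 + 10*1 = 23;
    # the bottom card counts once and the top card counts len(deck) times.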
def deck_id(self):
return str(self.name) + "".join(CHARS[c] for c in self.cards())
def play(player1, player2):
while not player1.is_done() and not player2.is_done():
card1, card2 = player1.draw(), player2.draw()
if card1 > card2:
player1.add(card1)
player1.add(card2)
yield f"Player 1 wins the round! {card1} vs {card2}"
else:
player2.add(card2)
player2.add(card1)
yield f"Player 2 wins the round! {card2} vs {card1}"
PLAYER1 = Player("1", (int(s) for s in INPUT[1:26]))
PLAYER2 = Player("2", (int(s) for s in INPUT[28:]))
for i, s in enumerate(play(PLAYER1, PLAYER2)):
if i > 1000:
break
print(i, s)
print(PLAYER1.score())
def play2(player1, player2):
play2.MAX_GAME_NO = 1000
def recursive_play(player1, player2, game, known_outcomes):
input_id = f"{player1.deck_id()}:{player2.deck_id()}"
if input_id in known_outcomes:
if game <= play2.MAX_GAME_NO:
print(
f"Player {known_outcomes[input_id]} wins game {game}. Known outcome.")
play2.MAX_GAME_NO = game
return player1 if known_outcomes[
input_id] == player1.name else player2
known_deck_ids = set()
rnd = 0
while not player1.is_done() and not player2.is_done():
deck_ids = f"{player1.deck_id()}:{player2.deck_id()}"
if deck_ids in known_deck_ids:
break
known_deck_ids.add(deck_ids)
card1, card2 = player1.draw(), player2.draw()
if card1 <= player1.card_count and card2 <= player2.card_count:
winner = recursive_play(Player(player1.name,
player1.cards()[:card1]),
Player(player2.name,
player2.cards()[:card2]),
game + 1,
known_outcomes)
winner = player1 if player1.name == winner.name else player2
elif card1 > card2:
winner = player1
else:
winner = player2
            # The round winner's own card goes to the bottom of their deck
            # first, followed by the loser's card; when a sub-game decides
            # the round, the winner's card may be the lower one, so the two
            # cards must not be sorted by value.
            if winner is player1:
                winner.add(card1)
                winner.add(card2)
            else:
                winner.add(card2)
                winner.add(card1)
rnd += 1
if rnd > 100000:
raise Exception("Too many rounds!!")
winner = player2 if player1.is_done() else player1
if game < 7:
print(f"Player {winner.name} wins game {game} after {rnd} rounds! ({len(known_outcomes)})")
known_outcomes[input_id] = winner.name
return winner
return recursive_play(player1, player2, 1, {})
PLAYER1 = Player("1", (int(s) for s in INPUT[1:26]))
PLAYER2 = Player("2", (int(s) for s in INPUT[28:]))
WINNER = play2(PLAYER1, PLAYER2)
print(WINNER.score())
print(WINNER.cards())
print(PLAYER1.cards())
print(PLAYER2.cards())
```
|
{
"source": "jebreimo/Argen",
"score": 3
}
|
#### File: Argen/Argen/argen.py
```python
import argparse
import os
import sys
import textwrap
from error import Error
import helptextparser
import argparser_hpp
import argparser_cpp
def find_first(s, func):
for i, c in enumerate(s):
if func(c):
return i
return -1
def formatText(text, definitionLineNos, width, definitionIndent):
result = []
wrapper = textwrap.TextWrapper(width=width)
for i, line in enumerate(text.split("\n")):
line = line.rstrip()
if len(line) <= width:
result.append(line)
else:
if i in definitionLineNos:
indent = definitionIndent
else:
indent = find_first(line, lambda c: not c.isspace())
if indent == -1:
indent = 0
wrapper.subsequent_indent = " " * indent
result.extend(wrapper.wrap(line))
return "\n".join(result)
def makeArgParser():
ap = argparse.ArgumentParser(description='Generates a C++ argument parser.')
ap.add_argument("helpfile", metavar="text file",
help="a text file containing the help text")
ap.add_argument("-i", "--indent", metavar="N", type=int,
dest="indent", default=-1,
help="indentation width when option help text is word-wrapped")
ap.add_argument("-c", "--class", metavar="NAME",
dest="className", default="Arguments",
help="the name of the generated class")
ap.add_argument("-f", "--file", metavar="NAME",
dest="fileName", default="ParseArguments",
help="the file name (without extension) of the generated files")
ap.add_argument("--cpp", metavar="CPP",
dest="cpp", default="cpp",
help="the extension of the generated implementation file (default is cpp)")
ap.add_argument("--hpp", metavar="HPP",
dest="hpp", default="h",
help="the extension of the generated header file (default is h)")
ap.add_argument("--function", metavar="NAME",
dest="functionName", default="parse_arguments",
help="the name of the generated function (default is parse_arguments)")
ap.add_argument("--namespace", metavar="NAME",
dest="namespace", default="",
help="the namespace of the generated functions and classes")
ap.add_argument("--parenthesis", metavar="PARENS",
dest="parenthesis", default="",
help="Set the parenthesis used to enclose the "
"definitions and separate properties from each "
"other in the help file. (Defalut is \"{{ | }}\"")
ap.add_argument("--test",
dest="includeTest", action="store_const",
const=True, default=False,
help="Include a main-function the source file")
ap.add_argument("--width", metavar="N", type=int,
dest="width", default=79,
help='line width for help text word wrapping (default is 79)')
ap.add_argument("--debug",
dest="listProperties", action="store_const",
const=True, default=False,
help="list the parser result without generating files")
return ap
def inferIndentation(line):
gaps = []
start = -1
for i, c in enumerate(line):
if c.isspace():
if start == -1:
start = i
elif start != -1:
if i - start > 1:
gaps.append((i - start, start))
start = -1
if start != -1:
gaps.append((len(line) - start, start))
if not gaps:
return 0
gaps.sort()
return gaps[-1][0] + gaps[-1][1]
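# Example (assumed input): for "-a, --all      enable everything" the largest
# whitespace gap ends where the help text begins, so that column is returned
# as the inferred indentation.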
def inferOptionIndentation(text, lineNos):
lines = text.split("\n")
widths = {}
for lineNo in lineNos:
width = inferIndentation(lines[lineNo])
if width in widths:
widths[width] += 1
else:
widths[width] = 1
n, width = 0, 0
for key in widths:
if widths[key] > n or widths[key] == n and key > width:
n, width = widths[key], key
return width
def main(args):
try:
args = makeArgParser().parse_args()
except argparse.ArgumentError:
return 1
try:
if args.parenthesis:
parens = args.parenthesis.split()
if len(parens) == 3 and parens[0] and parens[1] and parens[2]:
helptextparser.StartDefinition = parens[0]
helptextparser.DefinitionSeparator = parens[1]
helptextparser.EndDefinition = parens[2]
else:
print("Invalid parenthesis: " + args.parenthesis)
print("The parenthesis string must consist of the opening "
" parenthesis (defaults is \"${\"), the property "
" separator (default is \"|\") and the closing "
" parenthesis (default is \"}$\")separated by a space "
" character. The space character must either be "
" escaped or the entire option must be enclosed in "
" quotes. For instance to produce the default "
" perenthesis: \"--parenthesis=${ }$\".")
return 1
parserResult = helptextparser.parseFile(args.helpfile)
except IOError as ex:
print(ex)
return 2
except Error as ex:
print(ex)
return 3
if args.indent != -1:
indentation = args.indent
else:
indentation = inferOptionIndentation(parserResult.text,
parserResult.definitionLineNos)
text = formatText(parserResult.text,
parserResult.definitionLineNos,
args.width,
indentation)
if args.listProperties:
print("")
print("Options")
print("=======")
for o in parserResult.options:
print(o.props)
print("")
print("Arguments")
print("=========")
for a in parserResult.arguments:
print(a.props)
print("")
print("Members")
print("=======")
for m in parserResult.members:
print(m.props)
print("")
print("Help text")
print("=========")
print(text)
return 0
try:
hppFile = args.fileName + "." + args.hpp
argparser_hpp.createFile(hppFile,
parserResult.members,
className=args.className,
functionName=args.functionName,
namespace=args.namespace)
cppFile = args.fileName + "." + args.cpp
argparser_cpp.createFile(cppFile,
text,
parserResult.options,
parserResult.arguments,
parserResult.members,
className=args.className,
functionName=args.functionName,
namespace=args.namespace,
headerFileName=os.path.basename(hppFile),
includeTest=args.includeTest)
print("%s: generated %s and %s"
% (os.path.basename(sys.argv[0]), hppFile, cppFile))
    except Exception as ex:
        print("Error: " + str(ex))
        return 3
# argparser_generator.crateSourceFile("CommandLine.cpp", helpText)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: Argen/Argen/error.py
```python
class Error(Exception):
def __init__(self, msg, lineNo="", fileName=""):
Exception.__init__(self, msg)
self.lineNo = lineNo
self.fileName = fileName
def formattedMsg(self):
msg = Exception.__str__(self)
if self.lineNo:
return "%s[%s] Error: %s" % (self.fileName, self.lineNo, msg)
elif self.fileName:
return "%s Error: %s" % (self.fileName, msg)
else:
return "Error: " + msg
def __str__(self):
return self.formattedMsg()
```
#### File: Argen/Argen/helptextparser.py
```python
import constants
from error import Error
import properties
import utilities
ArgCounter = 0
OptCounter = 0
StartDefinition = constants.DefaultStartDefinition
EndDefinition = constants.DefaultEndDefinition
DefinitionSeparator = constants.DefaultDefinitionSeparator
class ParserResult:
def __init__(self, text, args, members, argLineNos):
self.text = text
self.options = [a for a in args if a.flags]
self.arguments = [a for a in args if not a.flags]
self.members = members
self.definitionLineNos = argLineNos
########################
## PARSING THE HELP TEXT
########################
def isOption(s):
return s and len(s) >= 2 and s[0] in "-/" and not s[1].isspace()
def variableName(s):
return "_".join(utilities.translateToSpace(
s, "'!\"#$%&/()=?+*@.:,;<>^`-[]{}").split())
def variableNameFromFlags(flags):
name = ""
for flag in flags:
if flag in ("/?", "-?"):
return "help"
elif flag == "--":
return "end_of_options"
else:
newName = variableName(flag)
if not name:
name = newName
elif len(name) <= 1 and len(newName) > 1:
name = newName
if not name:
global OptCounter
name = "option_" + str(OptCounter)
return name
def parseFlags(text):
flags = []
argName = None
prevWasFlag = False
for word in text.split():
if word[-1] == ",":
word = word[:-1]
if word[0] in "-/":
index = word.find("=")
if index in (-1, 1) or word[:3] == "--=":
flag, arg = word, None
prevWasFlag = True
elif len(word) >= 2:
flag, arg = word[:index], word[index + 1:] or "VALUE"
prevWasFlag = False
else:
raise Error("Invalid option: " + word)
elif prevWasFlag:
prevWasFlag = False
flag, arg = None, word
else:
raise Error("Invalid option: " + word)
if flag:
flags.append(flag)
if arg:
argName = arg
return flags, argName
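# Example: parseFlags("-o FILE, --outfile=FILE") returns
# (["-o", "--outfile"], "FILE").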
def isLegalFlag(f):
if 1 <= len(f) <= 2:
return True
elif len(f) > 2 and f[0] in "-/" and "=" in f:
return False
else:
        return True
def findSingle(s, sub, start = 0):
"""findSingle(s, sub[, start]) -> int
Returns the index of the first instance of sub in s which isn't
immediately followed by another instance of sub. The search starts at
start.
"""
index = s.find(sub, start)
while index != -1:
nextStart = index + len(sub)
while s[nextStart:nextStart + len(sub)] == sub:
nextStart += len(sub)
if (nextStart == index + len(sub)):
return index
index = s.find(sub, nextStart)
return index
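# Example: findSingle("a|b||c", "|") returns 1; the "||" starting at index 3
# is skipped because it is a repeated separator.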
def splitSingle(s, separator, maxCount=-1):
"""
splitSingle(s, separator[, maxCount]) -> list of strings
Essentially performs the same operation as
s.split(separator, maxCount), but sequences of separators are
left alone (e.g. the C++ logical or-operator "||" is not considered a
separator when the separator is "|").
"""
result = []
start = 0
next = findSingle(s, separator)
while next != -1 and maxCount != 0:
result.append(s[start:next])
start = next + len(separator)
next = findSingle(s, separator, start)
maxCount -= 1
result.append(s[start:])
return result
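# Example: splitSingle("a|b||c|d", "|") returns ["a", "b||c", "d"].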
def parseProperties(s):
props = {}
parts = [p.strip() for p in splitSingle(s, DefinitionSeparator)]
for prop in parts:
kv = [s.strip() for s in prop.split(":", 1)]
if len(kv) != 2:
raise Error("invalid property: \"%s\"" % prop)
key = kv[0].lower()
props[constants.PropAliases.get(key, key)] = kv[1]
if "flags" in props:
flags = props["flags"].split()
for f in flags:
if not isLegalFlag(f):
raise Error("\"%s\" is an illegal flag (it contains =)" % f)
if "type" in props:
props["type"] = props["type"].lower()
if props["type"] not in constants.LegalTypeValues:
raise Error("%(type)s is an illegal value for the type property" %
props)
return props
def parseArg(s):
global ArgCounter
if not s:
s = "arg %d" % ArgCounter
props = dict(argument=s,
name=variableName(s),
textName=s,
autoindex=str(ArgCounter))
props["member"] = props["name"]
ArgCounter += 1
if s:
if s[0] == "[":
if s.endswith("...") or s.endswith("...]"):
props["count"] = "0.."
else:
props["count"] = "0..1"
elif s.endswith("..."):
props["count"] = "1.."
else:
props["count"] = "1"
return props
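# Count examples: "[FILE]" yields "0..1", "[FILE ...]" yields "0..",
# "FILE ..." yields "1.." and a plain "FILE" yields "1".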
def parseOption(s):
global OptCounter
OptCounter += 1
flags, argument = parseFlags(s)
props = dict(flags=" ".join(flags),
textName=s,
name=variableNameFromFlags(flags))
props["member"] = props["name"]
if argument:
props["argument"] = argument
return props
def parseFlagsProperty(flags):
global OptCounter
OptCounter += 1
props = dict(name=variableNameFromFlags(flags.split()),
textName=flags)
props["member"] = props["name"]
return props
def parseDefinition(s):
parts = splitSingle(s, DefinitionSeparator, 1)
text = parts[0]
if len(parts) == 2:
explicitProps = parseProperties(parts[1])
else:
explicitProps = {}
for key in explicitProps:
if key not in constants.LegalProps:
raise Error("Unknown property name: " + key)
stripped = explicitProps.get("text", text).strip()
if "flags" in explicitProps:
props = parseFlagsProperty(explicitProps["flags"])
elif isOption(stripped):
props = parseOption(stripped)
else:
props = parseArg(stripped)
props.update(explicitProps)
if "index" in props and "flags" in props:
raise Error("Options can't have the index property.")
return text, props
def appendText(lst, s):
if s:
lst.append(s)
return s.count("\n")
def isStartOfLine(textList):
for s in reversed(textList):
i = s.rfind("\n")
if i != -1:
return i + 1 == len(s) or s[i + 1:].isspace()
elif not s.isspace():
return False
return True
def parseText(text):
outText = []
argProps = []
argLineNos = set()
prv = (0, 0, None)
cur = utilities.findToken(text, StartDefinition, EndDefinition)
lineNo = 1
skippedNewlines = 0
while cur[0] != cur[1]:
lineNo += appendText(outText, text[prv[1]:cur[0]])
if StartDefinition in cur[2]:
raise Error("Definition seems to be missing a closing \"%s\""
% EndDefinition, lineNo)
try:
txt, props = parseDefinition(cur[2])
props["lineno"] = str(lineNo)
except Error as ex:
ex.lineNo = str(lineNo)
raise ex
if txt and isStartOfLine(outText):
argLineNos.add(lineNo - skippedNewlines - 1)
if ((not txt) and
(not outText or outText[-1][-1] == "\n") and
(cur[1] != len(text) and text[cur[1]] == "\n")):
lineNo += 1
skippedNewlines += 1
cur = cur[0], cur[1] + 1, cur[2]
appendText(outText, txt)
lineNo += cur[2].count("\n")
argProps.append(props)
prv = cur
cur = utilities.findToken(text, StartDefinition, EndDefinition, cur[1])
appendText(outText, text[prv[1]:])
return "".join(outText), argProps, argLineNos
def parseFile(fileName):
try:
text, argProps, argLineNos = parseText(open(fileName).read())
properties.inferIndexProperties(argProps)
args = properties.makeArguments(argProps)
members = properties.makeMembers(args)
for m in members:
if m.type == "help":
for a in m.arguments:
if not a.flags:
raise Error("Only options can be of type \"help\".",
a.lineNo)
break
else:
raise Error("There is no help-option. Use property "
"\"type: help\" to indicate the help-option.")
return ParserResult(text, args, members, argLineNos)
except Error as ex:
ex.fileName = fileName
raise ex
# if __name__ == "__main__":
# import sys
# def test(lines, args, s):
# try:
# txt, argProps, argLines = parseText(s)
# properties.inferIndexProperties(argProps)
# arg = [Argument(p) for p in argProps]
# args.extend(arg)
# lines.append(txt)
# except Error as ex:
# print(ex)
# def main(args):
# args = []
# lines = []
# # test(lines, args, "${<file1>}$")
# # test(lines, args, "${<file2 ...>}$")
# # test(lines, args, "${[file3]}$")
# # test(lines, args, "${[file4 ...]}$")
# # test(lines, args, "${knut VALUE, finn VALUE|flags: knut finn|argument: VALUE}$")
# # test(lines, args, "${-s TEXT| count: ..10}$\n${--special| member: s | value: \"$spec$\"}$")
# # test(lines, args, "${-h, --help }$ Show help.")
# # test(lines, args, "${-o, --outfile=FILE}$ Output file for logging info.")
# # test(lines, args, "${-o FILE, --outfile=FILE}$ Output file for logging info.")
# # test(lines, args, "${-p, --point=X,Y,Z | Default: 0.0}$ Set the point.")
# # test(lines, args, "${-i PATH, --include=PATH| delimiter: :}$ Set paths to include.")
# # test(lines, args, "With${| Text: --top-secret}$\nnewline")
# # test(lines, args, "${| Text: --secret}$\nNo newline")
# test(lines, args, "${-- }$ End of options, remainder are arguments.")
# # test(lines, args, "${YYYY-MM-DD| delimiter: - | Member: date}$")
# # test(lines, args, "${HH:MM:SS| delimiter: : | Member: time}$")
# # test(lines, args, "${<Kid yoU not>|index:3}$")
# # test(lines, args, "${}$")
# # test(lines, args, "${-a | text: -a -A -all}$ All of them")
# # test(lines, args, "Kjell\n${|Text:--foo=N}$${|Text:--bar=N}$\nKaare")
# # test(lines, args, "[OPTIONS]\n\t${-b}$\n\t${-c}$\n\t${-d}$")
# # test(lines, args, "${-e=N| values: [0.0..5.0)}$ All of them")
# # test(lines, args, "${-a --bah|flags: foot ball}$ Odd options")
# print("\n".join(lines))
# for arg in args:
# print(arg, arg.props)
# try:
# members = properties.makeMembers(args)
# for m in members:
# print(m)
# except Error as ex:
# print(str(ex))
# return 0
# sys.exit(main(sys.argv[1:]))
```
#### File: Argen/Argen/member.py
```python
import utilities
class Member(object):
"""This class represents a member variable in the struct that is
returned by the generated argument parser function.
"""
def __init__(self, props):
self.default = props["default"]
self.props = props
self.arguments = props["arguments"]
self.name = props["name"]
self.values = utilities.parseValues(props.get("values", ""))
self.valueType = props["valuetype"]
self.count = utilities.parseCount(props["count"])
self.minCount, self.maxCount = self.count
self.include = props.get("include")
self.includeCpp = props.get("includecpp")
self.action = props.get("memberaction")
self.condition = props.get("membercondition")
self.conditionMessage = props.get("memberconditionmessage")
self.type = props.get("type")
if not self.type:
self.type = "value" if self.maxCount == 1 else "list"
if self.type in ("list", "multivalue"):
self.memberType = "std::vector<%s>" % self.valueType
else:
self.memberType = self.valueType
self.isOption = not any(a for a in self.arguments if not a.flags)
def __getitem__(self, key):
return self.__getattribute__(key)
def __str__(self):
return "%s %s;" % (self.memberType, self.name)
@property
def lineNo(self):
return ", ".join(set(a.lineNo for a in self.arguments))
@property
def flags(self):
flags = []
for a in self.arguments:
flags.extend(a.flags)
return ", ".join(flags)
```
#### File: Argen/Argen/properties.py
```python
from argument import Argument
import constants
from error import Error
from member import Member
import utilities
def updateProperties(props, other):
for key in other:
if key not in props:
props[key] = other[key]
elif props[key] != other[key]:
raise Error(
"Multiple definitions of property %s: \"%s\" and \"%s\""
% (key, props[key], other[key]))
def maxCount(a, b):
return -1 if -1 in (a, b) else max(a, b)
def getValueType(s):
if s in ("true", "false"):
return "bool"
elif len(s) > 1 and s[0] == '"' and s[-1] == '"':
return "std::string"
try:
int(s)
return "int"
except ValueError:
pass
try:
float(s)
return "double"
except ValueError:
pass
return ""
def inferValueType(props):
vals = []
if "default" in props:
vals.extend(props["default"].split("|"))
if "values" in props:
vals.extend(utilities.translateToSpace(
props["values"].replace("..", " "),
"[]<>").split())
vals.extend(a.value for a in props["arguments"] if a.value)
if not vals:
argNames = set(o.argument for o in props["arguments"] if o.flags)
if len(argNames) == 1:
argName = list(argNames)[0]
if argName in constants.IntegerArguments:
return "int"
return "std::string"
types = set(v for v in [getValueType(s) for s in vals] if v)
if len(types) == 1:
return list(types)[0]
elif len(types) == 2 and "int" in types and "double" in types:
return "double"
elif types:
raise Error("%s: unable to infer correct value type, can be any of %s."
% (props["name"], ", ".join(types)),
joinLineNos(*props["arguments"]))
else:
raise Error("%s: unable to infer correct value type. "
"(String values must be enclosed by quotes, e.g. \"foo\")"
% (props["name"]), joinLineNos(*props["arguments"]))
def inferIndexProperties(propsList):
propsWithIndex = [p for p in propsList if "autoindex" in p]
propsWithIndex.sort(key=lambda p: int(p["autoindex"]))
indexedProps = [None] * len(propsWithIndex)
for props in propsWithIndex:
if "index" in props:
try:
i = int(props["index"])
except ValueError:
raise Error("Invalid index property: %(index)s" % props,
props["lineno"])
if 0 > i or i >= len(indexedProps):
raise Error("Index is too large, it is %d, maximum is %d."
% (i, len(indexedProps) - 1), props["lineno"])
elif indexedProps[i]:
raise Error("Two arguments can't have the same index.",
joinLineNos(indexedProps[i]["lineno"],
props["lineno"]))
            indexedProps[i] = props
    i = 0
    for p in propsWithIndex:
        if "index" not in p:
            while i < len(indexedProps) and indexedProps[i]:
                i += 1
            p["index"] = str(i)
            i += 1
def inferDefaultValue(props):
value = ""
if "values" in props:
vals = utilities.parseValues(props["values"])
if vals and vals[0][2] in ("=", "<="):
value = utilities.parseValues(props["values"])[0][0]
if not value:
if props["valuetype"] == "bool":
value = "false"
elif props["valuetype"] != "std::string":
value = "0"
if value and props["type"] == "multivalue":
count = min(a.minDelimiters for a in props["arguments"])
value = "|".join([value] * (count + 1))
return value
def expandReferences(s, operator):
text = []
references = set()
prevEnd = 0
start, end, name = utilities.findToken(s, "$", "$")
while start != end:
text.append(s[prevEnd:start])
text.append("result" + operator)
text.append(name)
references.add(name)
prevEnd = end
start, end, name = utilities.findToken(s, "$", "$", end)
text.append(s[prevEnd:])
return "".join(text), references
def prepareCondition(props, name, members, operator):
if name not in props:
return
props[name], refs = expandReferences(props[name], operator)
for ref in refs:
if ref not in members:
raise Error('Condition references unknown member "%s"' % ref)
if name + "message" not in props:
kind = "option" if "flags" in props else "argument"
if len(refs) == 0:
msg = "this " + kind + " can't be used here"
elif len(refs) == 1 and props["member"] in refs:
if "value" in props:
msg = "illegal option"
else:
msg = "illegal value"
else:
verb = "doesn't" if len(refs) == 1 else "don't"
msg = (utilities.verbalJoin(refs) + " " + verb +
" have the value this " + kind + " requires.")
props[name + "message"] = msg
def prepareAction(props, name, members, operator):
if name not in props:
return
text, refs = expandReferences(props["action"], operator)
for ref in refs:
if ref not in members:
raise Error('Action references unknown member "%s"' % ref)
props["action"] = text if text[-1] == ";" else text + ";"
def reusePropertyCombinations(propsList, keyName, valueName):
values = {}
for props in propsList:
if keyName in props:
if valueName in props:
values[props[keyName]] = props[valueName]
elif props[keyName] in values:
props[valueName] = values[props[keyName]]
def minmaxCount(counts):
counts = list(counts)
if not counts:
return 0, 0
ma = 0
for c in counts:
if c[1] == -1:
ma = -1
elif ma != -1:
ma = max(c[1], ma)
return min(c[0] for c in counts), ma
def inferMemberProperties(props):
args = props["arguments"]
if args[0].flags:
counts = list(set(a.count for a in args))
if len(counts) == 1:
count = counts[0]
elif len(counts) == 2 and (0, 1) in counts:
count = counts[1] if counts[0] == (0, 1) else counts[0]
else:
raise Error("%(name)s: conflicting count properties. "
"Options writing to the same member must have "
"the same count." % props)
else:
count = (0, 0)
for lo, hi in (a.count for a in args):
if count[1] == -1:
hi = -1
elif hi != -1:
hi += count[1]
lo += count[0]
count = lo, hi
minDel, maxDel = minmaxCount(a.delimiterCount for a in args)
if count[1] == -1 or maxDel == -1:
props["count"] = "%d..-1" % (count[0] * (minDel + 1))
else:
props["count"] = "%d..%d" % (count[0] * (minDel + 1),
count[1] * (maxDel + 1))
if "valuetype" not in props:
props["valuetype"] = inferValueType(props)
if "type" not in props:
if (props["valuetype"] == "bool" and count == (0, 1) and
not [a for a in args if not a.flags]):
if props["name"] == "help":
props["type"] = "help"
elif props["name"] == "end_of_options":
props["type"] = "final"
else:
props["type"] = "value"
elif count[1] != 1 or maxDel == -1:
props["type"] = "list"
elif maxDel == 0:
props["type"] = "value"
elif minDel == maxDel:
props["type"] = "multivalue"
else:
for lo, hi in (a.delimiterCount for a in args):
if lo != hi:
props["type"] = "list"
break
else:
props["type"] = "multivalue"
elif props["type"] != "list" and count[1] != 1:
raise Error('%(name)s: type must be "list" when '
'the maximum count is greater than 1.' % props)
elif props["type"] == "multivalue":
for lo, hi in (a.delimiterCount for a in args):
if lo != hi:
raise Error("%(name)s: type \"multivalue\" requires a "
"fixed number of delimiters." % props)
for a in args:
if a.minDelimiters != a.maxDelimiters:
raise Error("%(name)s: type \"multivalue\" requires a "
"fixed number of delimiters." % props)
elif props["type"] in ("help", "info", "final"):
if props["valuetype"] != "bool":
raise Error("%(name)s: when type is \"%(type)s\", valuetype "
"must be bool, not %(valuetype)s." % props)
elif any(a for a in args if not a.flags):
raise Error("only options can have type \"%(type)s\"." % props)
elif "values" in props:
raise Error("options of type \"%(type)s\" cant have "
"the \"values\" property." % props)
if "default" not in props:
if not minDel and (props["type"] != "list" or count[0] != 0):
props["default"] = inferDefaultValue(props)
else:
props["default"] = None
elif props["default"] and count[0] != 0:
raise Error("%(name)s: can't have default value when minimum count "
"is non-zero." % props)
def getMemberProperties(args):
members = {}
for arg in args:
name = arg.memberName
if name in members:
memargs = members[name]["arguments"]
memargs.append(arg)
if (len(arg.flags) != 0) != (len(memargs[0].flags) != 0):
raise Error(name + ": arguments and options can't write to "
"the same member. (Use the \"member\" propery to "
"set a different member name)",
joinLineNos(*memargs))
try:
updateProperties(members[name], arg.memberProps)
except Error as e:
raise Error(name + ": " + str(e), joinLineNos(*memargs))
else:
members[name] = arg.memberProps.copy()
members[name]["arguments"] = [arg]
members[name]["name"] = name
return members
def inferArgumentProperties(props):
if not props.get("flags", " "):
raise Error("Flags property can't be empty.")
if "flags" not in props and "value" in props:
raise Error("Arguments can't have the value property.")
if "argument" in props and "value" in props:
raise Error("An option can't have both argument and "
"value properties.")
if not props.get("argument", props.get("flags")):
props["argument"] = "VALUE"
if not props.get("value", " "):
raise Error("Value property can't be empty.")
if "value" not in props and "argument" not in props:
props["value"] = "true"
if "delimitercount" in props:
lo, hi = utilities.parseCount(props["delimitercount"])
if lo < 0:
raise Error("Minimum DelimiterCount can't be less than zero.")
elif hi != -1 and hi < lo:
raise Error("Maximum DelimiterCount can't be less than "
"the minimum.")
if "delimiter" not in props:
s = props.get("value") or props.get("argument", "")
if "," in s:
props["delimiter"] = ","
if len(props.get("delimiter", " ")) != 1:
msg = "Delimiter must be a single non-whitespace character."
if "," in props.get("value") or props.get("argument", ""):
msg += ' Use "DelimiterCount: 0" to disable the comma-delimiter.'
raise Error(msg)
if "delimiter" not in props and props.get("delimitercount", "0") != "0":
raise Error("DelimiterCount property where there is no delimiter.")
if "delimiter" in props and "delimitercount" not in props:
s = props.get("value") or props.get("argument", "")
n = s.count(props["delimiter"])
if n != 0:
props["delimitercount"] = str(n)
else:
props["delimitercount"] = "0.."
if "value" in props and "delimiter" in props:
props["value"] = "|".join(props["value"].split(props["delimiter"]))
if "type" in props:
props["type"] = props["type"].lower()
if props["type"] in ("help", "info", "final"):
if "value" not in props:
raise Error("Options of type %(type)s can't take an argument."
% props)
elif props["value"] != "true":
raise Error('Options of type %(type)s must have value "true".'
% props)
if "default" in props and "delimiter" in props:
count = utilities.parseCount(props["delimitercount"])
defvals = props["default"].split(props["delimiter"], count[1])
        if 1 < len(defvals) < count[0]:
raise Error("Default has too few delimited values (expects %d)." %
count[0])
elif len(defvals) == 1 and count[0] > 1:
defvals = defvals * count[0]
props["default"] = "|".join(defvals)
if "count" not in props:
props["count"] = "0..1" if "flags" in props else "1"
else:
c = utilities.parseCount(props["count"])
if c[0] < 0:
raise Error("Min-count can't be less than 0.")
elif c[1] == 0:
raise Error("Max-count can't be 0.")
if props.get("valuetype") == "string":
props["valuetype"] = "std::string"
if "conditionmessage" in props and "condition" not in props:
raise Error("ConditionMessage, but no Condition.")
if "postconditionmessage" in props and "postcondition" not in props:
raise Error("PostConditionMessage, but no PostCondition.")
def joinLineNos(*args):
nos = set()
for a in args:
if type(a) == str:
nos.add(a)
else:
nos.add(a.lineNo)
return ", ".join(nos)
def isShortOption(s):
return len(s) == 2 and not s[0].isalnum()
def hasCaseSensitiveShortOptions(args):
cases = set()
for a in args:
if not a.flags:
continue
for f in a.flags:
if (len(f) == 1 or isShortOption(f)) and f[-1].isalpha():
cases.add(f.islower())
return len(cases) != 1
def addOppositeCaseFlags(args):
for a in args:
if not a.flags:
continue
newFlags = []
for f in a.flags:
if (len(f) == 1 or isShortOption(f)) and f[-1].isalpha():
newFlags.append(f.swapcase())
a.flags.extend(newFlags)
def makeArguments(allProps):
args = []
reusePropertyCombinations(allProps, "condition", "conditionmessage")
reusePropertyCombinations(allProps, "membercondition", "memberconditionmessage")
members = [p["member"] for p in allProps]
for props in allProps:
try:
inferArgumentProperties(props)
prepareCondition(props, "condition", members, ".")
prepareCondition(props, "membercondition", members, "->")
prepareAction(props, "action", members, ".")
prepareAction(props, "memberaction", members, "->")
args.append(Argument(props))
except Error as ex:
            ex.lineNo = props["lineno"]
            raise ex
if not hasCaseSensitiveShortOptions(args):
        addOppositeCaseFlags(args)
return args
def makeMembers(args):
"""makeMembers(list of Argument instances) -> list of Member instances
Create Member instances based on the memberName- and memberProps-members
of the Argument instances in args.
"""
members = {}
allProps = getMemberProperties(args)
for key in allProps:
props = allProps[key]
try:
inferMemberProperties(props)
members[key] = Member(props)
except Error as ex:
ex.lineNo = joinLineNos(*props["arguments"])
raise ex
for arg in args:
arg.member = members[arg.memberName]
return sorted(members.values(), key=lambda m: m.name)
def ensureUniqueNames(args):
names = set()
for a in args:
if a.memberName in names:
name = a.memberName
i = 1
while name in names:
name = a.memberName + str(i)
a.memberName = name
names.add(a.memberName)
```
|
{
"source": "jebreimo/Yson",
"score": 3
}
|
#### File: Yson/tools/mergecpp.py
```python
import argparse
import glob
import os
import re
import sys
INCLUDE_FILE_REGEX = re.compile(r"\s*#include\s*(?:\"([^\"]+)\"|<([^>]+)>).*")
def get_all_files_names(dir_name):
result = {}
for top_dir, subdirs, file_names in os.walk(dir_name):
for file in file_names:
path = os.path.join(top_dir, file)
relpath = os.path.relpath(path, dir_name)
abspath = os.path.realpath(path)
if relpath not in result:
result[relpath] = abspath
return result
def get_includes(file_path):
includes = []
for line in open(file_path):
m = INCLUDE_FILE_REGEX.match(line)
if m:
includes.append(m.group(1) or m.group(2))
return includes
def get_dependency_map(files, known_files):
dependencies = {}
next_files_to_check = [os.path.realpath(f) for f in files]
while next_files_to_check:
files_to_check = next_files_to_check
next_files_to_check = []
for file_path in files_to_check:
real_file_path = os.path.realpath(file_path)
if real_file_path in dependencies:
continue
current_known_files = known_files.copy()
dir_name = os.path.dirname(file_path)
if not dir_name:
dir_name = os.getcwd()
current_known_files.update(get_all_files_names(dir_name))
dependencies[real_file_path] = []
visited = set()
for include in get_includes(file_path):
real_include_path = current_known_files.get(include)
if not real_include_path or real_include_path in visited:
continue
visited.add(real_include_path)
dependencies[real_file_path].append(real_include_path)
if real_include_path not in dependencies:
next_files_to_check.append(real_include_path)
return dependencies
def get_file_inclusion_order_rec(result, file_path, dependencies, visited):
visited.add(file_path)
for dependency in dependencies[file_path]:
if dependency not in visited:
get_file_inclusion_order_rec(result, dependency,
dependencies, visited)
result.append(file_path)
def get_file_inclusion_order(file_paths, dependencies, ignore_files):
visited = set(ignore_files)
result = []
for file_path in file_paths:
get_file_inclusion_order_rec(result, os.path.realpath(file_path),
dependencies, visited)
return result
def get_file_contents(file_path, known_files):
file_path = os.path.realpath(file_path)
output = []
current_known_files = known_files.copy()
dir_name = os.path.dirname(file_path)
if not dir_name:
dir_name = os.getcwd()
current_known_files.update(get_all_files_names(dir_name))
for line in open(file_path, encoding="utf-8-sig"):
m = INCLUDE_FILE_REGEX.match(line)
if not m or not current_known_files.get(m.group(1) or m.group(2)):
output.append(line)
return output
def get_all_file_contents(file_paths, known_files):
output = []
for file_path in file_paths:
# if output and not output[-1].isspace():
# output.append("\n")
output.extend(get_file_contents(file_path, known_files))
return output
def remove_pragma_once(lines, keep_first):
output = []
regex = re.compile(r"\s*#pragma\s+once\b.*")
for line in lines:
m = regex.match(line)
if not m:
output.append(line)
elif keep_first:
output.append(line)
keep_first = False
return output
def remove_redundant_includes(lines):
output = []
known_includes = set()
for line in lines:
m = INCLUDE_FILE_REGEX.match(line)
if not m:
output.append(line)
elif (m.group(1) or m.group(2)) not in known_includes:
output.append(line)
known_includes.add(m.group(1) or m.group(2))
return output
def remove_matching_lines(lines, regex):
output = []
regex = re.compile(regex)
for line in lines:
m = regex.match(line)
if not m:
output.append(line)
return output
def remove_successive_empty_lines(lines):
output = []
for line in lines:
if not output or not line.isspace() or not output[-1].isspace():
output.append(line)
return output
def make_argument_parser():
ap = argparse.ArgumentParser(
        description='Merges multiple C++ source files into a single file,'
                    ' resolving their include order along the way.')
ap.add_argument("files", metavar="C++ files", nargs="+",
help="The C++ files that are to be merged.")
ap.add_argument("-o", "--output", metavar="FILE",
help="The output file.")
ap.add_argument("--no-pragma-once", action="store_const", const=True,
default=False,
help="Don't insert a pragma once at the beginning of the"
" output file.")
ap.add_argument("-p", "--prepend", metavar="TEXT", action="append",
help="Write TEXT at the start of the output file.")
ap.add_argument("-i", "--include", metavar="DIR", action="append",
help="Add DIR to the list of include and source dirs.")
ap.add_argument("-f", "--filter", metavar="FILE", action="append",
help="Filter out FILE or files in FILE if FILE is a directory.")
return ap
def main():
args = make_argument_parser().parse_args()
paths = []
visited = set()
for path in args.files:
if not os.path.exists(path):
tmp = glob.glob(path)
if not tmp:
print(f"WARNING: {path} not found.")
for p in tmp:
if p not in visited:
paths.append(p)
visited.add(p)
elif path not in visited:
paths.append(path)
visited.add(path)
else:
print(f"WARNING: {path} is listed more than once among the input"
f" files. All but the first will be ignored.")
known_files = {}
if args.include:
for dir_name in args.include[::-1]:
known_files.update(get_all_files_names(dir_name))
ignore_files = set()
if args.filter:
for name in args.filter:
if os.path.isdir(name):
ignore_files.update(get_all_files_names(name).values())
elif os.path.isfile(name):
ignore_files.add(os.path.realpath(name))
dependencies = get_dependency_map(paths, known_files)
file_order = get_file_inclusion_order(paths, dependencies, ignore_files)
lines = get_all_file_contents(file_order, known_files)
lines = remove_pragma_once(lines, not args.no_pragma_once)
lines = remove_redundant_includes(lines)
lines = remove_successive_empty_lines(lines)
if args.prepend:
text = "".join(args.prepend) + "".join(lines)
else:
text = "".join(lines)
if args.output:
if os.path.exists(args.output) and open(args.output).read() == text:
return 0
        out_dir = os.path.dirname(args.output)
        if out_dir and not os.path.exists(out_dir):
            os.makedirs(out_dir)
open(args.output, "w").write(text)
print(f"Updated {args.output}")
else:
sys.stdout.write(text)
return 0
if __name__ == "__main__":
sys.exit(main())
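# Example invocation (paths are illustrative):
#   python mergecpp.py src/*.cpp -i include -o single_file/all.cpp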
```
|
{
"source": "jebreimo/Ystring2",
"score": 2
}
|
#### File: tools/unicode_character_tables/make_normalization_file.py
```python
import datetime
import os
import sys
import codegen
Template = """\
//****************************************************************************
// Copyright © [[[year]]] <NAME>. All rights reserved.
// Created by <NAME> on [[[date]]]
//
// This file is distributed under the Simplified BSD License.
// License text is included with the source distribution.
//****************************************************************************
#include <cstdint>
namespace Ystring
{
constexpr uint64_t TO_NORMALIZED[] =
{
[[[to_normalized]]]
};
constexpr uint64_t TO_DENORMALIZED[] =
{
[[[to_denormalized]]]
};
}
"""
def get_rules(file_name):
result = []
for line in open(file_name):
line = line.strip()
if not line:
continue
parts = line.split(";")
char_id = int(parts[0], 16)
if not parts[5] or parts[5][0] == '<':
continue
values = [int(p, 16) for p in parts[5].split()]
if len(values) != 2:
continue
result.append((char_id, values))
return result
def encode_normalized(rule):
return (rule[0] << 40) | (rule[1][0] << 20) | rule[1][1]
def encode_denormalized(rule):
return (rule[1][0] << 40) | (rule[1][1] << 20) | rule[0]
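# Each table entry packs three 20-bit code points into one 64-bit value.
# For example, A-with-ring (U+00C5) canonically decomposes to U+0041 U+030A,
# so encode_normalized gives (0xC5 << 40) | (0x41 << 20) | 0x30A
# == 0x0000C5000410030A.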
def make_normalized_table_rows(rules):
result = []
n = 3
remainder = len(rules) % n or n
for i in range(0, len(rules) - remainder, n):
values = (encode_normalized(r) for r in rules[i:i + n])
result.append(", ".join("0x%016X" % v for v in values) + ",")
values = (encode_normalized(r) for r in rules[len(rules) - remainder:])
result.append(", ".join("0x%016X" % v for v in values))
return result
def make_denormalized_table_rows(rules):
rules = sorted(rules, key=lambda r: (r[1][0], r[1][1]))
result = []
n = 3
remainder = len(rules) % n or n
for i in range(0, len(rules) - remainder, n):
values = (encode_denormalized(r) for r in rules[i:i + n])
result.append(", ".join("0x%016X" % v for v in values) + ",")
values = (encode_denormalized(r) for r in rules[len(rules) - remainder:])
result.append(", ".join("0x%016X" % v for v in values))
return result
def write_cpp(normalized, denormalized):
date = datetime.date.today()
codegen_params = {'year': date.year,
'date': "%d-%02d-%02d" % (date.year, date.month, date.day),
'to_normalized': normalized,
'to_denormalized': denormalized}
print(codegen.make_text(Template, codegen.DictExpander(codegen_params)))
def analyze(rules):
import math
for i in range(1, 6):
f = lambda n: (n >> i, n & ((1 << i) - 1))
la, lb, lc = set(), set(), set()
for rule in rules:
# print("%05X -> %05X %05X" % (rule[0], rule[1][0], rule[1][1]))
# print("%5X %5X -> %5X" % (rule[1][0], rule[1][1], rule[0]))
a, b, c = rule[0], rule[1][0], rule[1][1]
a, b, c = f(a), f(b), f(c)
la.add(a[0])
lb.add(b[0])
lc.add(c[0])
# print("%04X %02X -> %04X %02X %04X %02X" % (a[0], a[1], b[0], b[1], c[0], c[1]))
m, n, o = len(la), len(lb), len(lc)
print("Shift %d: %d %d %d" % (i, m, n, o))
bits = sum(int(math.ceil(math.log2(v))) for v in (m, n, o)) + 3 * i
print(f"Bits = {bits}")
def main(args):
if len(args) != 1:
print("usage: %s <unicode data file>" % os.path.basename(sys.argv[0]))
return 1
rules = get_rules(args[0])
# analyze(rules)
write_cpp(make_normalized_table_rows(rules),
make_denormalized_table_rows(rules))
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
|
{
"source": "jebtang/ml-suite",
"score": 2
}
|
#### File: examples/classification/test_classify.py
```python
import os.path
import math
import sys
import timeit
import xdnn, xdnn_io
import numpy as np
import types
def main():
args = xdnn_io.processCommandLine()
ret = xdnn.createHandle(args['xclbin'], "kernelSxdnn_0", args['xlnxlib'])
if ret != 0:
sys.exit(1)
(weightsBlob, fcWeight, fcBias ) = xdnn_io.loadWeights( args )
(fpgaInputs, batch_sz) = xdnn_io.prepareInput( args )
fpgaOutput = xdnn_io.prepareOutput(args['fpgaoutsz'], batch_sz)
for i in range(1):
startTime = timeit.default_timer()
xdnn.execute(args['netcfg'],
weightsBlob, fpgaInputs, fpgaOutput,
batch_sz, # num batches
args['quantizecfg'], args['scaleB'], args['PE'])
elapsedTime = timeit.default_timer() - startTime
print "\nAfter FPGA (%f ms)" % (elapsedTime*1000)
startTime = timeit.default_timer()
fcOut = xdnn.computeFC(fcWeight, fcBias, fpgaOutput,
batch_sz, args['outsz'], args['fpgaoutsz'], args['useblas'])
elapsedTime = timeit.default_timer() - startTime
print "\nAfter FC (%f ms)" % (elapsedTime*1000)
#for i in range(10):
# print "%f" % fpgaOutput[i],
startTime = timeit.default_timer()
softmaxOut = xdnn.computeSoftmax(fcOut, batch_sz)
elapsedTime = timeit.default_timer() - startTime
print "\nAfter Softmax (%f ms)" % (elapsedTime*1000)
#for i in range(10):
# print "%f" % fpgaOutput[i],
  xdnn_io.printClassification(softmaxOut, args)
  print("\nSuccess!\n")
xdnn.closeHandle()
if __name__ == '__main__':
main()
```
#### File: ext/PyTurboJPEG/example.py
```python
import cv2
from turbojpeg import TurboJPEG
import time
import os
import multiprocessing as mp
# specifying library path explicitly
# jpeg = TurboJPEG(r'D:\turbojpeg.dll')
# jpeg = TurboJPEG('/usr/lib64/libturbojpeg.so')
# jpeg = TurboJPEG('/usr/local/lib/libturbojpeg.dylib')
def absoluteFilePaths(directory):
dirlist = []
for dirpath,_,filenames in os.walk(directory):
for f in filenames:
dirlist.append( os.path.abspath(os.path.join(dirpath, f)))
return dirlist
# decoding input.jpg to BGR array
def img_decode (f):
in_file = open(f, 'rb')
bgr_array = jpeg.decode(in_file.read())
bgr_array = cv2.resize(bgr_array, (224,224))
in_file.close()
return bgr_array
# using default library installation
jpeg = TurboJPEG('../libjpeg-turbo/lib/libturbojpeg.so')
file_dir = absoluteFilePaths("/tmp/ilsvrc12_img_val/")
elapsed = time.time()
numiter = 50
num_proc = mp.cpu_count()  # number of worker processes; assumed value, not defined in the original
p = mp.Pool(processes=num_proc)
for i in range(numiter):
result = p.map(img_decode, file_dir)
print ("time:" , (time.time() - elapsed) / (len(file_dir) *numiter ) )
```
#### File: xfdnn/rt/xdnn_rt.py
```python
import tensorflow as tf
import numpy as np
from xfdnn_compiler_tensorflow import TFFrontend
#from xfdnn.tools.compile.frontends.frontend_caffe import CaffeFrontend
from tensorflow.python.platform import gfile
import xdnn_opt
class xdnnRT:
def __init__(self, compiler, rtargs):
#print ("compiler args", cargs)
self._inputs = self.list_inputs_of_graph()
pydotGraph, schedule, self._out, _ = compiler.compile()
# print ("compiled pydot graph", pydotGraph)
# print ("compiled schedule", schedule)
opt = None
if rtargs.device == "CPU":
opt = xdnn_opt.CPUTransform( self._inputs, pydotGraph, schedule)
elif rtargs.device == "FPGA":
if rtargs.xclbin:
opt = xdnn_opt.FPGATransform( self._inputs, pydotGraph, schedule, rtargs.xclbin)
else:
raise AttributeError("Must specify path to xclbin when device = FPGA")
else:
raise AttributeError("Unsupported device type", rtargs.device)
#variables hold the inputs/consts of graph
self._variables = opt.variables
self._layers = opt.getLayers()
for l in self._layers:
l.setup()
def list_inputs_of_graph(self):
pass
def preprocess(self,inputs):
pass
def batch_classify(self, img_list, batch, preprocess) :
print(len(img_list))
ctr = 0
pred = []
while ctr < len(img_list) :
ctrmax = min(ctr+batch, len(img_list))
pred.append(self.feed_forward(img_list[ctr:ctrmax], preprocess = preprocess))
ctr = ctrmax
if len(pred) == 0 : return []
elif len(pred) == 1 :
return pred[0]
return np.concatenate(pred)
def feed_forward(self, inputs, out=None, preprocess = None):
inp_dict = {}
if not preprocess:
preprocess = self.preprocess
inp_dict[self._inputs[0]] = preprocess(inputs)
for k, v in inp_dict.items():
self._variables[k] = v
for layer in self._layers:
            layer_inputs = [self._variables[inp] for inp in layer.inputs]
self._variables[layer.output] = layer.forward_exec( layer_inputs )
if out is None:
return self._variables[self._out]
return self._variables[out]
class TFxdnnRT(xdnnRT):
def __init__ ( self, cargs):
self._tfGraph = tf.GraphDef()
with gfile.FastGFile(cargs.networkfile, 'rb') as f:
self._tfGraph.ParseFromString(f.read())
compiler = TFFrontend(cargs)
xdnnRT.__init__(self, compiler, cargs)
def list_inputs_of_graph(self) :
res = []
for node in self._tfGraph.node :
if node.op == 'Placeholder' :
res.append(node.name)
return res
def preprocess(self, inputs):
if type(inputs) is not np.ndarray:
inputs = np.transpose(self.read_tensor_from_image_file(inputs), [0,3,1,2]) # assuming that there is only one input
return inputs
def read_tensor_from_image_file(self, file_name,
input_height=299,
input_width=299,
input_mean=0,
input_std=255):
input_name = "file_reader"
file_reader = tf.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(file_reader, channels=3, name="png_reader")
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(
tf.image.decode_gif(file_reader, name="gif_reader"))
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
else:
image_reader = tf.image.decode_jpeg(
file_reader, channels=3, name="jpeg_reader")
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
with tf.Session() as sess :
result = sess.run(normalized)
return result
```
|
{
"source": "jebuss/pydisteval",
"score": 3
}
|
#### File: pydisteval/disteval/basic_classification.py
```python
from __future__ import absolute_import, print_function, division
from logging import getLogger
import numpy as np
from sklearn.model_selection import StratifiedKFold
from .basics.classifier_characteristics import ClassifierCharacteristics
logger = getLogger('disteval.basic_classification')
def cv_test_ref_classification(clf,
X,
y,
sample_weight=None,
cv_steps=10,
return_all_models=False,
random_state=None):
"""Runs a classification betwenn the test data and the reference data.
This classification is run in a cross-validation with a provided
classifier. The classifier needs a fit function to start the model
building process and a predict_func to obtain the classifier score.
The score is expected to be between 0 and 1.
Parameters
----------
clf: object
Classifier that should be used for the classification.
It needs a fit and a predict_proba function.
X : numpy.float32array, shape=(n_samples, n_obs)
Values describing the samples.
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
sample_weight : None or numpy.float32array, shape=(n_samples)
If weights are used this has to contains the sample weights.
None in the case of no weights.
cv_steps: int, optional (default=10)
Number of cross-validation steps. If < 2 the model is trained on
all samples and no prediction is made.
return_all_models: bool, optional (default=False)
        If all models for the cross-validation should be saved and
        returned.
random_state: None, int or RandomState
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
Returns
-------
clf: object
        Trained classifier. If return_all_models is set, a list of all
        trained classifiers is returned.
y_pred : numpy.float32array, shape=(n_samples)
Array of the classifier score.
cv_step : numpy.int, shape=(n_samples)
Iteration in which the sample was classified.
"""
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
desired_characteristics = ClassifierCharacteristics()
desired_characteristics.opts['callable:fit'] = True
desired_characteristics.opts['callable:predict_proba'] = True
clf_characteristics = ClassifierCharacteristics(clf)
assert clf_characteristics.fulfilling(desired_characteristics), \
'Classifier sanity check failed!'
if cv_steps < 2:
clf = clf.fit(X=X,
y=y,
sample_weight=sample_weight)
logger.info('cv_steps were < 2, so the classifier was trained with'
' all provided data!')
return clf, None, None
else:
strat_kfold = StratifiedKFold(n_splits=cv_steps,
shuffle=True,
random_state=random_state)
cv_iterator = strat_kfold.split(X, y)
y_pred = np.zeros_like(y, dtype=float)
cv_step = np.zeros_like(y, dtype=int)
if return_all_models:
from copy import deepcopy
trained_clfs = []
for i, [train_idx, test_idx] in enumerate(cv_iterator):
X_train = X[train_idx]
X_test = X[test_idx]
y_train = y[train_idx]
if sample_weight is None:
sample_weight_train = None
else:
sample_weight_train = sample_weight[train_idx]
clf = clf.fit(X=X_train,
y=y_train,
sample_weight=sample_weight_train)
y_pred[test_idx] = clf.predict_proba(X_test)[:, 1]
cv_step[test_idx] = i
if return_all_models:
trained_clfs.append(deepcopy(clf))
if return_all_models:
clf = trained_clfs
return clf, y_pred, cv_step
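# Minimal usage sketch (classifier choice is illustrative):
# from sklearn.ensemble import RandomForestClassifier
# clf, y_pred, cv_step = cv_test_ref_classification(
#     RandomForestClassifier(n_estimators=100), X, y, cv_steps=5)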
```
#### File: disteval/basics/classifier_characteristics.py
```python
from __future__ import absolute_import, print_function, division
class ClassifierCharacteristics(object):
"""Class to define and compare Characteristics of classifier.
The core of the Class is the dict ops containing keys whether
attributes or functions are required/forbidden. Keys like
'callable:fit' are True if the classifier has a callable function
'fit'. Keys like 'has:feature_importance' are True if the classifier
has an atribute 'feature_importance'.
True in the dict means function/attribute is needed or present.
False means function/attribute is forbidden or not present.
None in the dict means is ignore in the evaluation
Parameters
----------
clf: None or object
        If None the dict is initiated with None for all keys.
        If clf is provided, the dict contains only True and False
        depending on the clf characteristics.
Attributes
----------
opts : dict
Dictionary containing all the needed/desired characteristics.
clf : object
If a clf is provided, a pointer to the classifier is stored.
To check characteristics later on."""
def __init__(self, clf=None):
self.opts = {
'callable:fit': None,
'callable:predict': None,
'callable:predict_proba': None,
'callable:decision_function': None,
'has:feature_importance': None}
if clf is not None:
self.clf = clf
for key in self.opts.keys():
self.opts[key] = self.__evalute_clf__(key)
def __evalute_clf__(self, key):
"""Check if the classifier provides the attribute/funtions
asked for with the key. Keys must start with either "callable:"
or "has:".
"callable:<name>" would check for a funtions with the name <name>.
"has:<name>" would check for a attribute with the name <name>.
Parameters
----------
        key: str
            Name of the characteristic to check, prefixed with
            "callable:" or "has:".
        Returns
        ----------
        present : bool
            Boolean whether the asked-for characteristic is present"""
if key.startswith('callable:'):
desired_callable = key.replace('callable:', '')
if hasattr(self.clf, desired_callable):
if callable(getattr(self.clf, desired_callable)):
return True
elif key.startswith('has:'):
desired_attribute = key.replace('has:', '')
if hasattr(self.clf, desired_attribute):
return True
else:
print(key)
            raise ValueError('Opts keys have to start with either '
                             '"callable:" for functions or "has:" for '
                             'attributes')
return False
def fulfilling(self, second_instance, two_sided=False):
"""Check if the classifier provides the attribute/funtions
asked for with the key. Keys must start with either "callable:"
or "has:".
"callable:<name>" would check for a funtions with the name <name>.
"has:<name>" would check for a attribute with the name <name>.
Parameters
----------
second_instance: ClassifierCharacteristics
Second instance of a ClassifierCharacteristics which defines
the needed characteristics.
two_sided: boolean, optional (default=False)
If False only the characteristics asked for in the second
instance has to be fulfilled. If two_sided is True. Both
instances has to be the same (equivalent to __eq__)
        Returns
        ----------
        fulfilled : bool
            Boolean whether the required characteristics are present"""
if two_sided:
check_keys_1 = set([k for k, v in self.opts.items()
if v is not None])
check_keys_2 = set([k for k, v in second_instance.opts.items()
if v is not None])
check_keys = check_keys_1.intersection(check_keys_2)
else:
check_keys = [k for k, v in second_instance.opts.items()
if v is not None]
for key in check_keys:
if key not in self.opts.keys():
if hasattr(self, 'clf'):
value = self.__evalute_clf__(key)
self.opts[key] = value
else:
                    raise KeyError('%s not set for the comparison partner'
                                   % key)
if key not in second_instance.opts.keys():
if hasattr(second_instance, 'clf'):
value = second_instance.__evalute_clf__(key)
second_instance.opts[key] = value
else:
                    raise KeyError('%s not set for the comparison partner'
                                   % key)
if self.opts[key] != second_instance.opts[key]:
att = key.replace('callable:', '')
att = att.replace('has:', '')
if self.opts[key]:
msg = 'Provided classifier has %s' % att
else:
msg = 'Provided classifier is missing %s' % att
raise AttributeError(msg)
return True
def __eq__(self, second_instance):
return self.fulfilling(second_instance, two_sided=True)
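# Usage sketch (mirrors how cv_test_ref_classification uses this class):
# required = ClassifierCharacteristics()
# required.opts['callable:fit'] = True
# required.opts['callable:predict_proba'] = True
# ClassifierCharacteristics(clf).fulfilling(required)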
```
#### File: disteval/basics/preparation.py
```python
from __future__ import absolute_import, print_function, division
from logging import getLogger
import warnings
import numpy as np
logger = getLogger('disteval.preparation')
def prepare_data(test_df,
ref_df,
test_weight=None,
ref_weight=None,
test_ref_ratio=1.,
random_state=None):
"""Makes the data usable for sklearn.
Parameters
----------
test_df : pandas.Dataframe, shape=(n_samples, X_names)
Dataframe of the test data
ref_df : pandas.Dataframe, shape=(n_samples, X_names)
Dataframe of the reference data
test_weight : str or None, optional (default=None)
Name of the columns containing the sample weight of the test
data. If None no weights will be used.
ref_weight : str or None, optional (default=None)
Name of the columns containing the sample weight of the
reference data. If None no weights will be used.
test_ref_ratio: float, optional (default=1.)
Ratio of test and train data. If weights are provided, the ratio
is for the sum of weights.
random_state: None, int or RandomState
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
Returns
-------
X : numpy.float32array, shape=(n_samples, n_obs)
Values of the columns which appeared in both Dataframes and
are not used as Weights
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
1 = Reference
0 = Test
sample_weight : None or numpy.float32array, shape=(n_samples)
Not None if ref_weight and/or test_weight was provided. If array
is returned, it contains the sample weights
X_names : list[str]
List of the names of the columns of X
"""
logger.debug("Praparing Data")
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
# make the dataframes homogeneous
test_X_names = set(test_df.columns)
ref_X_names = set(ref_df.columns)
# Check if weights are used
use_weights = False
if test_weight is not None:
use_weights = True
try:
test_X_names.remove(test_weight)
except KeyError:
raise KeyError('Weight \'%s\' not in test dataframe' % test_weight)
else:
# convert to float32 numpy array for sklearn
sample_weight_test = np.array(test_df.loc[:, test_weight].values,
dtype=np.float32)
elif ref_weight is not None:
# If ref uses weights, dummy weights are created for the test data
sample_weight_test = np.ones(len(test_df), dtype=np.float32)
else:
sample_weight_test = None
if ref_weight is not None:
use_weights = True
try:
ref_X_names.remove(ref_weight)
except KeyError:
raise KeyError('Weight \'%s\' not in reference dataframe' % ref_weight)
else:
# convert to float32 numpy array for sklearn
sample_weight_ref = np.array(ref_df.loc[:, ref_weight].values,
dtype=np.float32)
elif test_weight is not None:
# If test uses weights, dummy weights are created
sample_weight_ref = np.ones(len(ref_df), dtype=np.float32)
else:
sample_weight_ref = None
# This sections warns the user about differences between the datasets
if len(set.symmetric_difference(ref_X_names, test_X_names)) > 0:
unique_X_names_test = test_X_names.difference(ref_X_names)
unique_X_names_ref = ref_X_names.difference(test_X_names)
msg = 'Datasets are not consistent: '
for o in unique_X_names_ref:
msg += ' ref.%s' % o
for o in unique_X_names_test:
msg += ' test.%s' % o
msg += ' will be ignored'
warnings.warn(msg)
X_names = sorted(set.intersection(test_X_names, ref_X_names))
test_df = test_df.loc[:, X_names]
X_test, y_test, sample_weight_test = convert_and_remove_non_finites(
test_df, sample_weight_test, is_ref=False)
ref_df = ref_df.loc[:, X_names]
X_ref, y_ref, sample_weight_ref = convert_and_remove_non_finites(
ref_df, sample_weight_ref, is_ref=True)
# In this section the desired test/ref ratio is realized
if use_weights:
n_test = np.sum(sample_weight_test)
n_ref = np.sum(sample_weight_ref)
else:
n_test = len(y_test)
n_ref = len(y_ref)
if n_test / n_ref > test_ref_ratio:
probability = (test_ref_ratio * n_ref) / n_test
selected = random_state.uniform(size=y_test.shape[0]) <= probability
X_test, y_test, sample_weight_test = shrink_data(
selected, X_test, y_test, sample_weight_test)
elif n_test / n_ref <= test_ref_ratio:
probability = n_test / (n_ref * test_ref_ratio)
selected = random_state.uniform(size=y_ref.shape[0]) <= probability
X_ref, y_ref, sample_weight_ref = shrink_data(
selected, X_ref, y_ref, sample_weight_ref)
# Combining ref and test data into single numpy arrays
X = np.vstack((X_test, X_ref))
y = np.hstack((y_test, y_ref))
if use_weights:
sample_weight = np.hstack((sample_weight_test, sample_weight_ref))
else:
sample_weight = None
return X, y, sample_weight, list(X_names)
def convert_and_remove_non_finites(df, sample_weight, is_ref=False):
"""Makes the dataframes usable for sklearn.
For this purpose they are converted to numpy arrays and non finites
are removed.
Parameters
----------
df : pandas.Dataframe, shape=(n_samples, n_obs)
Dataframe that should be converted and filtered
sample_weight : array-like or None
Array containing the weights for the samples.
is_ref : boolean
Indicates if the provided dataframe should be treated as
reference data, so that y is set to 1.
Returns
-------
X : numpy.float32array, shape=(n_samples, n_obs)
Values of the columns which appeared in both Dataframes and
are not used as Weights
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
1 = Reference
0 = Test
sample_weight : None or numpy.float32array, shape=(n_samples)
Not None if ref_weight and/or test_weight was provided. If array
is returned, it contains the sample weights
"""
logger.debug("Converting")
X = np.array(df.values, dtype=np.float32)
if is_ref:
y = np.ones(X.shape[0], dtype=int)
set_name = 'reference set'
else:
y = np.zeros(X.shape[0], dtype=int)
set_name = 'test set'
isfinite = np.isfinite(X)
selected = np.sum(isfinite, axis=1) == len(df.columns)
n_selected = np.sum(selected)
if n_selected < X.shape[0]:
n_removed = X.shape[0] - n_selected
msg = '%d non finites removed from %s' % (n_removed, set_name)
logger.info(msg)
X = X[selected, :]
y = y[selected]
if sample_weight is not None:
sample_weight = sample_weight[selected]
return X, y, sample_weight
def shrink_data(selected, X, y, sample_weight=None):
"""Shrinks the data arrays by applying the selected mask on X, y
and the sample weights.
Parameters
----------
selected : array-like with booleans
Indicates if a sample should be used or not.
X : numpy.float32array, shape=(n_samples, n_obs)
Values describing the samples.
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
sample_weight : None (default) or numpy.float32array, shape=(n_samples)
If weights are used this array contains the sample weights.
Returns
-------
X : numpy.float32array, shape=(n_samples, n_obs)
Shrunken values describing the samples.
y : numpy.float32array, shape=(n_samples)
Shrunken array of the true labels.
sample_weight : None or numpy.float32array, shape=(n_samples)
If weights are used this shrunken array contains the sample weights.
"""
X = X[selected, :]
y = y[selected]
if sample_weight is not None:
sample_weight = sample_weight[selected]
return X, y, sample_weight
```
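A short usage sketch for `prepare_data` (not from the repository; column names and distributions are invented for illustration, and the import path is assumed from the file layout):
```python
import numpy as np
import pandas as pd
from disteval.basics.preparation import prepare_data

rng = np.random.RandomState(0)
test_df = pd.DataFrame({'x1': rng.normal(size=100),
                        'x2': rng.normal(size=100)})
ref_df = pd.DataFrame({'x1': rng.normal(0.1, 1., size=200),
                       'x2': rng.normal(size=200)})

# y is 0 for test samples and 1 for reference samples
X, y, sample_weight, X_names = prepare_data(test_df,
                                            ref_df,
                                            test_ref_ratio=1.,
                                            random_state=42)
```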
#### File: disteval/evaluation/feature_importance_test.py
```python
from __future__ import absolute_import, print_function, division
import numpy as np
from scipy.stats import norm
from ..basics.classifier_characteristics import ClassifierCharacteristics
def feature_importance_mad(clf,
alpha=0.05,
ignored_percentile=None):
"""This function fetches the feature importance values and runs a
criteria using the median absolute deviation. If a feature
importance difference to the median importance is greater than
a certain threshold and the feature is more important than the
median feature, the feature is removed. The threshold is:
1.4826 * cdf_norm**-1(1 - alpha/2) * MAD
The distribution of the feature importance can be expected, to have
a relativ flat distribution up from 0 upto a normal distributed
peak. The flat part is for constant or close to constant features.
The rest of the features can be expected to be choosen in a random
fashion. Therefore they build a normal distributed peak
around ~(1. / (n_features - n_constant_features)). To have a robust
measure for outliers the meadian absolute diviation (MAD) is used.
The definition of the MAD is:
median(|X_i - median(X)|)
For a mormal distribution the 1 sigma region is included in the
region between 1.4826 * MAD - median(X) and 1.4826 * MAD + median(X).
With the parameter alpha the used threshold is tuned in a way, for
a pure normal distribution alpha / 2 (only features above the
median are removed) features would be removed.
Parameters
----------
clf: object or list
Trained classifier or list of trained classifier.
alpha : float, optional (default=0.05)
Parameter tuning the threshold. See function description.
ignored_percentile : float or None, optional (default=None)
If not None, the fraction of lowest feature importances that is
ignored when computing the median and the MAD (e.g. 0.1 ignores
the lowest 10 %).
Returns
-------
kept: numpy.boolarray, shape=(n_features)
Whether the feature passes the MAD criterion.
feature_importance: numpy.array, shape=(n_features)
Array of the importance values for the features. If a list of
classifier is passed, it is the mean over all classifier.
feature_importance_std: None or numpy.array, shape=(n_features)
If a list of classifiers is passed, the standard deviation of
the feature importance values is returned. Otherwise None is
returned.
"""
desired_characteristics = ClassifierCharacteristics()
desired_characteristics.opts['has:feature_importances_'] = True
if isinstance(clf, list):
feature_importances = []
for i, clf_i in enumerate(clf):
clf_characteristics = ClassifierCharacteristics(clf_i)
assert clf_characteristics.fulfilling(desired_characteristics), \
'Classifier sanity check failed!'
feature_importances.append(clf_i.feature_importances_)
feature_importances = np.array(feature_importances)
feature_importance = np.mean(feature_importances, axis=0)
feature_importance_std = np.std(feature_importances, axis=0, ddof=1)
else:
clf_characteristics = ClassifierCharacteristics(clf)
assert clf_characteristics.fulfilling(desired_characteristics), \
'Classifier sanity check failed!'
feature_importance = clf.feature_importances_
feature_importance_std = None  # single classifier: no spread available
threshold = norm.ppf(1 - alpha/2) * 1.4826 # see docstring
if ignored_percentile is not None:
ignore_threshold = np.percentile(feature_importance,
ignored_percentile * 100)
idx = feature_importance > ignore_threshold
feature_importance_MAD = feature_importance[idx]
else:
feature_importance_MAD = feature_importance
median_importance = np.median(feature_importance_MAD)
MAD = np.median(np.absolute(feature_importance_MAD - median_importance))
diff = feature_importance - median_importance
kept = np.logical_or(np.absolute(diff) < threshold * MAD,
feature_importance <= median_importance)
return kept, feature_importance, feature_importance_std
def feature_importance_mad_majority(clfs,
ratio=0.9,
alpha=0.10,
ignored_percentile=None):
"""In this function a list of classifier must be provided. To decide
if a feature is removed, for each classifier the function
feature_importance_mad with the provided alpha is evaluated. And if
a feature is removed in atleast ratio-percent of the classifiers
the feature is removed. The motivation behind the majority vote is,
that if a feature is just above the threshold in a single test
because of statistical fluctuation is should be below the threshold
for most of the classifications. The alpha can be set less
conservative because this criteria is more robust against
statistical fluctuationsc.
Parameters
----------
clfs: list
List of trained classifiers.
ratio : float, optional (default=0.9)
Ratio of classifiers in which the feature should be removed.
alpha : float, optional (default=0.10)
Parameter tuning the threshold. See the feature_importance_mad
description.
Returns
-------
kept: numpy.boolarray, shape=(n_features)
Whether the feature passes the MAD criterion.
feature_importance: numpy.array, shape=(n_features)
Array of the importance values for the features. If a list of
classifier is passed, it is the mean over all classifier.
feature_importance_std: numpy.array, shape=(n_features)
Standard deviation of the feature importance values over the
list of classifiers.
"""
desired_characteristics = ClassifierCharacteristics()
desired_characteristics.opts['has:feature_importances_'] = True
assert isinstance(clfs, list), 'A list of classifiers has to be provided'
kept_arr = []
feature_importances = []
for i, clf_i in enumerate(clfs):
kept, feature_importance, _ = feature_importance_mad(
clf_i,
alpha=alpha,
ignored_percentile=ignored_percentile)
kept_arr.append(kept)
feature_importances.append(feature_importance)
kept_arr = np.array(kept_arr)
feature_importances = np.array(feature_importances)
feature_importance = np.mean(feature_importances, axis=0)
feature_importance_std = np.std(feature_importances, axis=0, ddof=1)
kept = np.sum(kept_arr, axis=0) >= ratio * kept_arr.shape[0]
return kept, feature_importance, feature_importance_std
```
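A usage sketch for the majority-vote criterion (not from the repository; the random forests stand in for any classifier exposing `feature_importances_`, and the import path is assumed from the file layout):
```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from disteval.evaluation.feature_importance_test import (
    feature_importance_mad_majority)

rng = np.random.RandomState(0)
X = rng.normal(size=(500, 8))
y = rng.randint(0, 2, size=500)

# several independently seeded forests for the majority vote
clfs = [RandomForestClassifier(n_estimators=50, random_state=i).fit(X, y)
        for i in range(5)]
kept, importance, importance_std = feature_importance_mad_majority(
    clfs, ratio=0.9, alpha=0.10)
```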
#### File: pydisteval/disteval/recursive_selection_parallel.py
```python
from __future__ import absolute_import, print_function, division
from logging import getLogger
from concurrent.futures import ProcessPoolExecutor, wait
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from .basics.classifier_characteristics import ClassifierCharacteristics
logger = getLogger('disteval.recursive_selection')
def recursive_feature_selection_roc_auc(clf,
X,
y,
sample_weight=None,
n_features=10,
cv_steps=10,
n_jobs=1,
forward=True,
matching_features=True):
"""Method building a feature set in a recursive fashion. Depending
on the setting it is run as a forward selection/backward elimination
searching for a set of n features with the highest/lowest mismatch.
To get the set with the size n starting from n_total features the
following approaches are used:
Forward Selection:
To get the k+1 set every not yet selected feature is used to
generate (n_total - k) sets. The set with the best score is the
k + 1 set. Those steps are repeated until n features are selected.
Backward Elimination:
To get the k+1 eliminated features every not yet eliminated feature
is used to generate (n_total - k) sets. The sets consist of all not
yet eliminated features minus the one that is tested. The set with the
best score determines the next feature to eliminate. Those steps are
repeated until n features are eliminated.
What the best score is depends on the settings:
matching_features:
forward: min(|auc - 0.5|)
not forward: max(|auc - 0.5|)
not matching_features:
forward: max(auc)
not forward: min(auc)
Parameters
----------
clf: object
Classifier that should be used for the classification.
It needs a fit and a predict_proba function.
X : numpy.float32array, shape=(n_samples, n_obs)
Values describing the samples.
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
sample_weight : None or numpy.float32array, shape=(n_samples)
If weights are used this has to contain the sample weights.
None in the case of no weights.
n_features : int, optional (default=10)
Number of features that are selected (forward=True) or eliminated
(forward=False)
cv_steps: int, optional (default=10)
Number of cross-validation steps used to evaluate each candidate
feature set.
n_jobs: int, optional (default=1)
Number of parallel jobs spawned for each classification run.
Total number of used cores is the product of n_jobs from the clf
and the n_jobs of this function.
forward: bool, optional (default=True)
If True it is a 'forward selection'. If False it is a 'backward
elimination'.
matching_features: bool, optional (default=True)
Whether matching or mismatching features should be searched for.
Returns
-------
selected_features: list of ints
A list containing the indices of X that were
selected/eliminated. The order corresponds to the order the
features were selected/eliminated.
auc_scores: np.array float shape(n_features_total, n_features)
An array containing the auc values for all steps.
np.nan if the feature was already selected in the specific run.
"""
desired_characteristics = ClassifierCharacteristics()
desired_characteristics.opts['callable:fit'] = True
desired_characteristics.opts['callable:predict_proba'] = True
clf_characteristics = ClassifierCharacteristics(clf)
assert clf_characteristics.fulfilling(desired_characteristics), \
'Classifier sanity check failed!'
if n_features > X.shape[1]:
logger.info(' \'n_features\' higher than total number of features.'
' \'n_features\' reduced!')
n_features = X.shape[1]
auc_scores = np.zeros((X.shape[1], n_features))
selected_features = []
while len(selected_features) != n_features:
auc_scores_i = get_all_auc_scores(clf,
selected_features,
X,
y,
sample_weight=sample_weight,
cv_steps=cv_steps,
n_jobs=n_jobs,
forward=forward)
value_best = None
index_best = None
for idx, auc in enumerate(auc_scores_i):
if not np.isfinite(auc):
continue
if value_best is None:
value_best = auc
index_best = idx
if matching_features:
if forward:
if np.abs(auc - 0.5) < np.abs(value_best - 0.5):
value_best = auc
index_best = idx
else:
if np.abs(auc - 0.5) > np.abs(value_best - 0.5):
value_best = auc
index_best = idx
else:
if forward:
if auc > value_best:
value_best = auc
index_best = idx
else:
if auc < value_best:
value_best = auc
index_best = idx
auc_scores[:, len(selected_features)] = auc_scores_i
selected_features.append(index_best)
return selected_features, auc_scores
def __single_auc_score__(feature_i,
clf,
cv_indices,
X,
y,
sample_weight=None):
"""Method determining the 'area under curve' for a single test set.
This function is intended for internal use.
Parameters
----------
feature_i: int
Index of the tested feature.
clf: object
Classifier that should be used for the classification.
It needs a fit and a predict_proba function.
cv_indices: list of tuples
Indices for all the cross validation steps. They are passed
explicitly, so all tests use the same splitting.
X : numpy.float32array, shape=(n_samples, n_obs)
Values describing the samples.
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
sample_weight : None or numpy.float32array, shape=(n_samples)
If weights are used this has to contain the sample weights.
None in the case of no weights.
Returns
-------
feature_i: int
Index of the tested feature. It is needed as a return value for
asynchronous parallel processing.
auc_score: float
The calculated auc score.
"""
y_pred = np.zeros_like(y, dtype=float)
for i, [train_idx, test_idx] in enumerate(cv_indices):
X_train = X[train_idx]
X_test = X[test_idx]
y_train = y[train_idx]
if sample_weight is None:
sample_weight_train = None
sample_weight_test = None
else:
sample_weight_train = sample_weight[train_idx]
sample_weight_test = sample_weight[test_idx]
clf = clf.fit(X=X_train,
y=y_train,
sample_weight=sample_weight_train)
y_pred[test_idx] = clf.predict_proba(X_test)[:, 1]
# score once over all folds with the full sample_weight array
auc_score = roc_auc_score(y, y_pred, sample_weight=sample_weight)
return feature_i, auc_score
def get_all_auc_scores(clf,
selected_features,
X,
y,
sample_weight=None,
cv_steps=10,
n_jobs=1,
forward=True,
random_state=None):
"""Method determining the 'area under curve' for all not yet
selected features. In this function also the feature sets for the
tests are created.
Parameters
----------
clf: object
Classifier that should be used for the classification.
It needs a fit and a predict_proba function.
selected_features: list of ints
List of already selected features
X : numpy.float32array, shape=(n_samples, n_obs)
Values describing the samples.
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
sample_weight : None or numpy.float32array, shape=(n_samples)
If weights are used this has to contain the sample weights.
None in the case of no weights.
n_jobs: int, optional (default=1)
Number of parallel jobs spawned for each classification run.
Total number of used cores is the product of n_jobs from the clf
and the n_jobs of this function.
forward: bool, optional (default=True)
If True it is a 'forward selection'. If False it is a 'backward
elimination'.
random_state: None, int or RandomState
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
Returns
-------
auc_scores: np.array float shape(n_features_total)
An array containing the auc values. np.nan if the feature
is already selected.
"""
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
selected_features = np.array(selected_features, dtype=int)
if cv_steps < 2:
raise ValueError('\'cv_steps\' must be 2 or higher')
else:
cv_iterator = StratifiedKFold(n_splits=cv_steps,
shuffle=True,
random_state=random_state)
cv_indices = [[train, test] for train, test in cv_iterator.split(X, y)]
test_features = np.array([int(i) for i in range(X.shape[1])
if i not in selected_features], dtype=int)
test_sets = {}
for feature_i in test_features:
if forward:
set_i = np.hstack((selected_features, feature_i))
test_sets[feature_i] = np.sort(set_i)
else:
set_i = list(test_features)
set_i.remove(feature_i)
test_sets[feature_i] = np.array(set_i)
auc_scores = np.empty(X.shape[1])
auc_scores[:] = np.nan
if n_jobs > 1:
futures = []
with ProcessPoolExecutor(max_workers=n_jobs) as executor:
for feature_i, test_set in test_sets.items():
futures.append(executor.submit(__single_auc_score__,
feature_i=feature_i,
clf=clf,
cv_indices=cv_indices,
X=X[:, test_set],
y=y,
sample_weight=sample_weight))
results = wait(futures)
for future_i in results.done:
feature_i, auc = future_i.result()
auc_scores[feature_i] = auc
else:
# keep the pre-filled nan array; reassigning a list here would fail
for feature_i, test_set in test_sets.items():
_, auc = __single_auc_score__(feature_i=feature_i,
clf=clf,
cv_indices=cv_indices,
X=X[:, test_set],
y=y,
sample_weight=sample_weight)
auc_scores[feature_i] = auc
return auc_scores
```
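A usage sketch for the forward selection (not from the repository; data and classifier are placeholders, `matching_features=False` searches for the most mismatch-revealing features, and the import path is assumed from the file layout):
```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from disteval.recursive_selection_parallel import (
    recursive_feature_selection_roc_auc)

rng = np.random.RandomState(0)
X = rng.normal(size=(400, 6))
y = np.hstack((np.zeros(200, dtype=int), np.ones(200, dtype=int)))

clf = RandomForestClassifier(n_estimators=50, random_state=0)
selected, auc_scores = recursive_feature_selection_roc_auc(
    clf, X, y,
    n_features=3,
    cv_steps=5,
    forward=True,
    matching_features=False)
```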
#### File: visualization/comparison_plotter/comparison_plotter.py
```python
from __future__ import absolute_import, print_function, division
import logging
from matplotlib import pyplot as plt
from . import elements
from .components import Component, ColorPalette
from .base_classes import ResultTray, Element
REGISTERED_ELEMENTS = {'aggarwalhisto': elements.AggarwalHisto,
'aggarwalratio': elements.AggarwalRatio,
'limitedmchisto': elements.LimitedMCHisto,
'limitedmcratio': elements.LimitedMCRatio,
'classichisto': elements.ClassicHisto,
'classicratio': elements.ClassicRatio,
'normalization': elements.Normalization}
logger = logging.getLogger("ComparisonPlotter ")
class ComparisonPlotter(object):
"""Class to build up the plot layout and produce the plots!
All parts of the plots are in some way histograms, so continuous
datapoints discretized in bins. For those bins a Poisson
distribution is assumed.
The general concept is that the plot layout is build up with
'Elements'. Elements consist of 'Parts'. There are two kinds of
parts: 'CalcParts' doing calculations and 'PlotParts' visualizing
calculations. The layout is set up once. There is no way to remove
elements once they are added! If you want to change the layout
just create a new ComparisonPlotter instance.
After the layout is created, you have to add components. Components
must have at least a name and the data you want to visualize. The
two mandatory components are the 'test' and the 'ref' components.
It is not possible to add more than one of each of those components.
If one of the components consists of multiple contributions that
should also be plotted, they can be added as 'ref_part's and
'test_part's. Those parts and their data are expected to be
part of the full test/ref component.
As soon as the components are added, the plots are simply drawn with
the draw() method. To produce the same plot for a different feature
you have to reset the plotter with the method finish(). The finish()
method only removes the calculated results and added components
and resets all the Calc/PlotParts.
The process to create the plots is:
1. Adding elements (add_element() for each element of the plot)
for each feature:
2. Adding components (add_ref() for the reference component,
add_test() for the test component,
add_ref_part() for parts of the ref component,
add_test_part() for parts of the test component)
3. Create the plot (draw())
4. Reset the plotter if another feature should be plotted (finish())
Parameters
----------
title : str, optional
Title of the plot.
Attributes
----------
title : str
Title of the plot added to the top of the plot.
"""
def __init__(self, title=''):
self.title = title
self._plot_parts = []
self._calc_parts = []
self._components = []
self._ref_idx = None
self._test_idx = None
self._fig = None
self.color_palette = ColorPalette()
def add_element(self, element, **kwargs):
"""Method to add a Element.
The first element added is plotted at the top of the plot. If parts
of the element are already registered for the plot, they aren't added
a second time.
Note
----
Many elements add the 'CalcBinningPart'. If you want to set
parameters of the part, you have to do it with the first element
added.
Parameters
----------
element : :obj:`comparison_plotter.base_classes.Element`
The first parameter.
**kwarg
Keyword arguments of the element. See element documentation.
"""
if isinstance(element, Element):
element = element(**kwargs)
elif isinstance(element, str):
element_class = REGISTERED_ELEMENTS[element.lower()]
element = element_class(**kwargs)
elif isinstance(element, elements.Element):
pass
else:
raise TypeError('Invalid Type \'element\'!')
logger.debug(u'Adding {}! (Element)'.format(element.name))
element.register(self)
def _register_calc_part(self, part):
if part not in self._calc_parts:
logger.debug(u'\tRegistered {} (CalcPart)!'.format(part.name))
self._calc_parts.append(part)
def _register_plot_part(self, part):
if part not in self._plot_parts:
logger.debug(u'\tRegistered {} (PlotPart)!'.format(part.name))
self._plot_parts.append(part)
def add_ref(self,
label,
X,
livetime=1,
weights=None,
color=None,
cmap=None):
"""Method to add the reference component.
Parameters
----------
label : str
Label of the component.
X : array_like
Datapoints.
livetime : float, optional
Livetime is the time in which the data is taken/produced.
This is used to properly normalize components with
different livetimes against each other. For this purpose only
the relative difference between the livetimes is needed.
So if you have datasets with the same livetime, just set
them all to 1 or use the default.
weights : array_like
Array of weights. Must be of the same length as X.
color : matplotlib compatible color code, optional
A specific color you want this component to have. If None
a color from the color cycle defined in 'components' module
is used.
cmap : matplotlib compatible cmap, optional
This can be the name of a standard matplotlib colormap.
Colormaps are mainly used to get ordered colors to indicate
different confidence levels.
"""
idx = len(self._components)
self._ref_idx = idx
if color is None:
color = self.color_palette.get_color()
if cmap is None:
cmap = self.color_palette.get_cmap()
logger.debug(u'Added \'{}\' (Ref-Component)!'.format(label))
self._components.append(Component(idx=idx,
label=label,
c_type='ref',
X=X,
livetime=livetime,
weights=weights,
color=color,
cmap=cmap))
def add_ref_part(self,
label,
X,
livetime=1,
weights=None,
color=None):
"""Method to add a reference component part.
Parameters
----------
label : str
Label of the component.
X : array_like
Datapoints.
livetime : float, optional
Livetime is the time in which the data is taken/produced.
This is used to properly normalize components with
different livetimes against each other. For this purpose only
the relative difference between the livetimes is needed.
So if you have datasets with the same livetime, just set
them all to 1 or use the default.
weights : array_like
Array of weights. Must be of the same length as X.
color : matplotlib compatible color code, optional
A specific color you want this component to have. If None
a color from the color cycle defined in 'components' module
is used.
"""
logger.debug(u'Added \'{}\' (RefPart-Component)!'.format(label))
self._components.append(Component(idx=len(self._components),
label=label,
c_type='ref_part',
X=X,
livetime=livetime,
weights=weights,
color=color))
def add_test(self,
label,
X,
livetime=1,
weights=None,
color=None,
cmap=None):
"""Method to add a reference component part.
Parameters
----------
label : str
Label of the component.
X : array_like
Datapoints.
livetime : float, optional
Livetime is the time in which the data is taken/produced.
This is used to properly normalize components with
different livetimes against each other. For this purpose only
the relative difference between the livetimes is needed.
So if you have datasets with the same livetime, just set
them all to 1 or use the default.
weights : array_like
Array of weights. Must be of the same length as X.
color : matplotlib compatible color code, optional
A specific color you want this component to have. If None
a color from the color cycle defined in 'components' module
is used.
cmap : matplotlib compatible cmap, optional
This can be the name of a standard matplotlib colormap.
Colormaps are mainly used to get ordered colors to indicate
different confidence levels.
"""
logger.debug(u'Added \'{}\' (Test-Component)!'.format(label))
if color is None:
color = self.color_palette.get_color()
if cmap is None:
cmap = self.color_palette.get_cmap()
self._components.append(Component(idx=len(self._components),
label=label,
c_type='test',
X=X,
livetime=livetime,
weights=weights,
color=color,
cmap=cmap))
def add_test_part(self,
label,
X,
livetime=1,
weights=None,
color=None):
"""Method to add a test component part.
Parameters
----------
label : str
Label of the component.
X : array_like
Datapoints.
livetime : float, optional
Livetime is the time in which the data is taken/produced.
This is used to properly normalize components with
different livetimes against each other. For this purpose only
the relative difference between the livetimes is needed.
So if you have datasets with the same livetime, just set
them all to 1 or use the default.
weights : array_like
Array of weights. Must be of the same length as X.
color : matplotlib compatible color code, optional
A specific color you want this component to have. If None
a color from the color cycle defined in 'components' module
is used.
"""
logger.debug(u'Added \'{}\' (TestPart-Component)!'.format(label))
self._components.append(Component(idx=len(self._components),
label=label,
c_type='test_part',
X=X,
livetime=livetime,
weights=weights,
color=color))
def draw(self, x_label='Feature', fig=None, figsize=(10, 8), **kwargs):
"""Method to start the actual draw process.
In a first step all CalcParts are called for each component.
In a second step the PlotParts are called for each component.
Parameters
----------
x_label : str, optional
Label of the x-axis.
fig : matplotlib.Figure, optional
Figure instance that should be used to draw on. If no instance
is provided a new figure of 'figsize' is created.
figsize : (width, height), optional
Tuple of width and height of the figure that is created.
**kwargs
Arbitrary keyword arguments. These will be passed on to execute
method.
Returns
-------
fig : matplotlib.Figure
The figure to which the plot is added
ax_dict : dict
Dictionary with all the axes created by the PlotParts.
The key is the name of the PlotPart. If a PlotPart creates
more than one axis (e.g. AggarwalRatio) a list with all
axes is added to the dict under the name of the part.
result_tray : comparison_plotter.base_classes.ResultTray
A simple object with all the results of the CalcParts as
the attribute.
"""
logger.debug(u'Start Draw Process!')
logger.debug(u'===================')
result_tray = ResultTray()
result_tray.add(x_label, 'x_label')
result_tray = self._calc(result_tray)
if not isinstance(fig, plt.Figure):
self._fig = plt.figure(figsize=figsize)
else:
self._fig = fig
result_tray.add(self._fig, 'fig')
total_rows = sum([part_i.get_rows() for part_i in self._plot_parts])
row_pointer = total_rows
logger.debug(u'Starting Plotting...')
ax_dict = {}
for i, part_i in enumerate(self._plot_parts):
part_rows = part_i.get_rows()
y1 = row_pointer / total_rows
y0 = (row_pointer - part_rows) / total_rows
x0 = 0.
x1 = 1.
part_i.set_ax(fig=self._fig,
total_parts=len(self._plot_parts),
idx=i,
x0=x0,
x1=x1,
y0=y0,
y1=y1)
row_pointer -= part_rows
for comp_i in self._components:
result_tray = part_i.execute(result_tray, comp_i, **kwargs)
ax_dict[part_i.name] = part_i.get_ax()
part_i.finish(result_tray)
logger.debug(u'Finished!')
return self._fig, ax_dict, result_tray
def draw_multiples(self, gridsize,
x_pos, y_pos,
x0_offset=-0.065,
x1_offset=0.065,
x_label='Feature',
fig=None,
**kwargs):
"""Method to start the actual draw process.
In a first step all CalcParts are called for each component.
In a second step the PlotParts are called for each component.
Parameters
----------
gridsize : tuple
Size of the plot grid (number of columns, number of rows),
matching how x_pos and y_pos are used below.
x_pos : int
Column number to draw to
y_pos : int
Row number to draw to
x0_offset : float, optional
The x0 offset for the new subplot axis.
x1_offset : float, optional
The x1 offset for the new subplot axis.
x_label : str, optional
Label of the x-axis.
fig : matplotlib.Figure, optional
Figure instance that should be used to draw on.
**kwargs
Arbitrary keyword arguments. These will be passed on to execute
method.
Returns
-------
fig : matplotlib.Figure
The figure to which the plot is added
ax_dict : dict
Dictionary with all the axes created by the PlotParts.
The key is the name of the PlotPart. If a PlotPart creates
more than one axis (e.g. AggarwalRatio) a list with all
axes is added to the dict under the name of the part.
result_tray : comparison_plotter.base_classes.ResultTray
A simple object with all the results of the CalcParts as
the attribute.
"""
logger.debug(u'Start Draw Process!')
logger.debug(u'===================')
result_tray = ResultTray()
result_tray.add(x_label, 'x_label')
result_tray = self._calc(result_tray)
self._fig = fig
result_tray.add(self._fig, 'fig')
total_rows = sum([part_i.get_rows() for part_i in self._plot_parts])
row_pointer = total_rows
logger.debug(u'Starting Plotting...')
ax_dict = {}
for i, part_i in enumerate(self._plot_parts):
part_rows = part_i.get_rows()
y1_rel = row_pointer / total_rows
y1_abs = (1 - (y_pos + 1) / gridsize[1]) + \
y1_rel / gridsize[1]
y0_rel = (row_pointer - part_rows) / total_rows
y0_abs = (1 - (y_pos + 1) / gridsize[1]) + \
y0_rel / gridsize[1]
x0 = 0. + x_pos / gridsize[0]
x1 = (x_pos + 1.) / gridsize[0]
part_i.set_ax(fig=self._fig,
total_parts=len(self._plot_parts),
idx=i,
x0=x0+x0_offset,
x1=x1+x1_offset,
y0=y0_abs,
y1=y1_abs,
medium_offsets_only=True)
row_pointer -= part_rows
for comp_i in self._components:
result_tray = part_i.execute(result_tray, comp_i, **kwargs)
ax_dict[part_i.name] = part_i.get_ax()
part_i.finish(result_tray)
logger.debug(u'Finished!')
return self._fig, ax_dict, result_tray
def _calc(self, result_tray):
logger.debug(u'Starting Calculating...')
n_components = len(self._components)
self._calc_parts = sorted(self._calc_parts)
self._components = sorted(self._components)
ref_idx = None
test_idx = None
for i, comp in enumerate(self._components):
comp.idx = i
if comp.c_type == 'ref':
if ref_idx is None:
ref_idx = i
else:
raise RuntimeError('More than one ref component added!')
elif comp.c_type == 'test':
if test_idx is None:
test_idx = i
else:
raise RuntimeError('More than one test component added!')
result_tray.add(n_components, 'n_components')
result_tray.add(ref_idx, 'ref_idx')
result_tray.add(test_idx, 'test_idx')
result_tray.add(self._components[ref_idx].livetime, 'ref_livetime')
result_tray.add(self._components[test_idx].livetime, 'test_livetime')
for part_i in self._calc_parts:
for comp_i in self._components:
result_tray = part_i.execute(result_tray, comp_i)
part_i.finish(result_tray)
logger.debug(u'Finished!')
return result_tray
def finish(self):
"""Method that should be called after drawing.
It resets the color cycle, removes the components and closes
the figure. All added elements are kept.
"""
logger.debug(u'Finishing: resetting color cycle, component and figure!')
if self._fig is not None:
plt.close(self._fig)
self._fig = None
self._components = []
self._ref_idx = None
self._test_idx = None
self.color_palette.reset()
def reset(self, title=''):
"""Method that can be used to do a full reset of the plotter.
In addition to ComparisonPlotter.finish(), all elements are
removed. If no title is provided, the title is emptied as well.
Parameters
----------
title : str, optional
Title of the plot.
"""
logger.debug(u'Reset: resetting parts and components!')
self.finish()
self.title = title
self._plot_parts = []
self._calc_parts = []
```
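A sketch of the workflow described in the class docstring (not from the repository; the element names come from REGISTERED_ELEMENTS above, the element constructors are assumed to work with their defaults, and the import path is assumed from the file layout):
```python
import numpy as np
from disteval.visualization.comparison_plotter.comparison_plotter import (
    ComparisonPlotter)

plotter = ComparisonPlotter(title='Feature comparison')
plotter.add_element('classichisto')
plotter.add_element('classicratio')

rng = np.random.RandomState(0)
plotter.add_ref('Reference', rng.normal(size=1000))
plotter.add_test('Test', rng.normal(0.1, 1., size=1000))

fig, ax_dict, result_tray = plotter.draw(x_label='Feature 0')
fig.savefig('feature_0.png')
plotter.finish()  # resets components (and closes the figure) for the next feature
```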
#### File: comparison_plotter/functions/plot_funcs.py
```python
from __future__ import unicode_literals
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ColorConverter
import matplotlib.transforms as transforms
from colorsys import rgb_to_hls, hls_to_rgb
from .calc_funcs import map_aggarwal_ratio, rescale_limit
from . import legend_entries as le
MAIN_ZORDER = 4
def modify_color(color,
d_saturation=0.,
d_lightness=0.):
conv = ColorConverter()
if not isinstance(color, tuple):
rgb_color = conv.to_rgb(color)
else:
rgb_color = color
hls_color = rgb_to_hls(*rgb_color)
new_l = max(0, min(0.9, hls_color[1] + d_lightness))
new_s = max(0, min(1, hls_color[2] + d_saturation))
return hls_to_rgb(hls_color[0], new_l, new_s)
def plot_inf_marker(fig,
ax,
binning,
place_marker,
markeredgecolor='k',
markerfacecolor='none',
bot=True,
alpha=1.,
rel_marker_size=0.007):
# compute marker size
pixel_width, pixel_height = fig.canvas.get_width_height()
markersize = pixel_height * rel_marker_size
# get coordinate transformation
trans = transforms.blended_transform_factory(
ax.transData, fig.transFigure)
bbox = ax.get_position()
if bot:
y0 = bbox.y0 + rel_marker_size
marker = 'v'
else:
y0 = bbox.y1 - rel_marker_size
marker = '^'
bin_center = (binning[1:] + binning[:-1]) / 2
for bin_i, place in zip(bin_center, place_marker):
if place:
ax.plot([bin_i, ], [y0, ],
transform=trans,
marker=marker,
markerfacecolor=markerfacecolor,
markeredgecolor=markeredgecolor,
markersize=markersize,
figure=fig,
linewidth=1.,
zorder=MAIN_ZORDER + 1,
alpha=alpha)
def plot_finite_marker(ax, x, y, facecolor, edgecolor, alpha):
ax.plot(x,
y,
ls='',
mew=1.,
marker='o',
markeredgecolor=edgecolor,
markerfacecolor=facecolor,
alpha=alpha,
ms=5,
zorder=MAIN_ZORDER + 1)
def plot_data_style(fig,
ax,
bin_edges,
y,
facecolor,
edgecolor,
alpha,
ms=5):
zero_mask = y > 0
bin_mids = (bin_edges[1:] + bin_edges[:-1]) / 2.
plot_finite_marker(ax,
x=bin_mids[zero_mask],
y=y[zero_mask],
facecolor=facecolor,
edgecolor=edgecolor,
alpha=alpha)
plot_inf_marker(fig, ax,
bin_edges,
~zero_mask,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
alpha=alpha)
return le.DataObject(facecolor,
edgecolor,
facecolor,
edgecolor)
def plot_uncertainties(ax, bin_edges, uncert, color, cmap):
n_alpha = uncert.shape[1]
cmap = plt.get_cmap(cmap)
colors = cmap(np.linspace(0.1, 0.9, n_alpha))
legend_entries = []
legend_entries.append(le.UncertObject(colors, color))
for i, c in enumerate(colors[::-1]):
j = n_alpha - i - 1
lower_limit = uncert[:, j, 0]
upper_limit = uncert[:, j, 1]
mask = np.isfinite(lower_limit)
lower_limit[~mask] = 0.
mask = np.isfinite(upper_limit)
upper_limit[~mask] = 0.
plot_band(ax,
bin_edges,
lower_limit,
upper_limit,
c,
alpha=1.,
borders=False,
brighten=False,
zorder=MAIN_ZORDER)
for i, c in enumerate(colors):
legend_entries.append(le.UncertObject_single(c))
return legend_entries
def plot_band(ax,
bin_edges,
y_err_low,
y_err_high,
color,
alpha=0.5,
borders=1.,
brighten=True,
zorder=None):
if isinstance(borders, bool):
if borders:
border_lw = 0.3
plot_borders = True
else:
plot_borders = False
elif isinstance(borders, float):
border_lw = borders
plot_borders = True
else:
plot_borders = False
if zorder is None:
zorder = MAIN_ZORDER - 1
if brighten:
band_color = modify_color(color, 0, 0.4)
else:
band_color = color
alpha = min(1., max(0., alpha))
ax.fill_between(bin_edges,
np.append(y_err_low[0], y_err_low),
np.append(y_err_high[0], y_err_high),
step='pre',
color=band_color,
edgecolor=band_color,
linewidth=0.0,
alpha=alpha,
zorder=zorder - 1)
if plot_borders:
if brighten:
band_color = modify_color(color, 0, 0.2)
else:
band_color = color
# draw the borders with the (possibly brightened) border color
plot_hist(ax,
bin_edges,
y_err_low,
band_color,
lw=border_lw,
alpha=1.0,
zorder=zorder)
plot_hist(ax,
bin_edges,
y_err_high,
band_color,
lw=border_lw,
alpha=1.0,
zorder=zorder)
legend_obj = None
return legend_obj
def plot_hist(ax,
bin_edges,
y,
color,
yerr=None,
lw=1.6,
alpha=1.0,
zorder=None):
if zorder is None:
zorder = MAIN_ZORDER
alpha = min(1., max(0., alpha))
bin_mids = (bin_edges[1:] + bin_edges[:-1]) / 2.
nan_mask = np.isfinite(y)
bin_mids_masked = bin_mids[nan_mask]
y_masked = y[nan_mask]
xerr_masked = (np.diff(bin_edges) / 2)[nan_mask]
if yerr is not None:
yerr_masked = yerr[nan_mask]
else:
yerr_masked = None
errorbar = ax.errorbar(x=bin_mids_masked,
y=y_masked,
ls='',
xerr=xerr_masked,
yerr=yerr_masked,
color=color,
markersize=0,
capsize=0,
lw=lw,
zorder=zorder,
label='Test')
return errorbar
def plot_line(ax,
bin_edges,
y,
color,
lw=1.6,
alpha=1.0,
zorder=None):
if zorder is None:
zorder = MAIN_ZORDER
alpha = min(1., max(0., alpha))
obj, = ax.plot(bin_edges,
np.append(y[0], y),
drawstyle='steps-pre',
lw=lw,
c=color,
label='test',
alpha=alpha,
zorder=zorder)
return obj
def plot_test_ratio_mapped(fig,
ax,
bin_edges,
ratio,
is_above,
facecolor,
edgecolor,
alpha):
bin_mids = (bin_edges[1:] + bin_edges[:-1]) / 2.
is_finite = np.isfinite(ratio)
finite_mask_upper = np.logical_and(is_finite, is_above)
finite_mask_lower = np.logical_and(is_finite, ~is_above)
plot_finite_marker(ax,
x=bin_mids[finite_mask_upper],
y=ratio[finite_mask_upper],
facecolor=facecolor,
edgecolor=edgecolor,
alpha=alpha)
plot_finite_marker(ax,
x=bin_mids[finite_mask_lower],
y=ratio[finite_mask_lower],
facecolor=facecolor,
edgecolor=edgecolor,
alpha=alpha)
oor_mask_upper = np.logical_and(is_above, np.isposinf(ratio))
no_ratio_mask_upper = np.logical_and(is_above, np.isneginf(ratio))
plot_inf_marker(fig,
ax,
bin_edges,
oor_mask_upper,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
bot=False)
plot_inf_marker(fig,
ax,
bin_edges,
no_ratio_mask_upper,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
bot=False,
alpha=0.5)
oor_mask_lower = np.logical_and(~is_above, np.isposinf(ratio))
no_ratio_mask_lower = np.logical_and(~is_above, np.isneginf(ratio))
plot_inf_marker(fig,
ax,
bin_edges,
oor_mask_lower,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
bot=True)
plot_inf_marker(fig,
ax,
bin_edges,
no_ratio_mask_lower,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
bot=True,
alpha=0.5)
def generate_ticks_for_aggarwal_ratio(y_0, y_min, max_ticks_per_side=5):
y_min_floored = np.floor(y_min)
y_0_log = np.log10(y_0)
tick_pos = []
n_ticks = 1
tick_pos.append(y_0_log)
if y_0_log != np.floor(y_0_log):
tick_pos.append(np.floor(y_0_log))
n_ticks += 2
while tick_pos[-1] > y_min_floored:
tick_pos.append(tick_pos[-1] - 1)
n_ticks += 2
n_ticks_per_side = (n_ticks - 1) / 2
major_step_size = np.ceil(n_ticks_per_side / max_ticks_per_side)
tick_pos_mapped, y_min_ticks = map_aggarwal_ratio(np.power(10, tick_pos),
y_0=1.)
tick_pos_mapped = rescale_limit(tick_pos_mapped,
y_min_ticks,
y_min)
major_ticks = []
major_ticks_labels = []
minor_ticks = []
minor_ticks_labels = []
major_tick_counter = 0
for i, [p, l] in enumerate(zip(tick_pos_mapped, tick_pos)):
lab = r'$10^{{\mathrm{{{:.0f}}}}}$'.format(l)
if i == 0:
major_ticks_labels.append(lab)
major_ticks.append(0)
else:
if major_tick_counter == major_step_size:
major_ticks.extend([p * -1, p])
major_ticks_labels.extend([lab, lab])
major_tick_counter = 0
else:
minor_ticks.extend([p * -1, p])
minor_ticks_labels.extend([lab, lab])
major_tick_counter += 1
return major_ticks_labels, major_ticks, minor_ticks_labels, minor_ticks
```
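A sketch using the low-level helpers above (not from the repository; binning and counts are invented, and the import path is assumed from the file layout):
```python
import numpy as np
from matplotlib import pyplot as plt
from disteval.visualization.comparison_plotter.functions.plot_funcs import (
    plot_hist, plot_band)

fig, ax = plt.subplots()
bin_edges = np.linspace(0., 1., 11)
y = np.random.poisson(100, size=10).astype(float)

# step histogram with a +/- sqrt(N) band behind it
plot_hist(ax, bin_edges, y, color='C0')
plot_band(ax, bin_edges, y - np.sqrt(y), y + np.sqrt(y), color='C0')
fig.savefig('hist_with_band.png')
```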
#### File: disteval/visualization/feature_importance_test.py
```python
from __future__ import absolute_import, print_function, division
import numpy as np
from matplotlib import pyplot as plt
def visualize_feature_importance_mad(return_list,
X_names,
annoting_text='auto',
manual_x_lims=None,
save_path=None,
fig_size=(12, 10)):
"""Function visualizing the output of the
disteval.evaluation.feature_importance_mad(_majority). It plots
a histogram for the feature importances with a rug plot.
Removed features are marked and can be labeled.
Parameters
----------
return_list: list
List of all returns from the feature_importance_mad or
feature_importance_mad_majority function.
X_names: list of strings
Name of the columns of X.
annoting_text: [True, False, 'auto'], optional (default='auto')
Whether the names of the removed features should be plotted.
If 'auto' the x_lims are autoscaled to try to fit in all the
names and the names are only printed if 10 or fewer features
are removed.
manual_x_lims: array-like, shape=(2), optional (default=None)
Array with x limits. Useful if the names of the removed features
don't fit on the figure.
save_path : None or string, optional (default=None)
Path under which the plot should be saved. If None only the
figure and the axis are returned. If 'show' plt.show() is called.
fig_size : array-like, shape=(2), optional (default=(12,10))
Size of the figure.
Returns
-------
fig: matplotlib.figure
The created figure.
ax: matplotlib.axes.Axes
The created axis.
"""
fig, ax = plt.subplots(figsize=fig_size)
kept = return_list[0]
feature_importance = return_list[1]
ax.hist(feature_importance, bins=30)
for x_i in feature_importance[kept]:
ax.axvline(x_i, 0, 0.05, linewidth=0.3, color='k')
y_lims = ax.get_ylim()
x_lims = ax.get_xlim()
dx = (x_lims[1] - x_lims[0]) * 0.01
dy = (y_lims[1] - y_lims[0]) * 0.02
length = (y_lims[1] - y_lims[0]) * 0.08
y_0 = y_lims[0] + (y_lims[1] - y_lims[0]) * 0.05
n_removed = sum(~kept)
if isinstance(annoting_text, bool):
if annoting_text:
do_text = True
else:
do_text = False
else:
if n_removed <= 10:
do_text = True
ax.set_xlim(x_lims[0], x_lims[1] + (x_lims[1] - x_lims[0]) * 0.25)
else:
do_text = False
removed_names = [name_i for name_i, kept_i in zip(X_names, kept)
if not kept_i]
removed_x = feature_importance[~kept]
order = np.argsort(feature_importance[~kept])[::-1]
for i, idx in enumerate(order):
x_i = removed_x[idx]
ax.axvline(x_i, 0, 0.05, linewidth=0.3, color='r', zorder=3)
if do_text:
ax.annotate(removed_names[idx],
xy=(x_i, y_0),
xytext=(x_i + dx, y_0 + length + i * dy),
arrowprops=dict(facecolor='0.6',
edgecolor='0.6',
shrink=0.05),
size=10,
family='monospace',
color='0.6')
ax.set_ylabel('Number of Features')
ax.set_xlabel('Feature Importance')
if manual_x_lims is not None:
ax.set_xlim(manual_x_lims)
if save_path == 'show':
plt.show()
elif save_path is not None:
fig.savefig(save_path)
return fig, ax
```
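A sketch combining the evaluation and visualization helpers (not from the repository; data, feature names, and the classifier are placeholders, and the import paths are assumed from the file layout):
```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from disteval.evaluation.feature_importance_test import feature_importance_mad
from disteval.visualization.feature_importance_test import (
    visualize_feature_importance_mad)

rng = np.random.RandomState(0)
X = rng.normal(size=(500, 10))
y = rng.randint(0, 2, size=500)

clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
return_list = feature_importance_mad(clf, alpha=0.05)

X_names = ['feature_%d' % i for i in range(X.shape[1])]
fig, ax = visualize_feature_importance_mad(return_list, X_names,
                                           save_path='show')
```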
|
{
"source": "Jebux01/fastapi_curse",
"score": 3
}
|
#### File: app/models/model_users.py
```python
from pydantic import BaseModel, validator
from typing import Optional
class user(BaseModel):
username: str
password: str
name: str
age: int
profession: Optional[str] = 'Developer'
# decorator
# @validator('*')
# def empty_values(cls, v):
#     if len(v) == 0:
#         raise ValueError('All required information must be provided')
#     return v.title()
@validator('password')
def validate_password(cls, v):
if len(v) == 0:
raise ValueError('A password is required')
return v.title()
```
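A short usage sketch for the model above (not part of the repository):
```python
from pydantic import ValidationError

# a valid user; 'profession' falls back to its default value
u = user(username='jeb', password='secret', name='Jeb', age=25)

# an empty password is rejected by the validator above
try:
    user(username='jeb', password='', name='Jeb', age=25)
except ValidationError as err:
    print(err)
```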
|
{
"source": "jecalles/synbio",
"score": 2
}
|
#### File: synbio/codes/utils.py
```python
import itertools
import pickle
import random
from collections import deque
from copy import copy
from math import comb as binomial
from pathlib import Path
from synbio.utils import (
aminoacids, kdHydrophobicity, rNTPs, rna_basepairing, triplet_rna_codons,
)
# define scope of package
__all__ = [
# definitions
"unrestricted_block", "standard_block", "natural_block", "dna_wobbling",
"rna_wobbling", "standard_code", "colorado_code", "RED20", "RED15",
"FS20", "FS16",
# functions
"get_aa_counts", "get_block_counts", "is_ambiguous", "is_promiscuous",
"is_one_to_one", "get_codon_connectivity", "get_resi_connectivity",
"get_codon_neighbors", "table_to_blocks", "blocks_to_table", "check_block",
"random_code", "num_codes", "silencicity", "mutability", "promiscuity",
"mut_pair_num", "get_mut_pairs", "order_NTPs",
]
def __dir__():
default = [key for key in globals().keys() if key[:2] == '__']
return default + __all__
###############
# definitions #
###############
def _get_block(grouping):
return {
i: list(group)
for i, group in enumerate(grouping)
}
# each codon forms its own block (wrap in a list so that list(group)
# yields the codon, not its individual bases)
unrestricted_block = _get_block([codon] for codon in triplet_rna_codons)
def _get_standard_grouping(codon):
return 0 if codon[-1] in {'U', 'C'} \
else 1 if codon[-1] == 'A' \
else 2
# materialize each group while iterating: groupby's sub-iterators are
# invalidated as soon as the parent iterator advances
_standard_grouping = [list(group) for _, group in
itertools.groupby(triplet_rna_codons, _get_standard_grouping)]
standard_block = _get_block(_standard_grouping)
natural_block = {
0: ['UUU', 'UUC'],
1: ['UUA', 'UUG'],
2: ['CUU', 'CUC', 'CUA', 'CUG'],
3: ['AUU', 'AUC', 'AUA'],
4: ['AUG'],
5: ['GUU', 'GUC', 'GUA', 'GUG'],
6: ['UCU', 'UCC', 'UCA', 'UCG'],
7: ['CCU', 'CCC', 'CCA', 'CCG'],
8: ['ACU', 'ACC', 'ACA', 'ACG'],
9: ['GCU', 'GCC', 'GCA', 'GCG'],
10: ['UAU', 'UAC'],
11: ['UAA', 'UAG'],
12: ['CAU', 'CAC'],
13: ['CAA', 'CAG'],
14: ['AAU', 'AAC'],
15: ['AAA', 'AAG'],
16: ['GAU', 'GAC'],
17: ['GAA', 'GAG'],
18: ['UGU', 'UGC'],
19: ['UGA'],
20: ['UGG'],
21: ['CGU', 'CGC', 'CGA', 'CGG'],
22: ['AGU', 'AGC'],
23: ['AGA', 'AGG'],
24: ['GGU', 'GGC', 'GGA', 'GGG']
}
# define Watson Crick Wobbling Rules
dna_wobbling = {
'T': ['A', 'G'],
'C': ['G'],
'A': ['T', 'C'],
'G': ['T', 'C'],
'I': ['A', 'C', 'T']
}
rna_wobbling = {
'U': ['A', 'G'],
'C': ['G'],
'A': ['U', 'C'],
'G': ['U', 'C'],
'I': ['A', 'C', 'U']
}
# define standard code
standard_code = {
'UUU': 'F',
'UUC': 'F',
'UUA': 'L',
'UUG': 'L',
'UCU': 'S',
'UCC': 'S',
'UCA': 'S',
'UCG': 'S',
'UAU': 'Y',
'UAC': 'Y',
'UAA': '*',
'UAG': '*',
'UGU': 'C',
'UGC': 'C',
'UGA': '*',
'UGG': 'W',
'CUU': 'L',
'CUC': 'L',
'CUA': 'L',
'CUG': 'L',
'CCU': 'P',
'CCC': 'P',
'CCA': 'P',
'CCG': 'P',
'CAU': 'H',
'CAC': 'H',
'CAA': 'Q',
'CAG': 'Q',
'CGU': 'R',
'CGC': 'R',
'CGA': 'R',
'CGG': 'R',
'AUU': 'I',
'AUC': 'I',
'AUA': 'I',
'AUG': 'M',
'ACU': 'T',
'ACC': 'T',
'ACA': 'T',
'ACG': 'T',
'AAU': 'N',
'AAC': 'N',
'AAA': 'K',
'AAG': 'K',
'AGU': 'S',
'AGC': 'S',
'AGA': 'R',
'AGG': 'R',
'GUU': 'V',
'GUC': 'V',
'GUA': 'V',
'GUG': 'V',
'GCU': 'A',
'GCC': 'A',
'GCA': 'A',
'GCG': 'A',
'GAU': 'D',
'GAC': 'D',
'GAA': 'E',
'GAG': 'E',
'GGU': 'G',
'GGC': 'G',
'GGA': 'G',
'GGG': 'G',
}
# define refactored [sic] code from Pines et al 2017 (aka Colorado code)
colorado_code = {
'GAA': 'V',
'UCG': 'V',
'CGU': 'V',
'UGA': 'L',
'AAU': 'L',
'CUC': 'L',
'CCA': 'I',
'GGG': 'I',
'UUU': 'I',
'UAC': 'I',
'CAG': 'A',
'AUA': 'A',
'GCU': 'A',
'AGC': 'A',
'GAU': 'E',
'ACA': 'E',
'UUC': 'E',
'CGG': 'E',
'UGU': 'D',
'AAC': 'D',
'GUG': 'D',
'UAA': '*',
'UCU': 'P',
'AUG': 'P',
'GUC': 'P',
'CAA': 'P',
'GAC': 'T',
'UCA': 'T',
'CCC': 'S',
'AGG': 'S',
'AUU': 'Q',
'GGA': 'Q',
'UGC': 'N',
'CAU': 'N',
'GCG': 'M',
'CUA': 'M',
'AAA': 'C',
'UUG': 'C',
'GGU': 'C',
'CUU': 'G',
'AGU': 'G',
'ACC': 'G',
'UAG': 'G',
'UGG': 'R',
'GCA': 'R',
'CAC': 'R',
'GGC': 'H',
'CCG': 'H',
'UUA': 'H',
'ACU': 'H',
'CGA': 'K',
'UCC': 'K',
'GUU': 'K',
'AAG': 'K',
'CCU': 'Y',
'GAG': 'Y',
'AUC': 'Y',
'CGC': 'W',
'ACG': 'W',
'GUA': 'W',
'UAU': 'W',
'GCC': 'F',
'CUG': 'F',
'AGA': 'F',
}
# get RED20 and RED15 from file
def _get_pickle_path(local_path_str):
_basepath = Path(__file__).absolute().parent
return Path(
_basepath, local_path_str
)
with open(_get_pickle_path('res/RED20.pickle'), 'rb') as handle:
RED20 = pickle.load(handle)
with open(_get_pickle_path('res/RED15.pickle'), 'rb') as handle:
RED15 = pickle.load(handle)
with open(_get_pickle_path('res/FS20.pickle'), 'rb') as handle:
FS20 = pickle.load(handle)
with open(_get_pickle_path('res/FS16.pickle'), 'rb') as handle:
FS16 = pickle.load(handle)
#############
# functions #
#############
def get_aa_counts(table):
""" A function that takes a Code and finds the counts of each
AA. Returns a dictionary mapping AA to their respective counts.
Parameters
----------
dict table: a python dict representing the codon table
Returns
-------
dict AA_count: a python dict mapping amino acids to degeneracy
"""
# declare dictionary of AA counts
AA_count = {}
# iterate over key and value pairs in self.table
for codon, AA in table.items():
# handle case where AA is previously uncounted
if AA not in AA_count:
# add AA to AA_count and initialize count value to 1
AA_count[AA] = 1
# else, increment AA count
else:
AA_count[AA] += 1
# return AA_count dictionary
return AA_count
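# Usage sketch (not in the original module): with the standard code,
# leucine is six-fold degenerate:
#     >>> get_aa_counts(standard_code)['L']
#     6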
def get_block_counts(blocks):
""" A function that takes a Code represented in block structure
form and finds the number of blocks encoding each AA. Returns a
dictionary mapping AA to their respective counts.
Parameters
----------
dict blocks: a python dict representing the codon table in block form
Returns
-------
dict block_counts: a python dict mapping amino acids to degeneracy
"""
# initialize dict of counts and populate keys
block_counts = {}
for AA in aminoacids:
block_counts[AA] = 0
# increment counts
for AA in blocks.values():
block_counts[AA] += 1
# return block_counts
return block_counts
def is_ambiguous(table):
"""A staticmethod that takes a codon table as a dictionary and returns True
if it is ambiguous and False if not.
Parameters
----------
dict table: a python dict representing the codon table
Returns
-------
bool ambiguous: boolean representing the ambiguity of the table
"""
# use promiscuity method to determine ambiguity
try:
__ = promiscuity(table, allow_ambiguous=False) # fails if ambiguous
ambiguous = False
except ValueError:
ambiguous = True
return ambiguous
def is_promiscuous(table):
"""
A function that takes a codon table as a dictionary and returns True
if it represents a promiscuous table and False if not.
Parameters
----------
dict table: a python dict representing the codon table
Returns
-------
bool promiscuous: boolean representing the promiscuity of the table
"""
return any(
type(AA) != str
for AA in table.values()
)
def is_one_to_one(table):
"""A staticmethod that takes a codon table as a dictionary and returns
True if it represents a One-To-One genetic code and False otherwise.
A one-to-one code is defined as a code in which every amino acid is
represented with exactly one codon. This defines an unambiguous
mapping of protein sequence to corresponding DNA sequence.
Parameters
----------
dict table: a python dict representing the codon table
Returns
-------
bool one2one: boolean; True if One-To-One, and False otherwise
"""
# declare storage dict to count amino acid number
aa_set = set(aa for aa in table.values())
aa_counts = {aa: 0 for aa in aa_set}
# count number of amino acids
for aa in table.values():
aa_counts[aa] += 1
# iterate through dictionary and check counts
one2one = True
for aa, count in aa_counts.items():
# skip stop and null signals:
if aa in {'*', '0'}:
continue
elif count > 1:
one2one = False
break
return one2one
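# Usage sketch (not in the original module): the standard code maps
# several codons to most amino acids, so it is not one-to-one:
#     >>> is_one_to_one(standard_code)
#     False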
def get_codon_connectivity(table):
"""get_codon_connectivity(dict table) a function that takes a codon table
and finds the graph distance between codon pairs. Connectivity is
defined as follows: two codons c and c' are connected if a series of
point mutations can convert c to c' without changing which amino acid
is encoded until the last mutation (i.e. only one AA change is
allowed per path).
Outputs a dict of str --> list of (str, int) tuples representing a list
of the connected codons and their distance. Implemented as breadth
first search.
Parameters
----------
dict table: a python dict representing the codon table
Returns
-------
dict dist_dict: a python dictionary representing the adjacency matrix of
a codon table with respect to codon neighbors.
"""
# declare dictionary of distances
dist_dict = {}
# loop through all possible codons
for codon in table.keys():
cache = {codon}  # visited codons, seeded with the input codon
codon_deque = deque()
neighbors = []
# use connect_recurse to map connectivity
dist_dict[codon] = _connect_recurse(
codon, 1, table, neighbors,
codon_deque, cache
)
# return codon_dist
return dist_dict
def _connect_recurse(codon, level, table, neighbors, codon_deque, cache):
""" A recursive helper function that finds all of a codon's nearest
neighbors and how far away they are. Returns a list of tuples
representing the codons and their distances away.
Codons are said to be connected if going from c --> c' converts the
decoded AA from A --> A' without an intermediate residue.
Parameters
----------
- str codon: a string representing the input codon
- int level: the current number of mutations away from the start codon
- dict table: a python dict representing the codon table
- list neighbors: the current list of the base codon's nearest neighbors
- deque codon_deque: the queue of codons to search recursively
- set cache: memoization set to store previously visited codons
Returns
-------
list neighbors: returns updated neighbors list
"""
# loop through every codon one mutation away
for i, base in enumerate(codon):
for nt in rNTPs:
# handle if nt is the same as base
if nt == base:
continue
# if not, generate new codon
c_new = codon[:i] + nt + codon[i + 1:]
# Base case: c_new already found
if c_new in cache:
continue
# Base case: found terminus
elif table[c_new] != table[codon]:
# add distance to neighbors list
neighbors.append((c_new, level))
# add c_new to cache of found codons
cache.add(str(c_new))
# Recursive case
else:
# add c_new to cache of found codons
cache.add(c_new)
# append c_new to queue of codons to recurse through
codon_deque.appendleft((c_new, level))
# iterate over codons to recursively search for connectivity
while not len(codon_deque) == 0:
# get next codon to search
c, newlevel = codon_deque.pop()
# append results to neighbors list
neighbors = _connect_recurse(c, newlevel + 1, table,
neighbors, codon_deque, cache)
# return resulting list
return neighbors
def get_resi_connectivity(table):
""" get_resi_connectivity(dict table): a function that takes a dictionary
representing a codon table and outputs a dictionary mapping amino acids
to their respective neighbors, along with number of mutations away.
"""
# call get_codon_connectivity
codon_dist_dict = get_codon_connectivity(table)
# declare dict to return
resi_dist_dict = {}
# loop over codons
for c1, codon_neighbors in codon_dist_dict.items():
# extract amino acid for c1 and declare neighbors list
A1 = table[c1]
aa_neighbors = []
# loop over elements of neighbors list
for (c2, level) in codon_neighbors:
# convert neighbors to residues and store
A2 = table[c2]
aa_neighbors.append((A2, level))
# store resulting list in resi_dist_dict
if A1 not in resi_dist_dict:
resi_dist_dict[A1] = aa_neighbors
else:
resi_dist_dict[A1] += aa_neighbors
# return dictionary
return resi_dist_dict
def get_codon_neighbors(codon):
"""A function used to get all codons one mutation away from the given codon.
Parameters
----------
str codon: the codon whose neighbors will be returned
Returns
-------
list<str> neighbors: a list of codons one mutation away
"""
# declare list of neighbors
neighbors = []
# generate nearest neighbors by looping over codon positions
for i, base in enumerate(codon):
for nt in rNTPs:
# handle if nt is the same as base
if nt == base:
continue
# if not, generate new codon
c_new = codon[:i] + nt + codon[i + 1:]
# store new codon in neighbors
neighbors.append(c_new)
# return resulting list
return neighbors
def table_to_blocks(table, block_struct):
"""A function that takes a codon table and returns the
representation as blocks of codons (individual tRNAs) as opposed to
individual codons.
Parameters
----------
- dict table: a python dict representing the codon table
- dict block_struct: a python dict representing the table block structure
Returns
-------
- dict blocks: a python dict representing the codon table in block form
- bool False: an "exception" if input table does not match block_struct
"""
# run check_block to confirm proper block structure, returns False if not
if not check_block(table, block_struct):
return False
# declare dictionary to return
blocks = {}
# loop over block_struct and populate blocks
for block_ind, codon_list in block_struct.items():
blocks[block_ind] = table[codon_list[0]]
# return populated blocks dict
return blocks
def blocks_to_table(blocks, block_struct):
"""A function that takes a codon table represented in block
structure form and returns the representation as a traditional codon
table
Parameters
----------
dict blocks: a python dict representing the codon table in block form
dict block_struct: a python dict representing the table block structure
Returns
-------
dict table: a python dict representing the codon table
bool False: an "exception" if input table does not match block_struct
"""
# declare table to return
table = {}
# loop over blocks in block_struct and assign to table using blocks
for block_ind, codon_list in block_struct.items():
block_aa = blocks[block_ind]
for codon in codon_list:
table[codon] = block_aa
# return filled codon table
return table
def check_block(table, block_struct):
"""A function used to check whether a given codon table conforms
to the given block structure
Parameters
----------
dict table: a python dict representing the codon table
dict block_struct: a python dict representing the table block structure
Returns
-------
bool valid: true->table conforms to block structure; false otherwise
"""
# loop over codons in each block; return false if they code for
# different residues
for codon_list in block_struct.values():
# initialize set of residues that a block codes for and populate
block_residues = set()
for codon in codon_list:
block_residues.add(table[codon])
# return false if the set is more than one element long
if len(block_residues) > 1:
return False
# if function reaches this point, return True
return True
def random_code(block_structure='standard'):
"""A function used to generate a random codon table, optionally
    defining the block structure. Guarantees that each amino acid is
    represented by at least one block in the table.
Parameters
----------
str block_structure = 'standard': a string telling the simulator which
wobble rules to follow for accepting new tables
Acceptable inputs:
- 'standard' : 48 blocks
- 'preserve_block' : maintain same block structure as standard table
- 'unrestricted' : 63 open blocks, at least 1 of every AA and stop.
Returns
-------
dict table: a python dict representing the codon table to return
"""
# determine block structure based on wobble rule
block_choices = {
'standard': standard_block,
'preserve_block': natural_block,
'unrestricted': unrestricted_block
}
try:
block_struct = copy(block_choices[block_structure])
except KeyError:
raise ValueError(
'block_structure string not recognized. '
'Use one of the following options: {0}'.format(
set(block_choices.keys())
)
)
# get blocks to assign
blocks = list(block_struct.keys())
random.shuffle(blocks)
# randomly assign one block to each residue
for AA in aminoacids:
block = blocks.pop()
block_struct[block] = AA
# randomly assign values to the remaining blocks
for block in blocks:
AA = random.choice(aminoacids)
block_struct[block] = AA
    # block_struct now maps block -> AA (i.e. it is the `blocks` argument);
    # the untouched original dict supplies the block -> codon-list structure
    return blocks_to_table(block_struct, block_choices[block_structure])
def num_codes(l_aa, b):
"""A function used to calculate the number of codon tables
realizable given a number of amino acids to include, length of the
codon, and number of blocks. Relies on an inclusion/exclusion criterion
(i.e. count the total number of codon tables, minus the number that do
not include one AA, plus the number that do not include two AAs...)
l_aa = length of amino acid alphabet (20 + 1 stop)
b = number of blocks to assign (triplet most permissive = 48, quadruplet
most permissive = 192)
n = l_aa^b + Sum_i^(l_aa-1) [(-1)^i * binomial(l_aa, i) * (l_aa - i)^b]
Parameters
----------
int l_aa: the number of amino acids + Stop to encode
int b: the number of blocks in the codon table
Returns
-------
int n: the number of possible tables
str num: n, represented in scientific notation as a string
"""
# calculate n
n = l_aa ** b + sum(
(-1) ** i * binomial(l_aa, i) * (l_aa - i) ** b
for i in range(1, l_aa)
)
# handle string processing
mag = -1
temp_n = n
while temp_n > 0:
# increment mag for each order of magnitude
temp_n = temp_n // 10
mag += 1
# create string representing n in scientific notation
str_n = str(n)[:3]
num = '{0}.{1}E{2}'.format(str_n[0], str_n[1:], mag)
return n, num
def silencicity(table):
"""A function used to calculate the silencicity of a codon table.
    Silencicity is a lab-defined metric: the fraction of all possible
    point mutations that are synonymous.
Parameters
----------
dict table: a python dict representing the codon table to analyze
Returns
-------
float silencicity: a float representing the silencicity metric
"""
# initialize counter and get mutation pairs
syn_mut = 0
mut_pairs = get_mut_pairs(table)
total_mut = len(mut_pairs)
# loop over mutation pairs and increment for synonymous mutations
for (c1, c2) in mut_pairs:
if table[c1] == table[c2]:
syn_mut += 1
# return fraction of synonymous mutations
return syn_mut / total_mut
def mutability(table):
"""A function used to calculate the average chemical variability
of single point mutations in a given genetic code. For each
nonsynonymous single point mutation, it calculates the chemical
distance between the previously encoded amino acid and its replacement
after mutation. The mean of these values is then returned.
Parameters
----------
dict table: a python dict representing the codon table to analyze
Returns
-------
    float mut: a float representing the mutability metric
"""
# initialize counter and running metric, and get mutation pairs
nonsyn_mut = 0
metric = 0
mut_pairs = get_mut_pairs(table)
# get Kyte-Doolittle hydropathy metric
kd = kdHydrophobicity
# loop over mutation pairs
for (c1, c2) in mut_pairs:
# increment counter and metric if nonsynonymous
if not (table[c1] == table[c2]):
# increment counter
nonsyn_mut += 1
# increment metric
aa1 = table[c1]
aa2 = table[c2]
metric += abs(kd[aa1] - kd[aa2])
# if there are no nonsynonymous mutations, return 0
if nonsyn_mut == 0:
mut = 0
# else, return the average dKD per mutation
else:
mut = metric / nonsyn_mut
return mut
def promiscuity(table, allow_ambiguous=False):
"""A function used to generate the genetic code resulting from
considering tRNA promiscuity. Uses Crick Wobble Hypothesis. Raises an
exception if the table generated is ambiguous (more than one signal
acceptable for a given codon)
Parameters
----------
    - dict table: the codon table to promiscuitize
- bool allow_ambiguous: flag telling code whether to accept ambiguity
Returns
-------
    dict promiscuous: the resulting table when considering tRNA promiscuity
"""
# handle type errors for input table
if not isinstance(table, dict):
        raise TypeError("Input table must be a dict or dict-like object")
# declare table to return
promiscuous = {}
for codon in triplet_rna_codons:
promiscuous[codon] = '0'
# loop over codons to reassign
for codon, AA in table.items():
# skip assignments to STOP
if AA == '0':
continue
# get codons that would be decoded in reality
wobble = rna_wobbling[rna_basepairing[codon[-1]]]
codons = [codon[:2] + nt3 for nt3 in wobble]
# determine if there is ambiguity
acceptable = [AA, '0']
for c in codons:
if promiscuous[c] not in acceptable:
# raise error if allow_ambiguous = False
if not allow_ambiguous:
raise ValueError(
'input code generates ambiguous code '
'upon promiscuization'
)
else:
# else, package all nonstop codons as tuple
AAs = tuple(
[aa for aa in promiscuous[c] if aa != '0'] +
[AA]
)
promiscuous[c] = AAs
# otherwise, package as simple str --> str mapping
else:
promiscuous[c] = AA
return promiscuous
def mut_pair_num(table):
"""
A function that calculates the number of pairs of codons one
mutation away from each other. Treats mutations with directionality. In
general, the number of mutational pairs is equal to the number of
codons in a table multiplied by the number of unique codons within one
mutation. Let a = alphabet length (generally 4), L = codon length
(generally 3)
n = (a^L) * L(a-1)
Parameters
----------
dict table: the codon table to analyze
Returns
-------
int mut_num: the number of distinct mutational pairs.
"""
# get list of all codons in table
codon_list = list(table)
# get alphabet size
alphabet = set()
for codon in codon_list:
for nt in codon:
alphabet.add(nt)
a = len(alphabet)
# get codon length
L = len(codon_list[0])
# calculate mut_num and return
return (a ** L) * L * (a - 1)
def get_mut_pairs(table):
"""
A function used to generate the set of all pairs of codons one
mutation away given a codon table.
Parameters
----------
dict table: the codon table to analyze
Returns
-------
set<(str, str)> mut_pairs: a set of distinct mutational pairs.
"""
# declare set of mutational pairs
mut_pairs = set()
# get list of codons and iterate over them
codon_list = list(table)
for codon in codon_list:
# iterate over each base in the codon
for i, base in enumerate(codon):
for nt in rNTPs:
# handle if nt is the same as base
if nt == base:
continue
# if not, generate new codon
c_new = codon[:i] + nt + codon[i + 1:]
# add to set
mut_pairs.add((codon, c_new))
return mut_pairs
def order_NTPs(sortable, nucleic_acid='RNA'):
"""A function used to sort iterables by standard order of NTPs.
For RNA, U-C-A-G. For DNA, T-C-A-G. Returns sorted object.
Parameters
----------
- iterable sortable: the object to sort
- str nucleic_acid: the type of nucleic acid considered
Returns
-------
iterable sorted_obj: the sorted object
"""
# define ordering dictionary
orderdict = {
'RNA': ['U', 'C', 'A', 'G'],
'DNA': ['T', 'C', 'A', 'G']
}
# raise error if nucleic_acid flag invalid
if nucleic_acid.upper() not in orderdict:
raise ValueError(
'nucleic_acid flag set to invalid option (use DNA or RNA)')
# attempt sorting
try:
order = orderdict[nucleic_acid.upper()]
sorted_obj = sorted(
sortable, key=lambda word: [order.index(nt) for nt in word]
)
    except ValueError:
        print('sortable contains a character outside the NTP alphabet; '
              'returning False')
        sorted_obj = False
return sorted_obj
```
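A minimal, self-contained sketch of the one-to-one check above, using `collections.Counter` on a hypothetical toy table (the table entries are illustrative, not part of the package):
```python
from collections import Counter

# Toy codon table (hypothetical): 'F' is encoded by two codons, so not one-to-one.
toy_table = {'AUG': 'M', 'UGG': 'W', 'UUU': 'F', 'UUC': 'F', 'UAA': '*'}

aa_counts = Counter(toy_table.values())
one2one = all(
    count == 1 for aa, count in aa_counts.items() if aa not in {'*', '0'}
)
print(one2one)  # False: 'F' appears twice
```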
#### File: synbio/codes/wrappers.py
```python
from synbio.codes import Code
__all__ = [
"codesavvy"
]
def codesavvy(func):
def wrapper(*args, **kwargs):
# handle code input
code = kwargs.get("code", None)
if code is None:
kwargs["code"] = Code()
elif isinstance(code, dict):
kwargs["code"] = Code(code)
else:
raise TypeError("code must be a dict or dict-like obj")
return func(*args, **kwargs)
return wrapper
```
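A usage sketch for the `codesavvy` decorator, assuming the synbio package is importable; the decorated function below is hypothetical and exists only to show the `code` keyword handling:
```python
from synbio.codes import Code
from synbio.codes.wrappers import codesavvy

# Hypothetical function: only the `code` kwarg normalization comes from codesavvy.
@codesavvy
def n_codons(seq, code=None):
    # here `code` is guaranteed to be a Code instance
    return len(seq) // 3

print(n_codons("AUGUUU"))             # missing code -> replaced by Code()
print(n_codons("AUGUUU", code=None))  # None -> replaced by Code()
```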
#### File: synbio/tests/test_annotations.py
```python
from synbio.annotations import *
from synbio.polymers import DNA
class TestLocation:
##################
# Test locations #
##################
# a: ----------
# b: ----
# c: ----
# d: ----------
# e: ----------
a = Location(0, 10)
b = Location(4, 7)
c = Location(0, 4)
d = Location(10, 20)
e = Location(5, 15)
def test_eq(self):
w = Location(0, 4, "FWD")
x = Location(0, 4, "FWD")
y = Location(0, 4, "REV")
z = Location(3, 7, "FWD")
assert x == x
assert w == x
assert x != y
assert x != z
def test_contains(self):
a = self.a
b = self.b
c = self.c
d = self.d
e = self.e
# testing contains on self should return True
assert Location.contains(a, a)
# Truly contained should return True ...
assert Location.contains(a, b)
# ... even with the same start index
assert Location.contains(a, c)
# ... but not vice versa (order matters!!)
assert not Location.contains(b, a)
assert not Location.contains(c, a)
        # Overlapped, but not contained, should return False
assert not Location.contains(a, e)
assert not Location.contains(e, a)
# No overlap at all should return False
assert not Location.contains(a, d)
assert not Location.contains(d, a)
def test_overlaps(self):
a = self.a
b = self.b
c = self.c
d = self.d
e = self.e
# same location should overlap
        assert Location.overlaps(a, a)
# sequential locations should not overlap
assert not Location.overlaps(a, d)
assert not Location.overlaps(d, a)
# overlapping locations should return True
assert Location.overlaps(a, e)
assert Location.overlaps(e, a)
# contained location should return True ...
assert Location.overlaps(a, b)
assert Location.overlaps(b, a)
# ... even with the same start index
assert Location.overlaps(a, c)
assert Location.overlaps(c, a)
def test_find_overlaps(self):
locations = [self.a, self.b, self.c, self.d, self.e]
assert Location.find_overlaps(locations) == [
[1, 1, 1, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 1, 0, 0],
[0, 0, 0, 1, 1],
[1, 1, 0, 1, 1],
]
def test_to_slice(self):
loc = self.b
assert loc.to_slice() == slice(4, 7, 1)
class TestPart:
def test_eq(self):
dna = DNA("ATCGAATTCCGG")
part1 = Part(seq=dna, name="same name", location=Location(2, 8, "FWD"))
part2 = Part(seq=dna, name="same name", location=Location(2, 8, "FWD"))
part3 = Part(seq=dna, name="diff name", location=Location(2, 8, "FWD"))
part4 = Part(seq=dna, name="diff name", location=Location(4, 10, "REV"))
assert part1 == part1
assert part1 == part2
assert part2 == part1
assert part1 != part3
assert part3 != part1
assert part1 != part4
assert part4 != part1
def test_slice(self):
dna = DNA("ATCGAATTCCGG")
part1 = Part(seq=dna, name="part1", location=Location(2, 8, "FWD"))
part2 = Part(seq=dna, name="part2", location=Location(4, 10, "REV"))
assert part1[2:4].seq == DNA("AA")
assert part1[2:4] == Part(seq=dna, name="part1_subset",
location=Location(4, 6))
assert part1[2:4] == part1[Location(2, 4, "FWD")]
assert part2[2:5].seq == DNA("GAA")
assert part2[2:5] == Part(seq=dna, name="part2_subset",
location=Location(6, 9, "REV"))
assert part2[2:5] == part2[Location(2, 5, "REV")]
def test_DNA_integration(self):
dna = DNA("ATCGAATTCCGG")
part1 = Part(seq=dna, location=Location(0, 4))
part2 = Part(seq=dna, location=Location(4, 8))
part3 = Part(seq=dna, location=Location(8, 12))
part4 = Part(seq=dna, location=Location(2, 10))
# test indexing
assert part1.seq == "ATCG"
assert part2.seq == "AATT"
assert part3.seq == "CCGG"
assert part4.seq == "CGAATTCC"
# test reassignment from Part
part2.seq = "AT"
assert part1.seq == "ATCG"
assert part1.location == Location(0, 4)
assert part2.seq == "AT"
assert part2.location == Location(4, 6)
assert part3.seq == "CCGG"
assert part3.location == Location(6, 10)
assert part4.seq == "CGATCC"
assert part4.location == Location(2, 8)
part2.seq = "AATT"
assert part1.seq == "ATCG"
assert part1.location == Location(0, 4)
assert part2.seq == "AATT"
assert part2.location == Location(4, 8)
assert part3.seq == "CCGG"
assert part3.location == Location(8, 12)
assert part4.seq == "CGAATTCC"
assert part4.location == Location(2, 10)
def test_circular_seq(self):
dna = ("AAAAATTTTTCCCCCGGGGG")
part = Part(seq=dna,
location=[Location(15, 20), Location(0, 5)]) # GGGGGAAAAA
assert part.seq == "GGGGGAAAAA"
if __name__ == '__main__':
TestPart().test_DNA_integration()
```
|
{
"source": "jecampagne/jax_cosmo",
"score": 2
}
|
#### File: jax_cosmo/jax_cosmo/angular_cl.py
```python
from functools import partial
import jax.numpy as np
from jax import jit
from jax import lax
from jax import vmap
import jax_cosmo.background as bkgrd
import jax_cosmo.constants as const
import jax_cosmo.power as power
import jax_cosmo.transfer as tklib
from jax_cosmo.scipy.integrate import Quadrature
from jax_cosmo.scipy.integrate import simps
from jax_cosmo.utils import a2z
from jax_cosmo.utils import z2a
# JEC 30-June-2021
def _get_cl_ordering(probes):
"""
Utility function to get the indices for Cls from a list of probes
"""
n_tracers = sum([p.n_tracers for p in probes])
# Define an ordering for the blocks of the signal vector
cl_index = []
for i in range(n_tracers):
for j in range(i, n_tracers):
cl_index.append((i, j))
return cl_index
def _get_cov_blocks_ordering(probes):
"""
Utility function to get the ordering of the covariance matrix blocks
"""
cl_index = _get_cl_ordering(probes)
def find_index(a, b):
if (a, b) in cl_index:
return cl_index.index((a, b))
else:
return cl_index.index((b, a))
cov_blocks = []
for (i, j) in cl_index:
for (m, n) in cl_index:
cov_blocks.append(
(find_index(i, m), find_index(j, n), find_index(i, n), find_index(j, m))
)
return cov_blocks
# New code by JEC 30-June-2021
def angular_cl(
cosmo,
ell,
probes,
quadInt=None,
transfer_fn=tklib.Eisenstein_Hu,
nonlinear_fn=power.halofit,
):
"""
Computes angular Cls for the provided probes
All using the Limber approximation
Returns
-------
cls: [ell, ncls]
"""
# Retrieve the maximum redshift probed
zmax = max([p.zmax for p in probes])
# We define a function that computes a single l, and vectorize it
@partial(vmap, out_axes=1)
def cl(ell):
def integrand(a):
# Step 1: retrieve the associated comoving distance
chi = bkgrd.radial_comoving_distance(cosmo, a)
# Step 2: get the power spectrum for this combination of chi and a
k = (ell + 0.5) / np.clip(chi, 1.0)
# pk should have shape [na]
pk = power.nonlinear_matter_power(cosmo, k, a, transfer_fn, nonlinear_fn)
# Compute the kernels for all probes
kernels = np.vstack([p.kernel(cosmo, a2z(a), ell) for p in probes])
# Define an ordering for the blocks of the signal vector
cl_index = np.array(_get_cl_ordering(probes))
# Compute all combinations of tracers
def combine_kernels(inds):
return kernels[inds[0]] * kernels[inds[1]]
# Now kernels has shape [ncls, na]
kernels = lax.map(combine_kernels, cl_index)
result = pk * kernels * bkgrd.dchioverda(cosmo, a) / np.clip(chi ** 2, 1.0)
# We transpose the result just to make sure that na is first
return result.T
        if quadInt is not None:
return quadInt.computeIntegral(integrand, [z2a(zmax), 1.0]) / const.c ** 2
else:
return simps(integrand, z2a(zmax), 1.0, 512) / const.c ** 2
return cl(ell)
def noise_cl(ell, probes):
"""
Computes noise contributions to auto-spectra
"""
n_ell = len(ell)
# Concatenate noise power for each tracer
noise = np.concatenate([p.noise() for p in probes])
# Define an ordering for the blocks of the signal vector
cl_index = np.array(_get_cl_ordering(probes))
# Only include a noise contribution for the auto-spectra
def get_noise_cl(inds):
i, j = inds
delta = 1.0 - np.clip(np.abs(i - j), 0.0, 1.0)
return noise[i] * delta * np.ones(n_ell)
return lax.map(get_noise_cl, cl_index)
def gaussian_cl_covariance(ell, probes, cl_signal, cl_noise, f_sky=0.25, sparse=True):
"""
Computes a Gaussian covariance for the angular cls of the provided probes
Set sparse True to return a sparse matrix representation that uses a factor
of n_ell less memory and is compatible with the linear algebra operations
in :mod:`jax_cosmo.sparse`.
    Returns the covariance matrix.
"""
ell = np.atleast_1d(ell)
n_ell = len(ell)
one = 1.0 if sparse else np.eye(n_ell)
# Adding noise to auto-spectra
cl_obs = cl_signal + cl_noise
n_cls = cl_obs.shape[0]
# Normalization of covariance
norm = (2 * ell + 1) * np.gradient(ell) * f_sky
# Retrieve ordering for blocks of the covariance matrix
cov_blocks = np.array(_get_cov_blocks_ordering(probes))
def get_cov_block(inds):
a, b, c, d = inds
cov = (cl_obs[a] * cl_obs[b] + cl_obs[c] * cl_obs[d]) / norm
return cov * one
# Return a sparse representation of the matrix containing only the diagonals
# for each of the n_cls x n_cls blocks of size n_ell x n_ell.
# We could compress this further using the symmetry of the blocks, but
# it is easier to invert this matrix with this redundancy included.
cov_mat = lax.map(get_cov_block, cov_blocks)
# Reshape covariance matrix into proper matrix
if sparse:
cov_mat = cov_mat.reshape((n_cls, n_cls, n_ell))
else:
cov_mat = cov_mat.reshape((n_cls, n_cls, n_ell, n_ell))
cov_mat = cov_mat.transpose((0, 2, 1, 3)).reshape(
(n_ell * n_cls, n_ell * n_cls)
)
return cov_mat
def gaussian_cl_covariance_and_mean(
cosmo,
ell,
probes,
transfer_fn=tklib.Eisenstein_Hu,
nonlinear_fn=power.halofit,
f_sky=0.25,
sparse=False,
):
"""
Computes a Gaussian covariance for the angular cls of the provided probes
Set sparse True to return a sparse matrix representation that uses a factor
of n_ell less memory and is compatible with the linear algebra operations
in :mod:`jax_cosmo.sparse`.
    Returns the flattened signal cls and the covariance; noise enters the covariance only.
"""
ell = np.atleast_1d(ell)
n_ell = len(ell)
# Compute signal vectors
cl_signal = angular_cl(
cosmo, ell, probes, transfer_fn=transfer_fn, nonlinear_fn=nonlinear_fn
)
cl_noise = noise_cl(ell, probes)
# retrieve the covariance
cov_mat = gaussian_cl_covariance(ell, probes, cl_signal, cl_noise, f_sky, sparse)
return cl_signal.flatten(), cov_mat
```
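The block ordering used throughout this module can be illustrated standalone; for three tracers the signal vector runs over the upper triangle of tracer pairs:
```python
# Standalone illustration of _get_cl_ordering for n_tracers = 3 (no jax needed).
n_tracers = 3
cl_index = [(i, j) for i in range(n_tracers) for j in range(i, n_tracers)]
print(cl_index)  # [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]
```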
|
{
"source": "jecampagne/N5K",
"score": 3
}
|
#### File: python/testwrap/3cint.py
```python
from math import sin,cos,pi
import Angpow
class FuncType0(Angpow.ClassFunc1D_get_value):
def get_value(self,a_x):
return a_x * (a_x-1.)*(a_x-1.)
import math
class FuncType1(Angpow.ClassFunc1D_get_value):
m_ell = 0
m_R = 0
def __init__(self,a_ell,a_R):
Angpow.ClassFunc1D_get_value.__init__(self)
self.m_ell = a_ell
self.m_R = a_R
def get_value(self,a_x):
return math.cos(a_x*self.m_R - self.m_ell*math.pi*0.5 - math.pi*0.25)
############
def pow(x,a):
return x**a
##############
def truth_test0(l,R1,R2,a,b):
M_PI = pi
res = 0
if R1 != R2:
res = ((-3*(-2 + pow(a,2.)*pow(R1 - R2,2.))*cos(a*(R1 - R2)))/pow(R1 - R2,4.) -
cos(a*(R1 - R2))/pow(R1 - R2,2.) + (4*a*cos(a*(R1 - R2)))/pow(R1 - R2,2.) +
(3*(-2 + pow(b,2.)*pow(R1 - R2,2.))*cos(b*(R1 - R2)))/pow(R1 - R2,4.) +
cos(b*(R1 - R2))/pow(R1 - R2,2.) - (4*b*cos(b*(R1 - R2)))/pow(R1 - R2,2.) -
(a*(-6 + pow(a,2.)*pow(R1 - R2,2.))*sin(a*(R1 - R2)))/pow(R1 - R2,3.) +
(2*(-2 + pow(a,2.)*pow(R1 - R2,2.))*sin(a*(R1 - R2)))/pow(R1 - R2,3.) -
(a*sin(a*(R1 - R2)))/(R1 - R2) +
(b*(-6 + pow(b,2.)*pow(R1 - R2,2.))*sin(b*(R1 - R2)))/pow(R1 - R2,3.) -
(2*(-2 + pow(b,2.)*pow(R1 - R2,2.))*sin(b*(R1 - R2)))/pow(R1 - R2,3.) +
(b*sin(b*(R1 - R2)))/(R1 - R2) +
(a*(R1 + R2)*cos(l*M_PI - a*(R1 + R2)) + sin(l*M_PI - a*(R1 + R2)))/pow(R1 + R2,2.) -
(2*((-2 + pow(a,2.)*pow(R1 + R2,2.))*cos(l*M_PI - a*(R1 + R2)) +
2*a*(R1 + R2)*sin(l*M_PI - a*(R1 + R2))))/pow(R1 + R2,3.) +
(a*(R1 + R2)*(-6 + pow(a,2.)*pow(R1 + R2,2.))*cos(l*M_PI - a*(R1 + R2)) +
3*(-2 + pow(a,2.)*pow(R1 + R2,2.))*sin(l*M_PI - a*(R1 + R2)))/pow(R1 + R2,4.) -
(b*(R1 + R2)*cos(l*M_PI - b*(R1 + R2)) + sin(l*M_PI - b*(R1 + R2)))/pow(R1 + R2,2.) +
(2*((-2 + pow(b,2.)*pow(R1 + R2,2.))*cos(l*M_PI - b*(R1 + R2)) +
2*b*(R1 + R2)*sin(l*M_PI - b*(R1 + R2))))/pow(R1 + R2,3.) -
(b*(R1 + R2)*(-6 + pow(b,2.)*pow(R1 + R2,2.))*cos(l*M_PI - b*(R1 + R2)) +
3*(-2 + pow(b,2.)*pow(R1 + R2,2.))*sin(l*M_PI - b*(R1 + R2)))/pow(R1 + R2,4.))/2.;
else:
res = (-2*pow(a,2.)*(6 - 8*a + 3*pow(a,2.))*pow(R1,4.) +
2*pow(b,2.)*(6 - 8*b + 3*pow(b,2.))*pow(R1,4.) +
6*R1*(2 - 4*pow(a,2.)*pow(R1,2.) + 2*pow(a,3.)*pow(R1,2.) +
a*(-3 + 2*pow(R1,2.)))*cos(l*M_PI - 2*a*R1) -
6*R1*(2 - 4*pow(b,2.)*pow(R1,2.) + 2*pow(b,3.)*pow(R1,2.) +
b*(-3 + 2*pow(R1,2.)))*cos(l*M_PI - 2*b*R1) +
3*(-3 + (2 - 8*a + 6*pow(a,2.))*pow(R1,2.))*sin(l*M_PI - 2*a*R1) -
3*(-3 + (2 - 8*b + 6*pow(b,2.))*pow(R1,2.))*sin(l*M_PI - 2*b*R1))/(48.*pow(R1,4.));
return res
#########
def test0():
ell = 20
R1 = 2000.
R2 = 2200.
chebyshev_order_1 = 8
chebyshev_order_2 = chebyshev_order_1
n_sub_intervals = 5
print("ell=",ell," , Nintervales=",n_sub_intervals)
# k-integral bounds
kMin = 0.
kMax = 1.0 #Mpc^(-1)
klp = Angpow.std_vector_double(n_sub_intervals+1)
dK = kMax-kMin
for i in range(0,n_sub_intervals+1):
klp[i] = kMin + dK * i/n_sub_intervals
f1 = FuncType1(ell,R1)
f2 = FuncType1(ell,R2)
f0 = FuncType0()
iOrd0 = 2
iOrd1 = chebyshev_order_1
iOrd2 = chebyshev_order_2
farr = Angpow.std_vector_CheFunc()
che_fun_0 = Angpow.CheFunc(f1, iOrd1)
farr.push_back(che_fun_0)
che_fun_1 = Angpow.CheFunc(f2, iOrd2)
farr.push_back(che_fun_1)
che_fun_2 = Angpow.CheFunc(f0, iOrd0)
farr.push_back(che_fun_2)
    # Initialisation of the Clenshaw-Curtis quadrature
cheAlgo = Angpow.CheAlgo(farr)
# Integration
integral = 0.
for p in range(1,n_sub_intervals+1):
# get the bounds
lowBound = klp[p-1]
uppBound = klp[p]
if lowBound > uppBound:
print('KIntegrator::Compute uppBound < lowBound Fatal')
return
        # Loop on each function to compute their forward Chebyshev coefficients
for i in range(0,farr.size()):
farr[i].ChebyshevTransform(lowBound, uppBound)
# Compute the sampling of all the functions in the final space dimension
cheAlgo.InverseChebyshevTransform()
# Compute the integral thanks to CC quadrature and the function sampling
integral += (uppBound - lowBound) * cheAlgo.ComputeIntegralUnscaled()
print("Approx. Integ = ",integral)
trueInt = truth_test0(ell,R1,R2,kMin,kMax)
diff = trueInt - integral
print(f"True Integ ={trueInt}, diff = {diff}")
if __name__ == '__main__':
test0()
```
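As a rough sanity check independent of Angpow, the same oscillatory integral can be approximated with a brute-force trapezoidal rule; a dense grid is needed because the integrand oscillates on a scale of ~1/R:
```python
import numpy as np

ell, R1, R2 = 20, 2000.0, 2200.0
k = np.linspace(0.0, 1.0, 400001)  # dense grid for the fast oscillations
phase = lambda R: np.cos(k * R - ell * np.pi * 0.5 - np.pi * 0.25)
integrand = k * (k - 1.0) ** 2 * phase(R1) * phase(R2)
print(np.trapz(integrand, k))  # should approach truth_test0(ell, R1, R2, 0., 1.)
```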
|
{
"source": "JecaTatu/Django-FAI",
"score": 2
}
|
#### File: rediti/users/models.py
```python
from .manager import UserManager
from common.models import IndexedTimeStampedModel
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
class User(AbstractBaseUser, PermissionsMixin, IndexedTimeStampedModel):
email = models.EmailField(unique=True)
username = models.CharField(max_length=30)
description = models.TextField(blank=True)
karma = models.IntegerField(default=0)
avatar = models.ImageField(blank=True, default='avatar.jpg')
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
def __str__(self):
return self.username
```
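Typical usage, assuming `UserManager` (not shown here) implements the usual Django `create_user` API; the call below is hypothetical:
```python
# Inside a configured Django project (e.g. ./manage.py shell):
user = User.objects.create_user(email='reader@example.com', password='change-me')
print(user.karma)  # 0, the field default
```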
|
{
"source": "JecaTatu/nina-teste",
"score": 3
}
|
#### File: JecaTatu/nina-teste/s3_athena_scrypt.py
```python
import boto3
import pandas as pd
from boto3 import client
from pyathena import connect
from pyathena.util import as_pandas
#Creating Bucket
region='us-east-1'
s3_client = boto3.client('s3', region_name=region)
bucket_name = 'teste-nina'
location = {'LocationConstraint': region}
s3_client.create_bucket(Bucket=bucket_name)
s3 = boto3.resource('s3')
bucket = s3.Bucket('teste-nina')
account_id = boto3.client('sts').get_caller_identity().get('Account')
#Getting the csv to use as a dataset
csv_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/06-18-2020.csv'
df = pd.read_csv(csv_url)
#Removing columns not so useful for now
df = df.loc[:, ['Province_State', 'Country_Region', 'Confirmed', 'Deaths', 'Recovered', 'Active', 'Incidence_Rate', 'Case-Fatality_Ratio']]
#Filtering to just show the Brazil's numbers
df = df.query('Country_Region.str.contains("Brazil")')
#Setting the types for each column
df.loc[:, 'Confirmed'] = df['Confirmed'].astype(int)
df.loc[:, 'Deaths'] = df['Deaths'].astype(int)
df.loc[:, 'Recovered'] = df['Recovered'].astype(int)
df.loc[:, 'Active'] = df['Active'].astype(int)
df.loc[:, 'Province_State'] = df['Province_State'].astype(str).str.lower()
df.loc[:, 'Country_Region'] = df['Country_Region'].astype(str).str.lower()
df.loc[:, 'Incidence_Rate'] = df['Incidence_Rate'].astype(float)
df.loc[:, 'Case-Fatality_Ratio'] = df['Case-Fatality_Ratio'].astype(float)
df.to_csv('covid-brasil-data.csv', index=False, header=False)
#Uploading the csv to s3
bucket.upload_file('./covid-brasil-data.csv', 'covid-brasil-report.csv')
#Starting to make the csv queryable
glue_client = boto3.client('glue')
#Creating the database
database_name = 'nina-teste'
glue_client.create_database(CatalogId=account_id, DatabaseInput={'Name': database_name, 'Description': 'Database with covid informations'})
location_data = 's3://teste-nina/'
table_name = 'Covid Data'
#Creating the table for the dataset
response = glue_client.create_table(
CatalogId=account_id,
DatabaseName=database_name,
TableInput={
'Name': table_name,
'Description': 'Covid informations of Brazil',
'StorageDescriptor': {
'Columns': [
{
'Name': 'Province_State',
'Type': 'string',
'Comment': 'Name of the State'
},
{
'Name': 'Country_Region',
'Type': 'string',
'Comment': 'Name of the Country'
},
{
'Name': 'Confirmed',
'Type': 'int',
'Comment': 'Number of confirmed cases'
},
{
'Name': 'Deaths',
'Type': 'int',
'Comment': 'Number of deaths cases'
},
{
'Name': 'Recovered',
'Type': 'int',
'Comment': 'Number of recovered cases'
},
{
'Name': 'Active',
'Type': 'int',
'Comment': 'Number of active cases'
},
{
'Name': 'Incidence_Rate',
'Type': 'float',
'Comment': 'Rate of incidence'
},
{
'Name': 'Case-Fatality_Ratio',
'Type': 'float',
'Comment': 'Percentage of letal case'
},
],
'Location': location_data,
'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
'SerdeInfo': {
'SerializationLibrary': 'org.apache.hadoop.hive.serde2.OpenCSVSerde',
'Parameters': {
'escapeChar': '\\',
'separatorChar': ',',
'serialization.format': '1'
}
},
},
'TableType': 'EXTERNAL_TABLE',
'Parameters': {
'classification': 'csv'
}
}
)
#Making a query
cursor = connect(region_name=region, s3_staging_dir='s3://teste-nina/').cursor()
cursor.execute('SELECT * FROM "{0}"."{1}" limit 10;'.format(database_name, table_name.lower()))
df_sql = as_pandas(cursor)
#Creating aggregation dataset fot the total Deaths, Total cases, Total of recovers and all that still active
df_agg = df.agg({'Confirmed': ['sum'], 'Deaths': ['sum'], 'Recovered': ['sum'], 'Active': ['sum']})
#Calculating the letality of covid in Brazil
def calc_letality(row):
return row['Deaths'] / row['Confirmed'] * 100
df_agg['Letality'] = df_agg.apply(calc_letality, axis=1)
df_agg.to_csv('covid-brasil-agg.csv', index=False, header=False)
#Creating a new bucket to upload the aggregation
bucket_name_agg = 'teste-nina-agg'
s3_client.create_bucket(Bucket=bucket_name_agg)
bucket_agg = s3.Bucket('teste-nina-agg')
#Uploading the csv to s3
bucket_agg.upload_file('./covid-brasil-agg.csv', 'covid-brasil-agg-report.csv')
table_name_agg = 'aggregation'
location_data_agg = 's3://teste-nina-agg/'
#Creating the new table for the aggregation
response = glue_client.create_table(
CatalogId=account_id,
DatabaseName=database_name,
TableInput={
'Name': table_name_agg,
'Description': 'Covid informations of Brazil',
'StorageDescriptor': {
'Columns': [
{
'Name': 'Confirmed',
'Type': 'int',
'Comment': 'Total number of confirmed cases in Brazil'
},
{
'Name': 'Deaths',
'Type': 'int',
'Comment': 'Total number of deaths cases in Brazil'
},
{
'Name': 'Recovered',
'Type': 'int',
'Comment': 'Total number of recovered cases in Brazil'
},
{
'Name': 'Active',
'Type': 'int',
'Comment': 'Total number of active cases in Brazill'
},
{
'Name': 'Letality',
'Type': 'float',
'Comment': 'Letality of covid in Brazil'
},
],
'Location': location_data_agg,
'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
'SerdeInfo': {
'SerializationLibrary': 'org.apache.hadoop.hive.serde2.OpenCSVSerde',
'Parameters': {
'escapeChar': '\\',
'separatorChar': ',',
'serialization.format': '1'
}
},
},
'TableType': 'EXTERNAL_TABLE',
'Parameters': {
'classification': 'csv'
}
}
)
#Making a query in the aggregation
cursor = connect(region_name=region, s3_staging_dir='s3://teste-nina/').cursor()
cursor.execute('SELECT * FROM "{0}"."{1}" limit 10;'.format(database_name, table_name_agg.lower()))
df_sql_agg = as_pandas(cursor)
```
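The aggregation and letality computation can be verified on a toy frame without touching AWS (the values below are made up):
```python
import pandas as pd

toy = pd.DataFrame({'Confirmed': [100, 50], 'Deaths': [10, 5],
                    'Recovered': [60, 30], 'Active': [30, 15]})
toy_agg = toy.agg({'Confirmed': ['sum'], 'Deaths': ['sum'],
                   'Recovered': ['sum'], 'Active': ['sum']})
toy_agg['Letality'] = toy_agg.apply(lambda r: r['Deaths'] / r['Confirmed'] * 100, axis=1)
print(toy_agg['Letality']['sum'])  # 10.0
```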
|
{
"source": "jeccec51/SentimentAnalysis",
"score": 3
}
|
#### File: jeccec51/SentimentAnalysis/Sentiment.py
```python
from string import punctuation
from collections import Counter
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.nn as nn
train_on_gpu = torch.cuda.is_available()
def load_test(path_reviews, path_labels):
with open(path_reviews, 'r') as f:
reviews = f.read()
with open(path_labels, 'r') as r:
labels = r.read()
return reviews, labels
def preprocess_text(text):
reviews = text.lower()
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews_split = all_text.split('\n')
all_text = ''.join(reviews_split)
return all_text, reviews_split
def encode_text(text, split_text):
words_text = text.split()
counts = Counter(words_text)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
reviews_ints = []
for review in split_text:
reviews_ints.append([vocab_to_int[word] for word in review.split()])
return reviews_ints, vocab_to_int
def encode_labels(labels_text):
labels_split = labels_text.split()
encoded_labels = np.array([1 if label == 'positive' else 0 for label in labels_split])
return encoded_labels
def outlier_removal(int_texts, encoded_labels):
non_zero_indices = [ii for ii, review in enumerate(int_texts) if len(review) != 0]
processed_int_texts = [int_texts[ii] for ii in non_zero_indices]
processed_labels = np.array([encoded_labels[ii] for ii in non_zero_indices])
return processed_int_texts, processed_labels
def pad_features(int_texts, seq_lengths):
features = np.zeros((len(int_texts), seq_lengths), dtype=int)
for i, row in enumerate(int_texts):
features[i, -len(row):] = np.array(row)[:seq_lengths]
return features
def generate_data_loaders(train_data, train_label, valid_data, valid_label, test_data, test_label):
train_data_loader = TensorDataset(torch.from_numpy(train_data), torch.from_numpy(train_label))
valid_data_loader = TensorDataset(torch.from_numpy(valid_data), torch.from_numpy(valid_label))
test_data_loader = TensorDataset(torch.from_numpy(test_data), torch.from_numpy(test_label))
return train_data_loader, valid_data_loader, test_data_loader
class SentimentRNN(nn.Module):
def __init__(self, vocab_size, output_size, embed_dim, hidden_dim, n_layers, drop_prob = 0.5):
super(SentimentRNN, self).__init__()
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.embedding = nn.Embedding(vocab_size, embed_dim)
self.lstm = nn.LSTM(embed_dim, hidden_dim, n_layers, dropout=drop_prob, batch_first=True)
self.dropout = nn.Dropout(0.3)
self.fc = nn.Linear(hidden_dim, output_size)
self.sig = nn.Sigmoid()
def forward(self, x, hidden):
batch_size = x.size(0)
embeds = self.embedding(x)
lstm_out, hidden = self.lstm(embeds, hidden)
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
out = self.dropout(lstm_out)
out = self.fc(out)
sig_out = self.sig(out)
sig_out = sig_out.view(batch_size, -1)
sig_out = sig_out[:, -1]
return sig_out, hidden
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
if train_on_gpu:
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
return hidden
```
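The left-padding scheme in `pad_features` is easiest to see on a toy batch (standalone, numpy only):
```python
import numpy as np

reviews_ints = [[1, 2, 3], [4, 5]]
seq_length = 5
features = np.zeros((len(reviews_ints), seq_length), dtype=int)
for i, row in enumerate(reviews_ints):
    features[i, -len(row):] = np.array(row)[:seq_length]
print(features)
# [[0 0 1 2 3]
#  [0 0 0 4 5]]
```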
|
{
"source": "je-c/CryptoClassifier",
"score": 3
}
|
#### File: lib/functionality/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
targetLabels = []
predsMade = []
def accuracy(outputs, labels):
"""
Calculate model accuracy
    * :param outputs(torch.tensor): model output logits, shape [batch, classes]
    * :param labels(torch.tensor): ground-truth class labels
:return (torch.tensor): Prediction accuracy
"""
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
class BaseClassifier(nn.Module):
"""
BaseClassifier Class
--------------------
Extension of nn.module. Adds epoch step through methods for loss calculation by batch
and prediction output storage
"""
def calculate_loss(self, batch):
"""
Perform a training step in the model.
* :param batch(torch.DataLoader): Pytorch dataloader containing dataset
:return loss(torch.tensor): Loss tensor
"""
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validate(self, batch):
"""
Perform a validation step in the model.
* :param batch(torch.DataLoader): Pytorch dataloader containing dataset
:return (dict): Representation of loss and accuracy for the validation step
"""
images, labels = batch
out = self(images)
for i, j in zip(labels, out):
targetLabels.append(i)
predsMade.append(torch.argmax(j))
loss = F.cross_entropy(out, labels)
acc = accuracy(out, labels)
return {
'val_loss': loss.detach(),
'val_acc': acc
}
def validate_epoch(self, outputs):
"""
Accumulates validation steps completed in a single epoch.
* :param outputs(dict): Dictionary of loss and accuracy statistics for each validation step
:return (dict): Validation results
"""
return {
'val_loss': torch.stack([i['val_loss'] for i in outputs]).mean().item(),
'val_acc': torch.stack([i['val_acc'] for i in outputs]).mean().item()
}
def epoch_wrapup(self, epoch, results):
"""
Outputs epoch statistics
* :param epoch(int): Epoch #
* :param results(dict): Dictionary of statistics for the epoch
:return (NoneType): None
"""
print(f"Epoch [{epoch}]")
print(f" - last_lr: {results['lrs'][-1]:.8f}")
print(f" - train_loss: {results['train_loss']:.4f}")
print(f" - val_loss: {results['val_loss']:.4f}")
print(f" - val_acc: {results['val_acc']:.4f}")
def conv_block(in_channels, out_channels, pool=False):
"""
    Convolution block structure. Convolves the incoming image (via a transposed convolution), performs batch normalisation and applies ReLU.
Optionally supports layer pooling
* :param in_channels(int): Expected number of incoming channels
* :param out_channels(int): Output number channel
* :param pool(bool): Invokes layer pooling
:return (torch.tensor): Linearised convolution layer
"""
layers = [
nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace = True)
]
if pool: layers.append(nn.MaxPool2d(3))
return nn.Sequential(*layers)
class ResNet9(BaseClassifier):
"""
ResNet9 Class
-------------
Extends BaseClassifier with Residual Neural Net structures. Handles weights forward
pass and structures the network.
"""
def __init__(self, in_channels, num_classes):
super().__init__()
self.conv1 = conv_block(in_channels, 64)
self.conv2 = conv_block(64, 128)
self.res1 = nn.Sequential(
conv_block(128, 128),
conv_block(128, 128)
)
self.conv3 = conv_block(128, 256)
self.conv4 = conv_block(256, 512, pool = True)
self.res2 = nn.Sequential(
conv_block(512, 512),
conv_block(512, 512)
)
self.classifier = nn.Sequential(
nn.MaxPool2d(3),
nn.Flatten(),
nn.Linear(512, num_classes)
)
def forward(self, X):
"""
Convolution forward pass. Convolves image through network, computes residual layers and final
classifier
* :param X(torch.tensor): Incoming data
:return out(torch.tensor): Convolved network
"""
out = self.conv1(X)
out = self.conv2(out)
out = self.res1(out) + out
out = self.conv3(out)
out = self.conv4(out)
out = self.res2(out) + out
out = self.classifier(out)
return out
@torch.no_grad()
def evaluate(model, validation_dl):
"""
Evaluate model at epoch
* :param model(torch.nn.module): ResNet model
* :param validation_dl(torch.dataloader): Validation data
:return (dict): Validation results for model at current epoch
"""
model.eval()
return model.validate_epoch([model.validate(batch) for batch in validation_dl])
def get_lr(optimizer):
"""
Return current learning rate from optimiser function
* :param optimiser(torch.optim): Optimiser function
:return (float): Learning rate at current epoch
"""
for param_group in optimizer.param_groups:
return param_group['lr']
def fit(params, model, training_dl, validation_dl, optimiser_f=torch.optim.SGD):
"""
Main training function. Compiles and trains the model on given data. Handles
learning rate sheduling and validation statistics storage.
* :param params(dict): Model parameters
* :param model(torch.nn.module): Model
* :param training_dl(torch.dataloader): Training data
* :param validation_dl(torch.dataloader): Validation data
* :param optimiser_f(torch.optim): Optimiser function
:return history(list): Statistics for each epoch training/validation step
"""
epochs, max_lr, weight_decay, grad_clip = [params[key] for key in params]
torch.cuda.empty_cache()
history = []
optimiser = optimiser_f(
model.parameters(),
max_lr,
weight_decay=weight_decay
)
sched = torch.optim.lr_scheduler.OneCycleLR(
optimiser, max_lr,
epochs=epochs,
steps_per_epoch=len(training_dl)
)
for epoch in range(epochs):
model.train()
train_losses = []
lrs = []
for batch in training_dl:
loss = model.calculate_loss(batch)
train_losses.append(loss)
loss.backward()
if grad_clip: nn.utils.clip_grad_value_(model.parameters(), grad_clip)
optimiser.step()
optimiser.zero_grad()
lrs.append(get_lr(optimiser))
sched.step()
# Validate epoch
result = evaluate(model, validation_dl)
result['train_loss'] = torch.stack(train_losses).mean().item()
result['lrs'] = lrs
model.epoch_wrapup(epoch, result)
history.append(result)
return history
```
#### File: lib/storage/sql.py
```python
import psycopg2
import psycopg2.extras
from sqlalchemy import create_engine
import json
import pandas as pd
class SQLTools:
"""
SQLTools Class
--------------
Wrapper for SQL and databasing tools. Contains functions for querying non-local and local databases,
flexible schema mapping and data storage.
"""
@staticmethod
def pgquery(conn, sqlcmd, args=None, msg=False, returntype='tuple'):
"""
Utility function for packaging SQL statements from python.
* :param conn(psycopg2.connection): Connection port to SQL database
* :param sqlcmd(str): SQL query
* :param args(dict): Optional arguements for SQL-side
* :param msg(str): Return message from server
* :param returntype(str): Demarkation of expected query return type
:return returnval(str): Return message
"""
returnval = None
with conn:
cursortype = None if returntype != 'dict' else psycopg2.extras.RealDictCursor
with conn.cursor(cursor_factory=cursortype) as cur:
try:
if args is None:
cur.execute(sqlcmd)
else:
cur.execute(sqlcmd, args)
if (cur.description != None ):
returnval = cur.fetchall()
if msg != False:
print("success: " + msg)
except psycopg2.DatabaseError as e:
if e.pgcode != None:
if msg: print("db read error: "+msg)
print(e)
except Exception as e:
print(e)
return returnval
@staticmethod
def pgconnect(credential_filepath):
"""
Connection terminal from python to SQL server
* :param credential_filepath(str): Filepath to credentials.json
:return conn(psycopg2.connection): Connection port to SQL server
"""
try:
with open(credential_filepath) as f:
db_conn_dict = json.load(f)
conn = psycopg2.connect(**db_conn_dict)
print('Connection successful')
except Exception as e:
print("Connection unsuccessful... Try again")
print(e)
return None
return conn
@staticmethod
def sqlInject(data, insert_stmt, conn):
"""
Pipeline for uploading data to SQL server
* :param data(pd.DataFrame): Data to upload
* :param insert_stmt(str): SQL query
* :param conn(psycopg2.connection): Connection port to SQL server
:return (NoneType): None
"""
count = 0
if isinstance(data, list):
for df in data:
print(f'Element {count} data injection commenced')
for _, area in df.iterrows():
SQLTools.pgquery(conn, insert_stmt, args=area, msg="inserted ")
count += 1
else:
for _, area in data.iterrows():
SQLTools.pgquery(conn, insert_stmt, args=area, msg="inserted ")
@staticmethod
def feed_schema(data):
"""
Flexible schema mapping for incoming data. Allows non-fixed axis 1 dimensions
* :param data(pd.DataFrame): Data to upload
:return (str): Schema segment to splice into table creation schema
"""
schema_segment = [
f'{col} NUMERIC,\n' if col != data.columns()[-1] else f'{col} NUMERIC' for col in data.columns()
]
return ''.join(schema_segment)
@staticmethod
def parse_and_upload(credfilepath, data, start=False):
"""
Parses data from local directories to relational database.
* :param credfilepath(json): Database credentials
* :param data(pd.DataFrame): Data to upload
        * :param start(boolean): flag gating whether the table is (re)created and the data uploaded
:return (NoneType): None
"""
if start:
conn = SQLTools.pgconnect(credfilepath)
insert_stmt = f"""
INSERT INTO classifier VALUES ( {', '.join([col for col in data.columns()])} )
"""
SQLTools.pgquery(conn, "DROP TABLE IF EXISTS classifier CASCADE", msg="cleared old table")
groupbuy_schema = f"""
CREATE TABLE classifier(
date DATETIME PRIMARY KEY,
{SQLTools.feed_schema(data)}
);
"""
SQLTools.pgquery(conn, groupbuy_schema, msg="created groupbuy table")
SQLTools.sqlInject(data, insert_stmt, conn)
            conn.close()
```
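`feed_schema`'s output is easiest to see on a toy frame (standalone; the column names are illustrative):
```python
import pandas as pd

toy = pd.DataFrame({'open': [1.0], 'close': [2.0]})
segment = ''.join(
    f'{col} NUMERIC,\n' if col != toy.columns[-1] else f'{col} NUMERIC'
    for col in toy.columns
)
print(segment)
# open NUMERIC,
# close NUMERIC
```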
|
{
"source": "jecel0911/ImageLocalFinder",
"score": 2
}
|
#### File: ImageLocalFinder/image_finder/models.py
```python
from django.db import models
import time
import random
from functools import partial
def _update_filename(instance, filename, path):
return path + str(time.time()) + '_' + str(random.randint(0,1000)) + '_' + filename
def upload_to(path):
return partial(_update_filename, path=path)
# Create your models here.
class ImageHeader(models.Model):
description = models.TextField()
date_added = models.DateTimeField()
resolved = models.BooleanField(default=False)
def __unicode__(self):
return self.description
def __str__(self):
return self.__unicode__()
class ImageDetail(models.Model):
    image_header = models.ForeignKey(ImageHeader, on_delete=models.CASCADE)  # on_delete required on Django 2.0+
image = models.FileField(upload_to=upload_to('ImageLocalFinder/static/attachments/'))
resolved = models.BooleanField()
def __unicode__(self):
return 'Image ' + str(self.id)
def __str__(self):
return self.__unicode__()
```
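The `upload_to` factory simply pre-binds the destination path; a standalone sketch of the filenames it produces:
```python
import time
import random
from functools import partial

def _update_filename(instance, filename, path):
    return path + str(time.time()) + '_' + str(random.randint(0, 1000)) + '_' + filename

fn = partial(_update_filename, path='attachments/')
print(fn(None, 'photo.jpg'))  # e.g. attachments/1718000000.0_421_photo.jpg
```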
|
{
"source": "jecepeda/advent-code-2019",
"score": 4
}
|
#### File: advent-code-2019/day-03/main.py
```python
from typing import Dict, List, Tuple
class StepOperation:
def __init__(self, op, steps):
self.op = op
self.steps = steps
def __repr__(self):
return "op: {} steps: {}".format(self.op, self.steps)
def get_op(elem):
return StepOperation(op=elem[0], steps=int(elem[1:]))
def traverse_path(path: List[StepOperation]):
result: Dict[Tuple[int, int], bool] = {}
result_steps: Dict[Tuple[int, int], int] = {}
x, y, steps = 0, 0, 0
for step in path:
if step.op == "R":
partial = {(x_inc, y): True for x_inc in range(x, x + step.steps + 1)}
partial_steps = {
(x_inc, y): steps + (x_inc - x)
for x_inc in range(x, x + step.steps + 1)
}
result_steps.update(partial_steps)
result.update(partial)
x += step.steps
elif step.op == "L":
partial = {(x_inc, y): True for x_inc in range(x, x - step.steps - 1, -1)}
partial_steps = {
(x_inc, y): steps + (x - x_inc)
for x_inc in range(x, x - step.steps - 1, -1)
}
result.update(partial)
result_steps.update(partial_steps)
x -= step.steps
elif step.op == "U":
partial = {(x, y_inc): True for y_inc in range(y, y + step.steps + 1)}
partial_steps = {
(x, y_inc): steps + (y_inc - y)
for y_inc in range(y, y + step.steps + 1)
}
result.update(partial)
result_steps.update(partial_steps)
y += step.steps
elif step.op == "D":
partial = {(x, y_inc): True for y_inc in range(y, y - step.steps - 1, -1)}
partial_steps = {
(x, y_inc): steps + (y - y_inc)
for y_inc in range(y, y - step.steps - 1, -1)
}
result.update(partial)
result_steps.update(partial_steps)
y -= step.steps
steps += step.steps
return result, result_steps
def get_manhattan_distances(elems):
return [abs(elem[0]) + abs(elem[1]) for elem in elems]
def get_intersection_steps(steps_1, steps_2, matches):
return [steps_1[elem] + steps_2[elem] for elem in matches]
if __name__ == "__main__":
first_path = None
second_path = None
with open("input.txt") as f:
first_path = [get_op(elem) for elem in f.readline().split(",")]
second_path = [get_op(elem) for elem in f.readline().split(",")]
positions_1, steps_1 = traverse_path(first_path)
positions_2, steps_2 = traverse_path(second_path)
matches = set(positions_1.keys()) & set(positions_2.keys())
manhattan_distances = get_manhattan_distances(matches)
lowest_steps = get_intersection_steps(steps_1, steps_2, matches)
    # index 1 skips the trivial intersection at the origin (distance/steps 0)
    print(sorted(manhattan_distances)[1])
    print(sorted(lowest_steps)[1])
```
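With the functions above in scope, the worked example from the puzzle statement reproduces the known answers (closest distance 6, fewest combined steps 30):
```python
# Known worked example from the Advent of Code day 3 statement.
p1 = [get_op(e) for e in "R8,U5,L5,D3".split(",")]
p2 = [get_op(e) for e in "U7,R6,D4,L4".split(",")]
pos1, st1 = traverse_path(p1)
pos2, st2 = traverse_path(p2)
common = set(pos1) & set(pos2)
print(sorted(get_manhattan_distances(common))[1])           # 6
print(sorted(get_intersection_steps(st1, st2, common))[1])  # 30
```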
|
{
"source": "jechague/transform",
"score": 2
}
|
#### File: tensorflow_transform/coders/example_proto_coder_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import sys
# GOOGLE-INITIALIZATION
from absl import flags
# Note that this needs to happen before any non-python imports, so we do it
# pretty early on.
if any(arg == '--proto_implementation_type=python' for arg in sys.argv):
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
elif any(arg == '--proto_implementation_type=cpp' for arg in sys.argv):
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
elif any(arg.startswith('--proto_implementation_type') for arg in sys.argv):
raise ValueError('Unexpected value for --proto_implementation_type')
# pylint: disable=g-import-not-at-top
import numpy as np
import tensorflow as tf
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform import test_case
from tensorflow_transform.coders import example_proto_coder_test_cases
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf.internal import api_implementation
from google.protobuf import text_format
# pylint: enable=g-import-not-at-top
flags.DEFINE_string(
'proto_implementation_type', 'cpp',
'The implementation type of python proto to use when exercising this test')
def _ascii_to_example(ascii_proto):
return text_format.Merge(ascii_proto, tf.train.Example())
def _ascii_to_binary(ascii_proto):
return _ascii_to_example(ascii_proto).SerializeToString()
def _binary_to_example(serialized_proto):
return tf.train.Example.FromString(serialized_proto)
class ExampleProtoCoderTest(test_case.TransformTestCase):
def setUp(self):
super(ExampleProtoCoderTest, self).setUp()
# Verify that the implementation we requested via the Flag is honoured.
assert api_implementation.Type() == flags.FLAGS.proto_implementation_type
def assertSerializedProtosEqual(self, a, b):
np.testing.assert_equal(_binary_to_example(a), _binary_to_example(b))
@test_case.named_parameters(*(
example_proto_coder_test_cases.ENCODE_DECODE_CASES +
example_proto_coder_test_cases.DECODE_ONLY_CASES))
def test_decode(self, feature_spec, ascii_proto, instance, **kwargs):
schema = schema_utils.schema_from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(schema, **kwargs)
serialized_proto = _ascii_to_binary(ascii_proto)
np.testing.assert_equal(coder.decode(serialized_proto), instance)
@test_case.named_parameters(*(
example_proto_coder_test_cases.ENCODE_DECODE_CASES +
example_proto_coder_test_cases.DECODE_ONLY_CASES))
def test_decode_non_serialized(self, feature_spec, ascii_proto, instance,
**kwargs):
schema = schema_utils.schema_from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(
schema, serialized=False, **kwargs)
proto = _ascii_to_example(ascii_proto)
np.testing.assert_equal(coder.decode(proto), instance)
@test_case.named_parameters(*(
example_proto_coder_test_cases.ENCODE_DECODE_CASES +
example_proto_coder_test_cases.ENCODE_ONLY_CASES))
def test_encode(self, feature_spec, ascii_proto, instance, **kwargs):
schema = schema_utils.schema_from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(schema, **kwargs)
serialized_proto = _ascii_to_binary(ascii_proto)
self.assertSerializedProtosEqual(coder.encode(instance), serialized_proto)
@test_case.named_parameters(*(
example_proto_coder_test_cases.ENCODE_DECODE_CASES +
example_proto_coder_test_cases.ENCODE_ONLY_CASES))
def test_encode_non_serialized(self, feature_spec, ascii_proto, instance,
**kwargs):
schema = schema_utils.schema_from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(
schema, serialized=False, **kwargs)
proto = _ascii_to_example(ascii_proto)
np.testing.assert_equal(coder.encode(instance), proto)
@test_case.named_parameters(
*example_proto_coder_test_cases.DECODE_ERROR_CASES)
def test_decode_error(self,
feature_spec,
ascii_proto,
error_msg,
error_type=ValueError,
**kwargs):
schema = schema_utils.schema_from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(schema, **kwargs)
serialized_proto = _ascii_to_binary(ascii_proto)
with self.assertRaisesRegexp(error_type, error_msg):
coder.decode(serialized_proto)
@test_case.named_parameters(
*example_proto_coder_test_cases.ENCODE_ERROR_CASES)
def test_encode_error(self,
feature_spec,
instance,
error_msg,
error_type=ValueError,
**kwargs):
schema = schema_utils.schema_from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(schema, **kwargs)
with self.assertRaisesRegexp(error_type, error_msg):
coder.encode(instance)
def test_example_proto_coder_picklable(self):
schema = schema_utils.schema_from_feature_spec(
example_proto_coder_test_cases.FEATURE_SPEC)
coder = example_proto_coder.ExampleProtoCoder(schema)
ascii_proto = """
features {
feature { key: "scalar_feature_1" value { int64_list { value: [ 12 ] } } }
feature { key: "varlen_feature_1"
value { float_list { value: [ 89.0 ] } } }
feature { key: "scalar_feature_2" value { int64_list { value: [ 12 ] } } }
feature { key: "scalar_feature_3"
value { float_list { value: [ 2.0 ] } } }
feature { key: "1d_vector_feature"
value { bytes_list { value: [ 'this is a ,text' ] } } }
feature { key: "2d_vector_feature"
value { float_list { value: [ 1.0, 2.0, 3.0, 4.0 ] } } }
feature { key: "varlen_feature_2"
value { bytes_list { value: [ 'female' ] } } }
feature { key: "value" value { float_list { value: [ 12.0, 20.0 ] } } }
feature { key: "idx" value { int64_list { value: [ 1, 4 ] } } }
}
"""
instance = {
'scalar_feature_1': 12,
'scalar_feature_2': 12,
'scalar_feature_3': 2.0,
'varlen_feature_1': [89.0],
'1d_vector_feature': [b'this is a ,text'],
'2d_vector_feature': [[1.0, 2.0], [3.0, 4.0]],
'varlen_feature_2': [b'female'],
'idx': [1, 4],
'value': [12.0, 20.0],
}
serialized_proto = _ascii_to_binary(ascii_proto)
for _ in range(2):
coder = pickle.loads(pickle.dumps(coder))
np.testing.assert_equal(coder.decode(serialized_proto), instance)
self.assertSerializedProtosEqual(coder.encode(instance), serialized_proto)
def test_example_proto_coder_cache(self):
"""Test that the cache remains valid after reading/writing None."""
schema = schema_utils.schema_from_feature_spec({
'varlen': tf.io.VarLenFeature(tf.int64),
})
coder = example_proto_coder.ExampleProtoCoder(schema)
ascii_protos = [
'features {feature {key: "varlen" value {int64_list {value: [5] }}}}',
'features {feature {key: "varlen" value {}}}',
'features {feature {key: "varlen" value {int64_list {value: [6] }}}}',
]
instances = [{'varlen': [5]}, {'varlen': None}, {'varlen': [6]}]
serialized_protos = map(_ascii_to_binary, ascii_protos)
for instance, serialized_proto in zip(instances, serialized_protos):
np.testing.assert_equal(coder.decode(serialized_proto), instance)
self.assertSerializedProtosEqual(coder.encode(instance), serialized_proto)
if __name__ == '__main__':
test_case.main()
```
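A minimal round-trip through the coder under test, assuming tensorflow and tensorflow_transform are installed (the feature name and value are illustrative):
```python
schema = schema_utils.schema_from_feature_spec(
    {'x': tf.io.FixedLenFeature([], tf.int64)})
coder = example_proto_coder.ExampleProtoCoder(schema)
serialized = coder.encode({'x': 7})
assert coder.decode(serialized)['x'] == 7
```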
#### File: transform/tensorflow_transform/schema_inference_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# GOOGLE-INITIALIZATION
import tensorflow as tf
from tensorflow_transform import analyzers
from tensorflow_transform import common
from tensorflow_transform import mappers
from tensorflow_transform import schema_inference
from tensorflow_transform import test_case
from tensorflow_transform.tf_metadata import schema_utils_legacy
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import text_format
import unittest
from tensorflow_metadata.proto.v0 import schema_pb2
if common.IS_ANNOTATIONS_PB_AVAILABLE:
from tensorflow_transform import annotations_pb2 # pylint: disable=g-import-not-at-top
def _make_tensors_with_override():
x = tf.compat.v1.placeholder(tf.int64, (None,))
schema_inference.set_tensor_schema_override(x, tf.constant(5), tf.constant(6))
return {'x': x}
class SchemaInferenceTest(test_case.TransformTestCase):
# pylint: disable=g-long-lambda
@test_case.named_parameters(
dict(
testcase_name='fixed_len_int',
make_tensors_fn=lambda:
{'x': tf.compat.v1.placeholder(tf.int64, (None,))},
feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)}),
dict(
testcase_name='fixed_len_string',
make_tensors_fn=lambda:
{'x': tf.compat.v1.placeholder(tf.string, (None,))},
feature_spec={'x': tf.io.FixedLenFeature([], tf.string)}),
dict(
testcase_name='fixed_len_float',
make_tensors_fn=lambda:
{'x': tf.compat.v1.placeholder(tf.float32, (None,))},
feature_spec={'x': tf.io.FixedLenFeature([], tf.float32)}),
dict(
testcase_name='override',
make_tensors_fn=_make_tensors_with_override,
feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)},
domains={'x': schema_pb2.IntDomain(is_categorical=True)}),
dict(
testcase_name='override_with_session',
make_tensors_fn=_make_tensors_with_override,
feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)},
domains={
'x': schema_pb2.IntDomain(min=5, max=6, is_categorical=True)
},
create_session=True))
# pylint: enable=g-long-lambda
def test_infer_feature_schema(self,
make_tensors_fn,
feature_spec,
domains=None,
create_session=False):
with tf.compat.v1.Graph().as_default() as graph:
tensors = make_tensors_fn()
if create_session:
with tf.compat.v1.Session(graph=graph) as session:
schema = schema_inference.infer_feature_schema(tensors, graph, session)
else:
schema = schema_inference.infer_feature_schema(tensors, graph)
expected_schema = schema_utils.schema_from_feature_spec(
feature_spec, domains)
self.assertEqual(schema, expected_schema)
def test_infer_feature_schema_bad_rank(self):
with tf.compat.v1.Graph().as_default() as graph:
tensors = {
'a': tf.compat.v1.placeholder(tf.float32, ()),
}
with self.assertRaises(ValueError):
schema_inference.infer_feature_schema(tensors, graph)
@unittest.skipIf(not common.IS_ANNOTATIONS_PB_AVAILABLE,
'Schema annotations are not available')
def test_vocab_annotation(self):
with tf.compat.v1.Graph().as_default() as graph:
tensors = {
'foo': tf.convert_to_tensor([0, 1, 2, 3], dtype=tf.int64),
}
analyzers._maybe_annotate_vocab_metadata('file1',
tf.constant(100, dtype=tf.int64))
analyzers._maybe_annotate_vocab_metadata('file2',
tf.constant(200, dtype=tf.int64))
      # Create a session to actually evaluate the annotations and extract
      # the output schema with annotations applied.
with tf.compat.v1.Session(graph=graph) as session:
schema = schema_inference.infer_feature_schema(tensors, graph, session)
self.assertLen(schema.annotation.extra_metadata, 2)
sizes = {}
for annotation in schema.annotation.extra_metadata:
message = annotations_pb2.VocabularyMetadata()
annotation.Unpack(message)
sizes[message.file_name] = message.unfiltered_vocabulary_size
self.assertDictEqual(sizes, {'file1': 100, 'file2': 200})
@unittest.skipIf(not common.IS_ANNOTATIONS_PB_AVAILABLE,
'Schema annotations are not available')
def test_bucketization_annotation(self):
with tf.compat.v1.Graph().as_default() as graph:
inputs = {
'foo': tf.convert_to_tensor([0, 1, 2, 3]),
'bar': tf.convert_to_tensor([0, 2, 0, 2]),
}
boundaries_foo = tf.expand_dims(tf.convert_to_tensor([.5, 1.5]), axis=0)
boundaries_bar = tf.expand_dims(tf.convert_to_tensor([.1, .2]), axis=0)
outputs = {}
# tft.apply_buckets will annotate the feature in the output schema to
# indicate the bucket boundaries that were applied.
outputs['Bucketized_foo'] = mappers.apply_buckets(inputs['foo'],
boundaries_foo)
outputs['Bucketized_bar'] = mappers.apply_buckets(inputs['bar'],
boundaries_bar)
      # Create a session to actually evaluate the annotations and extract
      # the output schema with annotations applied.
with tf.compat.v1.Session(graph=graph) as session:
schema = schema_inference.infer_feature_schema(outputs, graph, session)
self.assertLen(schema.feature, 2)
for feature in schema.feature:
self.assertLen(feature.annotation.extra_metadata, 1)
for annotation in feature.annotation.extra_metadata:
# Extract the annotated message and validate its contents
message = annotations_pb2.BucketBoundaries()
annotation.Unpack(message)
if feature.name == 'Bucketized_foo':
self.assertAllClose(list(message.boundaries), [.5, 1.5])
elif feature.name == 'Bucketized_bar':
self.assertAllClose(list(message.boundaries), [.1, .2])
else:
raise RuntimeError('Unexpected features in schema')
@unittest.skipIf(not common.IS_ANNOTATIONS_PB_AVAILABLE,
'Schema annotations are not available')
def test_global_annotation(self):
# pylint: enable=g-import-not-at-top
with tf.compat.v1.Graph().as_default() as graph:
outputs = {
'foo': tf.convert_to_tensor([0, 1, 2, 3], dtype=tf.int64),
'bar': tf.convert_to_tensor([0, 2, 0, 2], dtype=tf.int64),
}
# Annotate an arbitrary proto at the schema level (not sure what global
# schema boundaries would mean, but hey I'm just a test).
boundaries = tf.constant([[1.0]])
message_type = annotations_pb2.BucketBoundaries.DESCRIPTOR.full_name
sizes = tf.expand_dims([tf.size(boundaries)], axis=0)
message_proto = tf.raw_ops.EncodeProto(
sizes=sizes, values=[tf.cast(boundaries, tf.float32)],
field_names=['boundaries'], message_type=message_type)[0]
type_url = os.path.join('type.googleapis.com', message_type)
schema_inference.annotate(type_url, message_proto)
with tf.compat.v1.Session(graph=graph) as session:
schema = schema_inference.infer_feature_schema(outputs, graph, session)
self.assertLen(schema.annotation.extra_metadata, 1)
for annotation in schema.annotation.extra_metadata:
# Extract the annotated message and validate its contents
message = annotations_pb2.BucketBoundaries()
annotation.Unpack(message)
self.assertAllClose(list(message.boundaries), [1])
def test_infer_feature_schema_with_ragged_tensor(self):
with tf.compat.v1.Graph().as_default() as graph:
outputs = {
'foo': tf.RaggedTensor.from_row_splits(
values=tf.constant([3, 1, 4, 1, 5, 9, 2, 6], tf.int64),
row_splits=[0, 4, 4, 7, 8, 8]),
}
with tf.compat.v1.Session(graph=graph) as session:
schema = schema_inference.infer_feature_schema(outputs, graph, session)
expected_schema_ascii = """feature {
name: "foo"
type: INT
annotation {
tag: "ragged_tensor"
}
}
"""
expected_schema = text_format.Parse(expected_schema_ascii,
schema_pb2.Schema())
schema_utils_legacy.set_generate_legacy_feature_spec(expected_schema,
False)
self.assertProtoEquals(expected_schema, schema)
with self.assertRaisesRegexp(ValueError,
'Feature "foo" had tag "ragged_tensor"'):
schema_utils.schema_as_feature_spec(schema)
if __name__ == '__main__':
unittest.main()
```
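A minimal sketch of the inference pattern these parameterized tests exercise (TF1-style graph mode, as above):

```python
import tensorflow as tf
from tensorflow_transform import schema_inference

with tf.compat.v1.Graph().as_default() as graph:
    # Per the 'fixed_len_int' case, a rank-1 placeholder infers to
    # a FixedLenFeature([], tf.int64).
    tensors = {'x': tf.compat.v1.placeholder(tf.int64, (None,))}
    schema = schema_inference.infer_feature_schema(tensors, graph)
print(schema)
```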
|
{
"source": "jechav/linkCollector",
"score": 3
}
|
#### File: linkCollector/fn/help.py
```python
from bs4 import BeautifulSoup as Soup
from termcolor import colored
import requests
from var import servers as Servers
def getUrl(URL, ROOT, LENGUAGUE, SERVER):
tmpURLS = []
tt = _REQUEST(URL)
if not tt: return False
soup = Soup(tt.content, 'html.parser')
trs = soup.find('table', {'class': 'table-Hover'}).find_all('tr')
if LENGUAGUE == 'all':
return _deepGet(ROOT, trs)
for ind, t in enumerate(trs):
tds = t.find_all('td')
img = tds[1].find('img')
img2 = tds[2].find('img')
if img and img2:
if img.get('src') == LENGUAGUE and img2.get('src') == SERVER:
tmpURLS.append( ROOT+tds[2].find('a').get('href') )
for ind, url in enumerate(tmpURLS):
vUrl = _getUrl2(url)
print(colored(vUrl, _getColor( _checkUp(vUrl, SERVER) )))
    if len(tmpURLS) < 1: _deepGet(ROOT, trs)  # fall back to _deepGet if nothing matched the language and server
def _deepGet(ROOT, trs):
print(colored('GOING DEEP !!!!!', 'cyan'))
flag = None
for ind, t in enumerate(trs):
tds = t.find_all('td')
img = tds[1].find('img')
img2 = tds[2].find('img')
if img and img2:
            if flag != img.get('src'):
flag = img.get('src')
print(colored(flag, 'cyan')) # print lenguague flag
url = ROOT+tds[2].find('a').get('href')
vUrl = _getUrl2(url)
print(colored(vUrl, _getColor( _checkUp(vUrl, img2.get('src')) )))
def _getUrl2(url):
s = url.split('/')
base = 'http://seriesblanco.com/ajax/load_enlace.php'
parms = '?serie={0}&temp={1}&cap={2}&id={3}'.format(s[4], s[5], s[6], s[7])
tt = _REQUEST(base+parms)
if not tt: return False
soup = Soup(tt.content, 'html.parser')
btn = soup.find('input', {'type': 'button'})
enlace = btn.get('onclick')
# print(enlace)
return enlace.split('"')[1]
def _checkUp(url, server):
# print(server)
if server not in {Servers.streamin, Servers.streamplay, Servers.openload}: return -1
    tt = _REQUEST(url)
if not tt: return -1
soup = Soup(tt.content, 'html.parser')
if server == Servers.openload:
return soup.find('video') != None
if server in {Servers.streamin, Servers.streamplay}:
return soup.find("input", {"id": "btn_download"}) != None
def _getColor(v):
if v == -1: return 'white'
if v: return 'green'
return 'red'
def tree(URLTREE):
tt = _REQUEST(URLTREE)
if not tt: return False
soup = Soup(tt.content, 'html.parser')
seasons = soup.findAll('a', {'class': 'panel-title'})
print(colored('Temporadas {}'.format(len(seasons)), 'green'))
seriesnumber = URLTREE.split('/')[4]
for ind, s in enumerate(seasons):
links = _getChapters(seriesnumber, ind+1)
print(colored('Temporada {0} - {1}'.format(ind+1, len(links)), 'blue'))
def _getChapters(s, seasons):
base = 'http://seriesblanco.com/ajax/visto3.php'
parms = '?season_id={0}&season_number={1}'.format(s, seasons)
    tt = _REQUEST(base+parms)
    if not tt: return []
    soup = Soup(tt.content, 'html.parser')
links = soup.findAll('tr', {'class': 'table-hover'})
return links
def _REQUEST(url):
try:
return requests.get(url)
except requests.exceptions.Timeout:
print(colored('Timeout', 'red'))
except requests.exceptions.TooManyRedirects:
print(colored('TooManyRedirects', 'red'))
except requests.exceptions.RequestException as e:
print(colored('RequestException', 'red'))
    return False
from subprocess import PIPE, Popen
def saveHistory():
command = 'history | tail -1 | cut -c 8-'
    print(colored(command, 'blue'))  # DEBUG
p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE)
    out, err = p.communicate()  # wait for completion
print(out)
```
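The module has no `__main__` driver; a hypothetical caller would look roughly like the sketch below. The URL and the language-flag path are made-up placeholders, since the real values come from the scraped site's markup and the `var.servers` constants.

```python
from fn import help as collector
from var import servers as Servers

# Placeholder arguments: LENGUAGUE is the flag-image src the site uses
# for the wanted language, SERVER one of the constants in var.servers.
collector.getUrl(
    URL='http://seriesblanco.com/serie/123/temporada-1/capitulo-01/',
    ROOT='http://seriesblanco.com',
    LENGUAGUE='/banderas/es.png',
    SERVER=Servers.openload,
)
```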
|
{
"source": "JE-Chen/APITestka",
"score": 2
}
|
#### File: je_api_testka/requests_wrapper/request_method.py
```python
import datetime
import sys
import requests
from je_api_testka.requests_wrapper.requests_http_method_wrapper import api_tester_method
from requests.structures import CaseInsensitiveDict
from je_api_testka.utils.exception.exceptions import APITesterGetDataException
from je_api_testka.utils.exception.exceptions import APITesterExecuteException
from je_api_testka.utils.get_data_strcture.get_api_data import get_api_response_data
from je_api_testka.utils.exception.exception_tag import get_data_error_message
from je_api_testka.utils.exception.exception_tag import get_error_message
from je_api_testka.utils.exception.exception_tag import put_error_message
from je_api_testka.utils.exception.exception_tag import delete_error_message
from je_api_testka.utils.exception.exception_tag import post_error_message
from je_api_testka.utils.exception.exception_tag import head_error_message
from je_api_testka.utils.exception.exception_tag import options_error_message
from je_api_testka.utils.exception.exception_tag import patch_error_message
from je_api_testka.utils.exception.exception_tag import session_error_message
from je_api_testka.utils.test_record.test_record_class import test_record_instance
from je_api_testka.utils.assert_result.result_check import check_result
exception_message_dict = {
"get": get_error_message,
"put": put_error_message,
"delete": delete_error_message,
"post": post_error_message,
"head": head_error_message,
"options": options_error_message,
"patch": patch_error_message,
"session_get": session_error_message,
"session_put": session_error_message,
"session_patch": session_error_message,
"session_post": session_error_message,
"session_head": session_error_message,
"session_delete": session_error_message,
"session_options": session_error_message,
}
def get_response(response: requests.Response,
start_time: [str, float, int],
end_time: [str, float, int]) -> dict:
"""
use requests response to create data dict
:param response: requests response
:param start_time: test start time
:param end_time: test end time
:return: data dict include [status_code, text, content, headers, history, encoding, cookies,
elapsed, request_time_sec, request_method, request_url, request_body, start_time, end_time]
"""
try:
return get_api_response_data(response, start_time, end_time)
except APITesterGetDataException:
raise APITesterGetDataException(get_data_error_message)
def test_api_method(http_method: str, test_url: str,
soap: bool = False, record_request_info: bool = True,
clean_record: bool = False, result_check_dict: dict = None, **kwargs) \
        -> dict:
    """
    set requests http_method, url and headers, send the request, then record the response and report
    :param http_method: http method to use for the request
    :param test_url: url to test
    :param soap: send the request as SOAP (sets an application/soap+xml content type)
    :param record_request_info: append the response data to the test record
    :param clean_record: clean the test record before this request
    :param result_check_dict: dict of expected name/value pairs to check against the response data
    :param kwargs: extra settings passed through to requests
    :return: dict with the raw response and the parsed response data
    """
try:
try:
start_time = datetime.datetime.now()
if soap is False:
response = api_tester_method(http_method, test_url=test_url, **kwargs)
else:
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/soap+xml"
return test_api_method(http_method, test_url=test_url, headers=headers, **kwargs)
end_time = datetime.datetime.now()
response_data = get_response(response, start_time, end_time)
if clean_record:
test_record_instance.clean_record()
if result_check_dict is None:
if record_request_info:
test_record_instance.test_record_list.append(response_data)
return {"response": response, "response_data": response_data}
else:
check_result(response_data, result_check_dict)
if record_request_info:
test_record_instance.test_record_list.append(response_data)
return {"response": response, "response_data": response_data}
        except APITesterExecuteException as error:
            raise APITesterExecuteException(repr(error)) from error
except Exception as error:
print(repr(error), file=sys.stderr)
test_record_instance.error_record_list.append([
{
"http_method": http_method,
"test_url": test_url,
"soap": soap,
"record_request_info": record_request_info,
"clean_record": clean_record,
"result_check_dict": result_check_dict
},
repr(error)]
)
```
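A minimal sketch of how the wrapper is meant to be driven (httpbin.org is used here only as a public echo service):

```python
from je_api_testka.requests_wrapper.request_method import test_api_method

# Plain GET; on success the dict carries the raw response and parsed data,
# on failure the error is recorded and None is returned.
result = test_api_method("get", "https://httpbin.org/get")
if result is not None:
    print(result["response_data"]["status_code"])

# With a result check: a mismatch is recorded in the error list
# and printed to stderr rather than raised to the caller.
test_api_method("get", "https://httpbin.org/get",
                result_check_dict={"status_code": 200})
```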
#### File: je_api_testka/requests_wrapper/requests_http_method_wrapper.py
```python
import sys
import requests
from requests import Session
from requests import delete
from requests import get
from requests import head
from requests import options
from requests import patch
from requests import post
from requests import put
from je_api_testka.utils.exception.exception_tag import http_method_have_wrong_type
from je_api_testka.utils.exception.exceptions import APITesterException
from je_api_testka.utils.exception.exception_tag import wrong_http_method_error_message
_session = Session()
http_method_dict = {
"get": get,
"put": put,
"patch": patch,
"post": post,
"head": head,
"delete": delete,
"options": options,
"session_get": _session.get,
"session_put": _session.put,
"session_patch": _session.patch,
"session_post": _session.post,
"session_head": _session.head,
"session_delete": _session.delete,
"session_options": _session.options,
}
def get_http_method(http_method: str) -> [
requests.get, requests.put, requests.patch, requests.post, requests.head, requests.delete,
    Session.get, Session.put, Session.patch, Session.post, Session.head, Session.delete, Session.options
]:
"""
:param http_method: what http method we use to api test
:return: one of method in http_method_dict if not exists will raise exception
"""
try:
if type(http_method) is not str:
raise APITesterException(wrong_http_method_error_message)
http_method = str(http_method).lower()
if http_method not in http_method_dict:
raise APITesterException(http_method_have_wrong_type)
return http_method_dict.get(http_method)
except APITesterException as error:
print(repr(error), file=sys.stderr)
def api_tester_method(http_method: str, test_url: str, **kwargs) -> requests.Response:
"""
:param http_method: what http method we use to api test
:param test_url: what url we want to test
:param kwargs: use to setting
:return: test response
"""
response = get_http_method(http_method)
if response is None:
raise APITesterException(wrong_http_method_error_message)
else:
response = response(test_url, **kwargs)
return response
```
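`http_method_dict` doubles as the registry of supported methods; a short usage sketch:

```python
from je_api_testka.requests_wrapper.requests_http_method_wrapper import api_tester_method

# "session_get" reuses the module-level Session (cookies persist between
# calls); plain "get" goes through requests.get instead.
response = api_tester_method("session_get", "https://httpbin.org/get")
print(response.status_code)
```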
#### File: utils/assert_result/result_check.py
```python
from je_api_testka.utils.exception.exceptions import APIAssertException
def check_result(result_dict: dict, result_check_dict: dict):
"""
:param result_dict: response result dict (get_api_response_data's return data)
:param result_check_dict: the dict include data name and value to check result_dict is valid or not
:return:
"""
for key, value in result_check_dict.items():
if result_dict.get(key) != value:
raise APIAssertException(
"value should be {right_value} but value was {wrong_value}".format(
right_value=value, wrong_value=result_dict.get(key)
)
)
```
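Only the keys named in `result_check_dict` are compared, so extra keys in the response data are ignored; a sketch:

```python
from je_api_testka.utils.assert_result.result_check import check_result

response_data = {"status_code": 200, "text": "ok", "encoding": "utf-8"}
check_result(response_data, {"status_code": 200})    # passes silently
# check_result(response_data, {"status_code": 404})  # raises APIAssertException
```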
#### File: utils/html_report/html_report_generate.py
```python
import sys
from je_api_testka.utils.test_record.test_record_class import test_record_instance
from je_api_testka.utils.exception.exceptions import HTMLException
from je_api_testka.utils.exception.exception_tag import html_generate_no_data_tag
from threading import Lock
lock = Lock()
_html_string_head = \
"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8"/>
<title>Load Density Report</title>
<style>
body{
font-size: 100%;
}
h1{
font-size: 2em;
}
.main_table {
margin: 0 auto;
border-collapse: collapse;
width: 75%;
font-size: 1.5em;
}
.success_table_head {
border: 3px solid #262626;
background-color: aqua;
font-family: "Times New Roman", sans-serif;
text-align: center;
}
.failure_table_head {
border: 3px solid #262626;
background-color: #f84c5f;
font-family: "Times New Roman", sans-serif;
text-align: center;
}
.table_data_field_title {
border: 3px solid #262626;
padding: 0;
margin: 0;
background-color: #dedede;
font-family: "Times New Roman", sans-serif;
text-align: center;
width: 25%;
}
.table_data_field_text {
border: 3px solid #262626;
padding: 0;
margin: 0;
background-color: #dedede;
font-family: "Times New Roman", sans-serif;
text-align: left;
width: 75%;
}
.text {
text-align: center;
font-family: "Times New Roman", sans-serif;
}
</style>
</head>
<body>
<h1 class="text">
Test Report
</h1>
""".strip()
_html_string_bottom = \
"""
</body>
</html>
""".strip()
_success_table = \
r"""
<table class="main_table">
<thead>
<tr>
<th colspan="2" class="success_table_head">Test Report</th>
</tr>
</thead>
<tbody>
<tr>
<td class="table_data_field_title">status_code</td>
<td class="table_data_field_text">{status_code}</td>
</tr>
<tr>
<td class="table_data_field_title">text</td>
<td class="table_data_field_text">{text}</td>
</tr>
<tr>
<td class="table_data_field_title">content</td>
<td class="table_data_field_text">{content}</td>
</tr>
<tr>
<td class="table_data_field_title">headers</td>
<td class="table_data_field_text">{headers}</td>
</tr>
<tr>
<td class="table_data_field_title">history</td>
<td class="table_data_field_text">{history}</td>
</tr>
<tr>
<td class="table_data_field_title">encoding</td>
<td class="table_data_field_text">{encoding}</td>
</tr>
<tr>
<td class="table_data_field_title">cookies</td>
<td class="table_data_field_text">{cookies}</td>
</tr>
<tr>
<td class="table_data_field_title">elapsed</td>
<td class="table_data_field_text">{elapsed}</td>
</tr>
<tr>
<td class="table_data_field_title">request_time_sec</td>
<td class="table_data_field_text">{request_time_sec}</td>
</tr>
<tr>
<td class="table_data_field_title">request_method</td>
<td class="table_data_field_text">{request_method}</td>
</tr>
<tr>
<td class="table_data_field_title">request_url</td>
<td class="table_data_field_text">{request_url}</td>
</tr>
<tr>
<td class="table_data_field_title">request_body</td>
<td class="table_data_field_text">{request_body}</td>
</tr>
<tr>
<td class="table_data_field_title">start_time</td>
<td class="table_data_field_text">{start_time}</td>
</tr>
<tr>
<td class="table_data_field_title">end_time</td>
<td class="table_data_field_text">{end_time}</td>
</tr>
</tbody>
</table>
<br>
""".strip()
_failure_table = \
r"""
<table class="main_table">
<thead>
<tr>
<th colspan="2" class="failure_table_head">Test Report</th>
</tr>
</thead>
<tbody>
<tr>
<td class="table_data_field_title">http_method</td>
<td class="table_data_field_text">{http_method}</td>
</tr>
<tr>
<td class="table_data_field_title">test_url</td>
<td class="table_data_field_text">{test_url}</td>
</tr>
<tr>
<td class="table_data_field_title">soap</td>
<td class="table_data_field_text">{soap}</td>
</tr>
<tr>
<td class="table_data_field_title">record_request_info</td>
<td class="table_data_field_text">{record_request_info}</td>
</tr>
<tr>
<td class="table_data_field_title">clean_record</td>
<td class="table_data_field_text">{clean_record}</td>
</tr>
<tr>
<td class="table_data_field_title">result_check_dict</td>
<td class="table_data_field_text">{result_check_dict}</td>
</tr>
<tr>
<td class="table_data_field_title">error</td>
<td class="table_data_field_text">{error}</td>
</tr>
</tbody>
</table>
<br>
""".strip()
def generate_html(html_name: str = "default_name"):
"""
:param html_name: save html file name
:return: html_string
"""
if len(test_record_instance.test_record_list) == 0 and len(test_record_instance.error_record_list) == 0:
raise HTMLException(html_generate_no_data_tag)
else:
success_list = list()
for record_data in test_record_instance.test_record_list:
success_list.append(
_success_table.format(
status_code=record_data.get("status_code"),
text=record_data.get("text"),
content=str(record_data.get("content"), encoding="utf-8"),
headers=record_data.get("headers"),
history=record_data.get("history"),
encoding=record_data.get("encoding"),
cookies=record_data.get("cookies"),
elapsed=record_data.get("elapsed"),
request_time_sec=record_data.get("request_time_sec"),
request_method=record_data.get("request_method"),
request_url=record_data.get("request_url"),
request_body=record_data.get("request_body"),
start_time=record_data.get("start_time"),
end_time=record_data.get("end_time"),
)
)
failure_list = list()
        if len(test_record_instance.error_record_list) > 0:
            for record_data in test_record_instance.error_record_list:
failure_list.append(
_failure_table.format(
http_method=record_data[0].get("http_method"),
test_url=record_data[0].get("test_url"),
soap=record_data[0].get("soap"),
record_request_info=record_data[0].get("record_request_info"),
clean_record=record_data[0].get("clean_record"),
result_check_dict=record_data[0].get("result_check_dict"),
error=record_data[1]
),
)
try:
lock.acquire()
with open(html_name + ".html", "w+") as file_to_write:
file_to_write.writelines(
_html_string_head
)
for success in success_list:
file_to_write.write(success)
for failure in failure_list:
file_to_write.write(failure)
file_to_write.writelines(
_html_string_bottom
)
except Exception as error:
print(repr(error), file=sys.stderr)
finally:
lock.release()
return success_list, failure_list
```
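Because the tables are filled from `test_record_instance`, a report only makes sense after at least one recorded request; a sketch of the intended flow:

```python
from je_api_testka.requests_wrapper.request_method import test_api_method
from je_api_testka.utils.html_report.html_report_generate import generate_html

test_api_method("get", "https://httpbin.org/get")
# Writes my_report.html in the working directory and returns the rendered
# success/failure table fragments.
success_tables, failure_tables = generate_html("my_report")
```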
|
{
"source": "JE-Chen/AutoControl",
"score": 2
}
|
#### File: linux_with_x11/listener/x11_linux_listener.py
```python
import sys
from je_auto_control.utils.exception.exceptions import AutoControlException
from je_auto_control.utils.exception.exception_tag import linux_import_error
from je_auto_control.utils.exception.exception_tag import listener_error
if sys.platform not in ["linux", "linux2"]:
raise AutoControlException(linux_import_error)
import Xlib.threaded
from Xlib.display import Display
from Xlib import X
from Xlib.ext import record
from Xlib.protocol import rq
from threading import Thread
# get current display
current_display = Display()
class KeypressHandler(Thread):
def __init__(self, default_daemon: bool = True):
"""
setDaemon : default damon is true
still listener : continue listener keycode ?
event_key_code : now current key code default is 0
"""
super().__init__()
self.setDaemon(default_daemon)
self.still_listener = True
self.record_flag = False
self.record_queue = None
self.event_keycode = 0
self.event_position = 0, 0
# two times because press and release
def check_is_press(self, keycode: int):
"""
:param keycode we want to check
"""
if keycode == self.event_keycode:
self.event_keycode = 0
return True
else:
return False
def run(self, reply):
"""
        :param reply: listener reply data
        read the raw data, then while data remains and we are
        still listening, parse each event out of it
"""
try:
data = reply.data
while len(data) and self.still_listener:
event, data = rq.EventField(None).parse_binary_value(data, current_display.display, None, None)
if event.detail != 0:
if event.type is X.ButtonRelease or event.type is X.KeyRelease:
self.event_keycode = event.detail
self.event_position = event.root_x, event.root_y
if self.record_flag is True:
temp = (event.type, event.detail, event.root_x, event.root_y)
self.record_queue.put(temp)
except AutoControlException:
raise AutoControlException(listener_error)
def record(self, record_queue):
"""
        :param record_queue: the queue that records actions
"""
self.record_flag = True
self.record_queue = record_queue
def stop_record(self):
self.record_flag = False
return self.record_queue
class XWindowsKeypressListener(Thread):
def __init__(self, default_daemon=True):
"""
        :param default_daemon: daemon thread, killed when the program exits
create handler
set root
"""
super().__init__()
self.setDaemon(default_daemon)
self.still_listener = True
self.handler = KeypressHandler()
self.root = current_display.screen().root
self.context = None
def check_is_press(self, keycode: int):
"""
:param keycode check this keycode is press?
"""
return self.handler.check_is_press(keycode)
def run(self):
"""
while still listener
get context
set handler
set test_record
get event
"""
if self.still_listener:
try:
# Monitor keypress and button press
if self.context is None:
self.context = current_display.record_create_context(
0,
[record.AllClients],
[{
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': (X.KeyReleaseMask, X.ButtonReleaseMask),
'errors': (0, 0),
'client_started': False,
'client_died': False,
}])
current_display.record_enable_context(self.context, self.handler.run)
current_display.record_free_context(self.context)
# keep running this to get event
next_event = self.root.display.next_event()
except AutoControlException:
raise AutoControlException(listener_error)
finally:
self.handler.still_listener = False
self.still_listener = False
def record(self, record_queue):
self.handler.record(record_queue)
def stop_record(self):
return self.handler.stop_record()
xwindows_listener = XWindowsKeypressListener()
xwindows_listener.start()
def check_key_is_press(keycode: int):
"""
:param keycode check this keycode is press?
"""
return xwindows_listener.check_is_press(keycode)
def x11_linux_record(record_queue):
"""
    :param record_queue: the queue that records actions
"""
xwindows_listener.record(record_queue)
def x11_linux_stop_record():
"""
stop test_record action
"""
return xwindows_listener.stop_record()
if __name__ == "__main__":
from queue import Queue
test_queue = Queue()
xwindows_listener.record(test_queue)
while True:
pass
```
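Since the listener thread starts at import time, callers just poll `check_key_is_press`; a sketch (Linux/X11 only; keycode 36 is Return on many layouts, which is an assumption rather than a guarantee):

```python
import time
from je_auto_control.linux_with_x11.listener.x11_linux_listener import check_key_is_press

# Poll until the key with X11 keycode 36 is released.
while not check_key_is_press(36):
    time.sleep(0.05)
print("key 36 released")
```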
#### File: utils/executor/action_executor.py
```python
import sys
from je_auto_control import AutoControlActionNullException
from je_auto_control import check_key_is_press
from je_auto_control import click_mouse
from je_auto_control import hotkey
from je_auto_control import keys_table
from je_auto_control import locate_all_image
from je_auto_control import locate_and_click
from je_auto_control import locate_image_center
from je_auto_control import mouse_table
from je_auto_control import position
from je_auto_control import press_key
from je_auto_control import press_mouse
from je_auto_control import release_key
from je_auto_control import release_mouse
from je_auto_control import screenshot
from je_auto_control import scroll
from je_auto_control import set_position
from je_auto_control import size
from je_auto_control import special_table
from je_auto_control import type_key
from je_auto_control import write
from je_auto_control.utils.exception.exception_tag import action_is_null_error
from je_auto_control.utils.exception.exception_tag import cant_execute_action_error
from je_auto_control.utils.exception.exceptions import AutoControlActionException
from je_auto_control.utils.json.json_file import read_action_json
from je_auto_control.utils.test_record.record_test_class import record_total
event_dict = {
# mouse
"mouse_left": click_mouse,
"mouse_right": click_mouse,
"mouse_middle": click_mouse,
"click_mouse": click_mouse,
"mouse_table": mouse_table,
"position": position,
"press_mouse": press_mouse,
"release_mouse": release_mouse,
"scroll": scroll,
"set_position": set_position,
"special_table": special_table,
# keyboard
"keys_table": keys_table,
"type_key": type_key,
"press_key": press_key,
"release_key": release_key,
"check_key_is_press": check_key_is_press,
"write": write,
"hotkey": hotkey,
# image
"locate_all_image": locate_all_image,
"locate_image_center": locate_image_center,
"locate_and_click": locate_and_click,
# screen
"size": size,
"screenshot": screenshot
}
def execute_action(action_list: list):
"""
    use to execute every action in an action list (from an action file or a program list)
    :param action_list: the list of actions
    loop over the list and execute each action
"""
execute_record_string = ""
try:
if action_list is None:
raise AutoControlActionNullException(action_is_null_error)
for action in action_list:
event = event_dict.get(action[0])
if len(action) == 2:
event(**action[1])
elif len(action) == 1:
event()
else:
raise AutoControlActionException(cant_execute_action_error)
temp_string = "execute: " + str(action)
print(temp_string)
execute_record_string = "".join([execute_record_string, temp_string + "\n"])
except Exception as error:
record_total("execute_action", action_list, repr(error))
print(repr(error), file=sys.stderr)
return execute_record_string
def execute_files(execute_files_list: list):
"""
:param execute_files_list: list include execute files path
:return: every execute detail as list
"""
execute_detail_list = list()
for file in execute_files_list:
execute_detail_list.append(execute_action(read_action_json(file)))
return execute_detail_list
```
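The action format is a list of `[event_name, kwargs]` pairs keyed into `event_dict`; a sketch (coordinates are illustrative, and the string keycode assumes `click_mouse` resolves names via `mouse_table`):

```python
from je_auto_control.utils.executor.action_executor import execute_action

actions = [
    ["set_position", {"x": 100, "y": 200}],            # move the cursor
    ["click_mouse", {"mouse_keycode": "mouse_left"}],  # left click
    ["screenshot"],                                    # one-element actions run with no args
]
print(execute_action(actions))
```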
#### File: utils/json/json_file.py
```python
import json
from pathlib import Path
from threading import Lock
from je_auto_control.utils.exception.exceptions import AutoControlJsonActionException
from je_auto_control.utils.exception.exception_tag import cant_save_json_error
from je_auto_control.utils.exception.exception_tag import cant_find_json_error
lock = Lock()
def read_action_json(json_file_path: str):
"""
use to read action file
:param json_file_path json file's path to read
"""
try:
lock.acquire()
file_path = Path(json_file_path)
if file_path.exists() and file_path.is_file():
with open(json_file_path) as read_file:
return json.loads(read_file.read())
except AutoControlJsonActionException:
raise AutoControlJsonActionException(cant_find_json_error)
finally:
lock.release()
def write_action_json(json_save_path: str, action_json: list):
"""
use to save action file
:param json_save_path json save path
:param action_json the json str include action to write
"""
try:
lock.acquire()
with open(json_save_path, "w+") as file_to_write:
file_to_write.write(json.dumps(action_json))
except AutoControlJsonActionException:
raise AutoControlJsonActionException(cant_save_json_error)
finally:
lock.release()
```
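A write/read round trip; the stored structure is whatever `execute_action` consumes:

```python
from je_auto_control.utils.json.json_file import read_action_json, write_action_json

actions = [["set_position", {"x": 10, "y": 10}]]
write_action_json("actions.json", actions)
assert read_action_json("actions.json") == actions
```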
#### File: je_auto_control/wrapper/auto_control_image.py
```python
import sys
from je_auto_control.utils.image import template_detection
from je_auto_control.utils.exception.exception_tag import cant_find_image
from je_auto_control.utils.exception.exception_tag import find_image_error_variable
from je_auto_control.utils.exception.exceptions import ImageNotFoundException
from je_auto_control.wrapper.auto_control_mouse import click_mouse
from je_auto_control.wrapper.auto_control_mouse import set_position
from je_auto_control.utils.image.screenshot import pil_screenshot
from je_auto_control.utils.test_record.record_test_class import record_total
def locate_all_image(image, detect_threshold: [float, int] = 1, draw_image: bool = False):
"""
use to locate all image that detected and then return detected images list
:param image which image we want to find on screen (png or PIL ImageGrab.grab())
:param detect_threshold detect precision 0.0 ~ 1.0; 1 is absolute equal (float or int)
:param draw_image draw detect tag on return image (bool)
"""
param = locals()
try:
try:
image_data_array = template_detection.find_image_multi(image, detect_threshold, draw_image)
except ImageNotFoundException as error:
raise ImageNotFoundException(find_image_error_variable + " " + repr(error))
if image_data_array[0] is True:
record_total("locate_all_image", param)
return image_data_array[1]
else:
raise ImageNotFoundException(cant_find_image)
except Exception as error:
record_total("locate_all_image", param, repr(error))
print(repr(error), file=sys.stderr)
def locate_image_center(image, detect_threshold: [float, int] = 1, draw_image: bool = False):
"""
use to locate image and return image center position
:param image which image we want to find on screen (png or PIL ImageGrab.grab())
:param detect_threshold detect precision 0.0 ~ 1.0; 1 is absolute equal (float or int)
:param draw_image draw detect tag on return image (bool)
"""
param = locals()
try:
try:
image_data_array = template_detection.find_image(image, detect_threshold, draw_image)
except ImageNotFoundException as error:
raise ImageNotFoundException(find_image_error_variable + " " + repr(error))
if image_data_array[0] is True:
height = image_data_array[1][2] - image_data_array[1][0]
width = image_data_array[1][3] - image_data_array[1][1]
center = [int(height / 2), int(width / 2)]
record_total("locate_image_center", param)
return [image_data_array[1][0] + center[0], image_data_array[1][1] + center[1]]
else:
raise ImageNotFoundException(cant_find_image)
except Exception as error:
record_total("locate_image_center", param, repr(error))
print(repr(error), file=sys.stderr)
def locate_and_click(image, mouse_keycode: [int, str], detect_threshold: [float, int] = 1, draw_image: bool = False):
"""
use to locate image and click image center position and the return image center position
:param image which image we want to find on screen (png or PIL ImageGrab.grab())
:param mouse_keycode which mouse keycode we want to click
:param detect_threshold detect precision 0.0 ~ 1.0; 1 is absolute equal (float or int)
:param draw_image draw detect tag on return image (bool)
"""
param = locals()
try:
try:
image_data_array = template_detection.find_image(image, detect_threshold, draw_image)
except ImageNotFoundException:
raise ImageNotFoundException(find_image_error_variable)
if image_data_array[0] is True:
height = image_data_array[1][2] - image_data_array[1][0]
width = image_data_array[1][3] - image_data_array[1][1]
center = [int(height / 2), int(width / 2)]
image_center_x = image_data_array[1][0] + center[0]
image_center_y = image_data_array[1][1] + center[1]
set_position(int(image_center_x), int(image_center_y))
click_mouse(mouse_keycode)
record_total("locate_and_click", param)
return [image_center_x, image_center_y]
else:
raise ImageNotFoundException(cant_find_image)
except Exception as error:
record_total("locate_and_click", param, repr(error))
print(repr(error), file=sys.stderr)
def screenshot(file_path: str = None, region: list = None):
"""
use to get now screen image return image
:param file_path save screenshot path (None is no save)
:param region screenshot region (screenshot region on screen)
"""
param = locals()
try:
record_total("screenshot", param)
return pil_screenshot(file_path, region)
except Exception as error:
print(repr(error), file=sys.stderr)
record_total("screenshot", param, repr(error))
```
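A sketch of the locate flow; `test_image.png` is a placeholder path, and the threshold is loosened to 0.9 because an exact 1.0 match rarely fires on a real screen:

```python
from je_auto_control.wrapper.auto_control_image import locate_and_click, locate_image_center

# Errors are swallowed and printed to stderr, so a miss returns None.
center = locate_image_center("test_image.png", detect_threshold=0.9)
if center is not None:
    print("found at", center)

# Or locate and left-click the center in one call.
locate_and_click("test_image.png", "mouse_left", detect_threshold=0.9)
```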
#### File: je_auto_control/wrapper/auto_control_record.py
```python
import sys
from je_auto_control.utils.executor.action_executor import execute_action
from je_auto_control.utils.exception.exception_tag import macos_record_error
from je_auto_control.utils.exception.exceptions import AutoControlException
from je_auto_control.utils.exception.exceptions import AutoControlJsonActionException
from je_auto_control.wrapper.platform_wrapper import recorder
from je_auto_control.utils.test_record.record_test_class import record_total
def record():
"""
    start recording keyboard and mouse events until stop_record is called
"""
try:
if sys.platform == "darwin":
raise AutoControlException(macos_record_error)
record_total("record", None)
return recorder.record()
except Exception as error:
record_total("record", None, repr(error))
print(repr(error), file=sys.stderr)
def stop_record():
"""
stop current record
"""
try:
if sys.platform == "darwin":
raise AutoControlException(macos_record_error)
action_queue = recorder.stop_record()
if action_queue is None:
raise AutoControlJsonActionException
action_list = list(action_queue.queue)
new_list = list()
for action in action_list:
if action[0] == "type_key":
new_list.append([action[0], dict([["keycode", action[1]]])])
else:
new_list.append([action[0], dict(zip(["mouse_keycode", "x", "y"], [action[0], action[1], action[2]]))])
record_total("stop_record", None)
return new_list
except Exception as error:
record_total("stop_record", None, repr(error))
print(repr(error), file=sys.stderr)
```
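A record/replay round trip (not available on macOS, as the guards above enforce):

```python
import time
from je_auto_control.utils.executor.action_executor import execute_action
from je_auto_control.wrapper.auto_control_record import record, stop_record

record()                    # start capturing keyboard/mouse events
time.sleep(5)               # ...interact with the machine here...
actions = stop_record()     # normalized [event_name, kwargs] pairs
if actions is not None:
    execute_action(actions) # replay what was captured
```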
|
{
"source": "JE-Chen/finalProject_nknySystem",
"score": 2
}
|
#### File: APIBlueprints/Grade/StudentGradeList.py
```python
import json
from flask import Blueprint, render_template, session, redirect, url_for
from flask_cors import cross_origin
from Project.Resource import RestfulAPIResource
SQL = RestfulAPIResource.SQL
StudentGradeList = Blueprint('StudentGradeList', __name__)
@StudentGradeList.route(r'/GET/StudentGradeList')
@cross_origin()
def student_grade_list_page():
if session.get('Login') == 'Login':
return render_template('/Grade/StudentGradeList.html')
else:
return redirect(url_for('Login.login_page'))
@StudentGradeList.route(r'/GET/StudentGradeList/PersonnelNumber', methods=['GET', ])
@cross_origin()
def manager_student_lesson_list():
SQL.table_name = 'LessonGrade'
SQL.select_prefix = '*'
Lists = SQL.select_where('PersonnelNumber', session.get('PersonnelNumber'))
print(Lists)
return json.dumps(Lists)
```
#### File: APIBlueprints/Lesson/LessonList.py
```python
import json
from flask import Blueprint, render_template, session, redirect, url_for, request
from flask_cors import cross_origin
from Project.Resource import RestfulAPIResource
SQL = RestfulAPIResource.SQL
LessonList = Blueprint('LessonList', __name__)
@LessonList.route(r'/GET/LessonList')
@cross_origin()
def lesson_list_page():
if session.get('Login') == 'Login':
return render_template('/Lesson/LessonList.html')
else:
return redirect(url_for('Login.login_page'))
@LessonList.route(r'/GET/LessonListContent', methods=['GET', ])
@cross_origin()
def lesson_list():
SQL.table_name = 'LessonContent'
SQL.select_prefix = '*'
Semester = request.args.get('Semester')
if Semester is None:
Semester = '109'
LessonContents = SQL.select_where('Semester', Semester)
print(LessonContents)
session['LessonContents'] = LessonContents
return render_template('/Lesson/LessonList.html')
@LessonList.route(r'/GET/LessonListContent/AJAX', methods=['GET', ])
@cross_origin()
def get_lesson_list():
data = session.get('LessonContents')
if data is None:
return redirect(url_for('LessonList.lesson_list_page'))
else:
return json.dumps(data)
```
#### File: JECryptography/Core/CryptographyCore.py
```python
import datetime
from JECryptography.Module.Decryption import Decryption
from JECryptography.Module.Encryption import Encryption
from JECryptography.Module.Hash import Hash
class CryptographyCore:
def __init__(self):
try:
self.Hash = Hash()
self.Encryption = Encryption()
self.Decryption = Decryption()
except Exception as error:
raise error
print(datetime.datetime.now(), self.__class__, 'Ready', sep=' ')
```
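A sketch of the facade in use; the hash method names follow the unit test later in this dump, and what they return is not shown there, so the print is illustrative:

```python
from JECryptography.Core.CryptographyCore import CryptographyCore

core = CryptographyCore()
# Hash exposes hash_md5/hash_sha1/.../hash_sha512 per the unit test below.
print(core.Hash.hash_sha256("12345123451234512345"))
```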
|
{
"source": "JE-Chen/je_editor",
"score": 2
}
|
#### File: ui_event/encoding/set_encoding.py
```python
def set_encoding(exec_manager, encoding):
"""
:param exec_manager: which program exec manage change encoding
:param encoding: which encoding choose
"""
exec_manager.program_encoding = encoding
```
#### File: open_file/open_last_edit_file/open_last_edit_file.py
```python
from je_editor.utils.file.open.open_file import read_file
def open_last_edit_file(file_from_output_content, code_editor):
"""
:param file_from_output_content: readied file from output content
:param code_editor the editor to insert file content
:return readied file
open last edit file
if success open file
insert file content to code_editor
"""
temp_to_check_file = read_file(file_from_output_content)
if temp_to_check_file is not None:
code_editor.delete("1.0", "end-1c")
code_editor.insert("end-1c", temp_to_check_file[1])
return temp_to_check_file[0]
```
#### File: ui_event/tag_keyword/tag_keyword.py
```python
from tkinter import IntVar
from je_editor.ui.ui_utils.keyword.keyword_list import keyword_list
from je_editor.ui.ui_utils.theme.theme import theme_dict
class HighlightText(object):
def __init__(self, tkinter_text, start_position="1.0", end_position="end"):
"""
:param tkinter_text: want to set highlight's tkinter text
:param start_position: search start position
:param end_position: search end position
"""
self.tkinter_text = tkinter_text
self.start_position = start_position
self.end_position = end_position
# theme dict on theme
self.theme = theme_dict
# use regexp
self.tkinter_text.regexp = True
# bind to keyboard key release
self.tkinter_text.bind("<KeyRelease>", self.search)
def search(self, event=None):
"""
:param event: tkinter event
create temp var tag
remove tag
search all word in keyword_list and tag
"""
tag = "temp"
for tag in self.tkinter_text.tag_names():
self.tkinter_text.tag_remove(tag, self.start_position, self.end_position)
count_var = IntVar()
for word in keyword_list:
position = '1.0'
self.tkinter_text.tag_config(word, foreground=self.theme.get("tag_keyword_color"))
while self.tkinter_text.compare(position, "<", "end"):
find_function_index = self.tkinter_text.search(
"\m" + word + "\M",
position, self.end_position,
count=count_var,
regexp=True
)
if not find_function_index:
break
position = '{}+{}c'.format(find_function_index, len(word))
                self.tkinter_text.tag_add(word, find_function_index, position)
```
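Wiring the highlighter onto a Text widget is a one-liner; a minimal sketch, assuming the package path in the file header:

```python
import tkinter

from je_editor.ui.ui_event.tag_keyword.tag_keyword import HighlightText

root = tkinter.Tk()
text = tkinter.Text(root)
text.pack()
HighlightText(text)  # binds <KeyRelease>, so keywords re-highlight as you type
root.mainloop()
```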
#### File: text_process/shell/shell_text.py
```python
import subprocess
from je_editor.utils.exception.je_editor_exceptions import JEditorRunOnShellException
def run_on_shell(run_source):
"""
:param run_source: string command to run
:return: if error return result and True else return result and False
"""
try:
exec_result = subprocess.getoutput(run_source)
return exec_result, False
except Exception as error:
return str(error), True
```
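`run_on_shell` never raises; callers branch on the returned error flag:

```python
from je_editor.utils.text_process.shell.shell_text import run_on_shell

output, failed = run_on_shell("echo hello")
if failed:
    print("command failed:", output)
else:
    print(output)
```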
#### File: ui_utils/editor_content/content_save.py
```python
import json
import os
from pathlib import Path
from threading import Lock
from je_editor.utils.exception.je_editor_exceptions import JEditorContentFileException
cwd = os.getcwd()
lock = Lock()
editor_data = {
"last_file": None
}
def read_output_content():
"""
read the editor content
"""
try:
lock.acquire()
file_path = Path(cwd + "/je_editor_content.json")
if file_path.exists() and file_path.is_file():
with open(cwd + "/je_editor_content.json", "r+") as read_file:
return read_file.read()
except JEditorContentFileException:
raise JEditorContentFileException
finally:
lock.release()
def write_output_content():
"""
write the editor content
"""
try:
lock.acquire()
with open(cwd + "/je_editor_content.json", "w+") as file_to_write:
file_to_write.write(json.dumps(editor_data))
except JEditorContentFileException:
raise JEditorContentFileException
finally:
lock.release()
def save_content_and_quit(file):
"""
set content data and write
"""
editor_data["last_file"] = file
write_output_content()
def open_content_and_start():
"""
read data and set content
"""
temp_content = read_output_content()
if temp_content is not None:
editor_data["last_file"] = json.loads(temp_content).get("last_file")
return editor_data.get("last_file")
```
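The intended lifecycle is one read at startup and one write at exit; a sketch:

```python
from je_editor.ui.ui_utils.editor_content.content_save import (
    open_content_and_start, save_content_and_quit)

last_file = open_content_and_start()   # restore last edited file (None on first run)
print("resume editing:", last_file)
save_content_and_quit("example.py")    # persist the current file for next session
```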
#### File: ui_utils/font/font.py
```python
from tkinter import font
from tkinter.font import Font
def get_font(root, **kwargs):
"""
:param root: get current font families
:param kwargs: another param
"""
return font.families(root, **kwargs)
def create_new_font(font_family: str, font_size: int = 12, **kwargs):
"""
:param font_family: which font family choose to create new font
:param font_size: font size
:param kwargs: another param
"""
new_font = Font(font=(font_family, font_size), **kwargs)
return new_font
```
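Font helpers need a live Tk root; a short sketch:

```python
import tkinter

from je_editor.ui.ui_utils.font.font import create_new_font, get_font

root = tkinter.Tk()
print(get_font(root)[:5])                        # first few installed families
mono = create_new_font("Courier", font_size=14)  # tkinter.font.Font instance
tkinter.Label(root, text="hello", font=mono).pack()
root.mainloop()
```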
|
{
"source": "JE-Chen/je_old_repo",
"score": 3
}
|
#### File: je_old_repo/DesktopTool_JE/test.py
```python
import ctypes
import os
import time
import cv2
def change_bg(image_path):
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, image_path, 3)
def show_video_bg(video):
Cap = cv2.VideoCapture(video)
while True:
ret, frame = Cap.read()
if ret is True:
print(time.time())
cv2.imwrite("test.jpg", frame)
print(time.time())
change_bg(os.getcwd() + "\\test.jpg")
print(time.time())
else:
Cap.release()
cv2.destroyAllWindows()
show_video_bg("never gonna give you up.mp4")
```
#### File: APIBlueprints/Profile/Profile.py
```python
from flask import Blueprint, render_template, redirect, session, url_for
from flask_cors import cross_origin
Profile = Blueprint('Profile', __name__)
@Profile.route(r'/GET/Profile')
@cross_origin()
def profile_page():
if session.get('Login') == 'Login':
return render_template('/Profile/Profile.html')
else:
return redirect(url_for('Login.login_page'))
```
#### File: Tests/CryptographyTest/UnitTest.py
```python
import unittest
import JECryptography
class Cryptography(unittest.TestCase):
@staticmethod
def testCryptography():
Hash = JECryptography.Hash()
Hash.hash_md5("12345123451234512345")
Hash.hash_sha1("12345123451234512345")
Hash.hash_sha224("12345123451234512345")
Hash.hash_sha256("12345123451234512345")
Hash.hash_sha384("12345123451234512345")
Hash.hash_sha512("12345123451234512345")
if __name__ == '__main__':
suite = (unittest.TestLoader().loadTestsFromTestCase(Cryptography))
unittest.TextTestRunner(verbosity=2).run(suite)
```
#### File: Tests/WebSocketTest/ServerUnitTest.py
```python
import sys
import time
import unittest
import JEWebSocket
class TestServer(unittest.TestCase):
def tearDown(self) -> None:
pass
def setUp(self) -> None:
pass
def testServer(self):
websocket = JEWebSocket.WebsocketServer("localhost", 5555)
time.sleep(3)
```
#### File: werkzeug/debug/tbtools.py
```python
import codecs
import inspect
import json
import os
import re
import sys
import sysconfig
import traceback
from tokenize import TokenError
from .._compat import PY2
from .._compat import range_type
from .._compat import reraise
from .._compat import string_types
from .._compat import text_type
from .._compat import to_native
from .._compat import to_unicode
from ..filesystem import get_filesystem_encoding
from ..utils import cached_property
from ..utils import escape
from .console import Console
_coding_re = re.compile(br"coding[:=]\s*([-\w.]+)")
_line_re = re.compile(br"^(.*?)$", re.MULTILINE)
_funcdef_re = re.compile(r"^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)")
UTF8_COOKIE = b"\xef\xbb\xbf"
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u"""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css"
type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does
not by accident trigger a request to /favicon.ico which might
change the application state. -->
<link rel="shortcut icon"
href="?__debugger__=yes&cmd=resource&f=console.png">
<script src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
EVALEX_TRUSTED = %(evalex_trusted)s,
SECRET = "%(secret)s";
</script>
</head>
<body style="background-color: #fff">
<div class="debugger">
"""
FOOTER = u"""\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
<div class="pin-prompt">
<div class="inner">
<h3>Console Locked</h3>
<p>
The console is locked and needs to be unlocked by entering the PIN.
You can find the PIN printed out on the standard output of your
shell that runs the server.
<form>
<p>PIN:
<input type=text name=pin size=14>
<input type=submit name=btn value="Confirm Pin">
</form>
</div>
</div>
</body>
</html>
"""
PAGE_HTML = (
HEADER
+ u"""\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
"""
+ FOOTER
+ """
<!--
%(plaintext_cs)s
-->
"""
)
CONSOLE_HTML = (
HEADER
+ u"""\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
"""
+ FOOTER
)
SUMMARY_HTML = u"""\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
"""
FRAME_HTML = u"""\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<div class="source %(library)s">%(lines)s</div>
</div>
"""
SOURCE_LINE_HTML = u"""\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
"""
def render_console_html(secret, evalex_trusted=True):
return CONSOLE_HTML % {
"evalex": "true",
"evalex_trusted": "true" if evalex_trusted else "false",
"console": "true",
"title": "Console",
"secret": secret,
"traceback_id": -1,
}
def get_current_traceback(
ignore_system_exceptions=False, show_hidden_frames=False, skip=0
):
"""Get the current exception info as `Traceback` object. Per default
calling this method will reraise system exceptions such as generator exit,
system exit or others. This behavior can be disabled by passing `False`
to the function as first parameter.
"""
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
reraise(exc_type, exc_value, tb)
for _ in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
class Line(object):
"""Helper for the source renderer."""
__slots__ = ("lineno", "code", "in_frame", "current")
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
@property
def classes(self):
rv = ["line"]
if self.in_frame:
rv.append("in-frame")
if self.current:
rv.append("current")
return rv
def render(self):
return SOURCE_LINE_HTML % {
"classes": u" ".join(self.classes),
"lineno": self.lineno,
"code": escape(self.code),
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
exception_type = exc_type.__name__
if exc_type.__module__ not in {"builtins", "__builtin__", "exceptions"}:
exception_type = exc_type.__module__ + "." + exception_type
self.exception_type = exception_type
self.groups = []
memo = set()
while True:
self.groups.append(Group(exc_type, exc_value, tb))
memo.add(id(exc_value))
if PY2:
break
exc_value = exc_value.__cause__ or exc_value.__context__
if exc_value is None or id(exc_value) in memo:
break
exc_type = type(exc_value)
tb = exc_value.__traceback__
self.groups.reverse()
self.frames = [frame for group in self.groups for frame in group.frames]
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
for group in self.groups:
group.filter_hidden_frames()
self.frames[:] = [frame for group in self.groups for frame in group.frames]
@property
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
@property
def exception(self):
"""String representation of the final exception."""
return self.groups[-1].exception
def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.rstrip() + u"\n"
logfile.write(to_native(tb, "utf-8", "replace"))
def paste(self):
"""Create a paste and return the paste id."""
data = json.dumps(
{
"description": "Werkzeug Internal Server Error",
"public": False,
"files": {"traceback.txt": {"content": self.plaintext}},
}
).encode("utf-8")
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
rv = urlopen("https://api.github.com/gists", data=data)
resp = json.loads(rv.read().decode("utf-8"))
rv.close()
return {"url": resp["html_url"], "id": resp["id"]}
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ""
classes = ["traceback"]
if not self.frames:
classes.append("noframe-traceback")
frames = []
else:
library_frames = sum(frame.is_library for frame in self.frames)
mark_lib = 0 < library_frames < len(self.frames)
frames = [group.render(mark_lib=mark_lib) for group in self.groups]
if include_title:
if self.is_syntax_error:
title = u"Syntax Error"
else:
title = u"Traceback <em>(most recent call last)</em>:"
if self.is_syntax_error:
description_wrapper = u"<pre class=syntaxerror>%s</pre>"
else:
description_wrapper = u"<blockquote>%s</blockquote>"
return SUMMARY_HTML % {
"classes": u" ".join(classes),
"title": u"<h3>%s</h3>" % title if title else u"",
"frames": u"\n".join(frames),
"description": description_wrapper % escape(self.exception),
}
def render_full(self, evalex=False, secret=None, evalex_trusted=True):
"""Render the Full HTML page with the traceback info."""
exc = escape(self.exception)
return PAGE_HTML % {
"evalex": "true" if evalex else "false",
"evalex_trusted": "true" if evalex_trusted else "false",
"console": "false",
"title": exc,
"exception": exc,
"exception_type": escape(self.exception_type),
"summary": self.render_summary(include_title=False),
"plaintext": escape(self.plaintext),
"plaintext_cs": re.sub("-{2,}", "-", self.plaintext),
"traceback_id": self.id,
"secret": secret,
}
@cached_property
def plaintext(self):
return u"\n".join([group.render_text() for group in self.groups])
@property
def id(self):
return id(self)
class Group(object):
"""A group of frames for an exception in a traceback. On Python 3,
if the exception has a ``__cause__`` or ``__context__``, there are
multiple exception groups.
"""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
self.info = None
if not PY2:
if exc_value.__cause__ is not None:
self.info = (
u"The above exception was the direct cause of the"
u" following exception"
)
elif exc_value.__context__ is not None:
self.info = (
u"During handling of the above exception, another"
u" exception occurred"
)
self.frames = []
while tb is not None:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ("before", "before_and_this"):
new_frames = []
hidden = False
if hide == "before_and_this":
continue
elif hide in ("reset", "reset_and_this"):
hidden = False
if hide == "reset_and_this":
continue
elif hide in ("after", "after_and_this"):
hidden = True
if hide == "after_and_this":
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == "codeop":
del self.frames[:]
# if the last frame is missing, something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
@property
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_value)
rv = "".join(buf).strip()
return to_unicode(rv, "utf-8", "replace")
def render(self, mark_lib=True):
out = []
if self.info is not None:
out.append(u'<li><div class="exc-divider">%s:</div>' % self.info)
for frame in self.frames:
out.append(
u"<li%s>%s"
% (
u' title="%s"' % escape(frame.info) if frame.info else u"",
frame.render(mark_lib=mark_lib),
)
)
return u"\n".join(out)
def render_text(self):
out = []
if self.info is not None:
out.append(u"\n%s:\n" % self.info)
out.append(u"Traceback (most recent call last):")
for frame in self.frames:
out.append(frame.render_text())
out.append(self.exception)
return u"\n".join(out)
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in (".pyo", ".pyc"):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = to_unicode(fn, get_filesystem_encoding())
self.module = self.globals.get("__name__", self.locals.get("__name__"))
self.loader = self.globals.get("__loader__", self.locals.get("__loader__"))
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get("__traceback_hide__", False)
info = self.locals.get("__traceback_info__")
if info is not None:
info = to_unicode(info, "utf-8", "replace")
self.info = info
def render(self, mark_lib=True):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
"id": self.id,
"filename": escape(self.filename),
"lineno": self.lineno,
"function_name": escape(self.function_name),
"lines": self.render_line_context(),
"library": "library" if mark_lib and self.is_library else "",
}
@cached_property
def is_library(self):
return any(
self.filename.startswith(path) for path in sysconfig.get_paths().values()
)
def render_text(self):
return u' File "%s", line %s, in %s\n %s' % (
self.filename,
self.lineno,
self.function_name,
self.current_line.strip(),
)
def render_line_context(self):
before, current, after = self.get_context_lines()
rv = []
def render_line(line, cls):
line = line.expandtabs().rstrip()
stripped_line = line.strip()
prefix = len(line) - len(stripped_line)
rv.append(
'<pre class="line %s"><span class="ws">%s</span>%s</pre>'
% (cls, " " * prefix, escape(stripped_line) or " ")
)
for line in before:
render_line(line, "before")
render_line(current, "current")
for line in after:
render_line(line, "after")
return "\n".join(rv)
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, "co_firstlineno"):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + "\n" for x in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno : lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def eval(self, code, mode="single"):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, text_type): # noqa
code = UTF8_COOKIE + code.encode("utf-8")
code = compile(code, "<interactive>", mode)
return eval(code, self.globals, self.locals)
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, "get_source"):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, "get_source_by_code"):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we munch the exception so that we don't cause troubles
# if the loader is broken.
pass
if source is None:
try:
with open(
to_native(self.filename, get_filesystem_encoding()), mode="rb"
) as f:
source = f.read()
except IOError:
return []
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
# yes. it should be ascii, but we don't want to reject too many
# characters in the debugger if something breaks
charset = "utf-8"
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
for idx, match in enumerate(_line_re.finditer(source)):
match = _coding_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
charset = to_native(charset)
try:
codecs.lookup(charset)
except LookupError:
charset = "utf-8"
return source.decode(charset, "replace").splitlines()
def get_context_lines(self, context=5):
before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1]
past = self.sourcelines[self.lineno : self.lineno + context]
return (before, self.current_line, past)
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u""
@cached_property
def console(self):
return Console(self.globals, self.locals)
@property
def id(self):
return id(self)
```
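This appears to be the tail of Werkzeug's `werkzeug.debug.tbtools` module. A minimal usage sketch, assuming the enclosing class is `Traceback(exc_type, exc_value, tb)` (consistent with `Group` and `Frame` above) and that it is importable from `werkzeug.debug.tbtools` as in Werkzeug releases before 2.1:
```python
import sys

# Hedged sketch: assumes werkzeug.debug.tbtools.Traceback exists with the
# (exc_type, exc_value, tb) constructor implied by Group and Frame above.
from werkzeug.debug.tbtools import Traceback

try:
    1 / 0
except Exception:
    exc_type, exc_value, tb = sys.exc_info()
    traceback = Traceback(exc_type, exc_value, tb)
    traceback.filter_hidden_frames()
    traceback.log(sys.stderr)          # plain-text traceback to stderr
    html = traceback.render_summary()  # HTML summary for the debugger UI
```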
#### File: api_core/future/base.py
```python
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Future(object):
# pylint: disable=missing-docstring
# We inherit the interfaces here from concurrent.futures.
"""Future interface.
This interface is based on :class:`concurrent.futures.Future`.
"""
@abc.abstractmethod
def cancel(self):
raise NotImplementedError()
@abc.abstractmethod
def cancelled(self):
raise NotImplementedError()
@abc.abstractmethod
def running(self):
raise NotImplementedError()
@abc.abstractmethod
def done(self):
raise NotImplementedError()
@abc.abstractmethod
def result(self, timeout=None):
raise NotImplementedError()
@abc.abstractmethod
def exception(self, timeout=None):
raise NotImplementedError()
@abc.abstractmethod
def add_done_callback(self, fn):
# pylint: disable=invalid-name
raise NotImplementedError()
@abc.abstractmethod
def set_result(self, result):
raise NotImplementedError()
@abc.abstractmethod
def set_exception(self, exception):
raise NotImplementedError()
```
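Since every method of this interface is abstract, a subclass must override all of them before it can be instantiated. A toy sketch (not a production implementation), assuming the module is importable as `google.api_core.future.base`:
```python
from google.api_core.future.base import Future


class EagerFuture(Future):
    """Toy Future that is already resolved at construction (illustrative only)."""

    def __init__(self, result):
        self._result = result

    def cancel(self):
        return False  # already done, cannot cancel

    def cancelled(self):
        return False

    def running(self):
        return False

    def done(self):
        return True

    def result(self, timeout=None):
        return self._result

    def exception(self, timeout=None):
        return None

    def add_done_callback(self, fn):
        fn(self)  # already done, so invoke immediately

    def set_result(self, result):
        self._result = result

    def set_exception(self, exception):
        raise NotImplementedError("EagerFuture never fails")


future = EagerFuture(42)
assert future.done() and future.result() == 42
```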
#### File: site-packages/google_auth_oauthlib/interactive.py
```python
from __future__ import absolute_import
import google_auth_oauthlib.flow
def get_user_credentials(scopes, client_id, client_secret):
"""Gets credentials associated with your Google user account.
This function authenticates using your user credentials by going through
the OAuth 2.0 flow. You'll open a browser window to authenticate to your
Google account. The permissions it requests correspond to the scopes
you've provided.
To obtain the ``client_id`` and ``client_secret``, create an **OAuth
client ID** with application type **Other** from the `Credentials page on
the Google Developer's Console
<https://console.developers.google.com/apis/credentials>`_. Learn more
with the `Authenticating as an end user
<https://cloud.google.com/docs/authentication/end-user>`_ guide.
Args:
scopes (Sequence[str]):
A list of scopes to use when authenticating to Google APIs. See
the `list of OAuth 2.0 scopes for Google APIs
<https://developers.google.com/identity/protocols/googlescopes>`_.
client_id (str):
A string that identifies your application to Google APIs. Find
this value in the `Credentials page on the Google Developer's
Console
<https://console.developers.google.com/apis/credentials>`_.
client_secret (str):
A string that verifies your application to Google APIs. Find this
value in the `Credentials page on the Google Developer's Console
<https://console.developers.google.com/apis/credentials>`_.
Returns:
google.oauth2.credentials.Credentials:
The OAuth 2.0 credentials for the user.
Examples:
Get credentials for your user account and use them to run a query
with BigQuery::
import google_auth_oauthlib
# TODO: Create a client ID for your project.
client_id = "YOUR-CLIENT-ID.apps.googleusercontent.com"
client_secret = "abc_ThIsIsAsEcReT"
# TODO: Choose the needed scopes for your applications.
scopes = ["https://www.googleapis.com/auth/cloud-platform"]
credentials = google_auth_oauthlib.get_user_credentials(
scopes, client_id, client_secret
)
# 1. Open the link.
# 2. Authorize the application to have access to your account.
# 3. Copy and paste the authorization code to the prompt.
# Use the credentials to construct a client for Google APIs.
from google.cloud import bigquery
bigquery_client = bigquery.Client(
credentials=credentials, project="your-project-id"
)
print(list(bigquery_client.query("SELECT 1").result()))
"""
client_config = {
"installed": {
"client_id": client_id,
"client_secret": client_secret,
"redirect_uris": ["urn:ietf:wg:oauth:2.0:oob"],
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
}
}
app_flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_config(
client_config, scopes=scopes
)
return app_flow.run_console()
```
#### File: site-packages/pyasn1_modules/rfc2985.py
```python
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import opentype
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc7292
from pyasn1_modules import rfc5958
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc5280
def _OID(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
MAX = float('inf')
# Imports from RFC 5280
AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
Attribute = rfc5280.Attribute
EmailAddress = rfc5280.EmailAddress
Extensions = rfc5280.Extensions
Time = rfc5280.Time
X520countryName = rfc5280.X520countryName
X520SerialNumber = rfc5280.X520SerialNumber
# Imports from RFC 5652
ContentInfo = rfc5652.ContentInfo
ContentType = rfc5652.ContentType
Countersignature = rfc5652.Countersignature
MessageDigest = rfc5652.MessageDigest
SignerInfo = rfc5652.SignerInfo
SigningTime = rfc5652.SigningTime
# Imports from RFC 5958
EncryptedPrivateKeyInfo = rfc5958.EncryptedPrivateKeyInfo
# Imports from RFC 7292
PFX = rfc7292.PFX
# TODO:
# Need a place to import PKCS15Token; it does not yet appear in an RFC
# SingleAttribute is the same as Attribute in RFC 5280, except that the
# attrValues SET must have one and only one member
class AttributeType(univ.ObjectIdentifier):
pass
class AttributeValue(univ.Any):
pass
class AttributeValues(univ.SetOf):
pass
AttributeValues.componentType = AttributeValue()
class SingleAttributeValues(univ.SetOf):
pass
SingleAttributeValues.componentType = AttributeValue()
class SingleAttribute(univ.Sequence):
pass
SingleAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('values',
AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
openType=opentype.OpenType('type', rfc5280.certificateAttributesMap)
)
)
# CMSAttribute is the same as Attribute in RFC 5652, and CMSSingleAttribute
# is the companion where the attrValues SET must have one and only one member
CMSAttribute = rfc5652.Attribute
class CMSSingleAttribute(univ.Sequence):
pass
CMSSingleAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('attrType', AttributeType()),
namedtype.NamedType('attrValues',
AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
)
)
# DirectoryString is the same as RFC 5280, except the length is limited to 255
class DirectoryString(univ.Choice):
pass
DirectoryString.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
namedtype.NamedType('bmpString', char.BMPString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, 255)))
)
# PKCS9String is DirectoryString with an additional choice of IA5String,
# and the SIZE is limited to 255
class PKCS9String(univ.Choice):
pass
PKCS9String.componentType = namedtype.NamedTypes(
namedtype.NamedType('ia5String', char.IA5String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
namedtype.NamedType('directoryString', DirectoryString())
)
# Upper Bounds
pkcs_9_ub_pkcs9String = univ.Integer(255)
pkcs_9_ub_challengePassword = univ.Integer(pkcs_9_ub_pkcs9String)
pkcs_9_ub_emailAddress = univ.Integer(pkcs_9_ub_pkcs9String)
pkcs_9_ub_friendlyName = univ.Integer(pkcs_9_ub_pkcs9String)
pkcs_9_ub_match = univ.Integer(pkcs_9_ub_pkcs9String)
pkcs_9_ub_signingDescription = univ.Integer(pkcs_9_ub_pkcs9String)
pkcs_9_ub_unstructuredAddress = univ.Integer(pkcs_9_ub_pkcs9String)
pkcs_9_ub_unstructuredName = univ.Integer(pkcs_9_ub_pkcs9String)
ub_name = univ.Integer(32768)
pkcs_9_ub_placeOfBirth = univ.Integer(ub_name)
pkcs_9_ub_pseudonym = univ.Integer(ub_name)
# Object Identifier Arcs
ietf_at = _OID(1, 3, 6, 1, 5, 5, 7, 9)
id_at = _OID(2, 5, 4)
pkcs_9 = _OID(1, 2, 840, 113549, 1, 9)
pkcs_9_mo = _OID(pkcs_9, 0)
smime = _OID(pkcs_9, 16)
certTypes = _OID(pkcs_9, 22)
crlTypes = _OID(pkcs_9, 23)
pkcs_9_oc = _OID(pkcs_9, 24)
pkcs_9_at = _OID(pkcs_9, 25)
pkcs_9_sx = _OID(pkcs_9, 26)
pkcs_9_mr = _OID(pkcs_9, 27)
# Object Identifiers for Syntaxes for use with LDAP-accessible directories
pkcs_9_sx_pkcs9String = _OID(pkcs_9_sx, 1)
pkcs_9_sx_signingTime = _OID(pkcs_9_sx, 2)
# Object Identifiers for object classes
pkcs_9_oc_pkcsEntity = _OID(pkcs_9_oc, 1)
pkcs_9_oc_naturalPerson = _OID(pkcs_9_oc, 2)
# Object Identifiers for matching rules
pkcs_9_mr_caseIgnoreMatch = _OID(pkcs_9_mr, 1)
pkcs_9_mr_signingTimeMatch = _OID(pkcs_9_mr, 2)
# PKCS #7 PDU
pkcs_9_at_pkcs7PDU = _OID(pkcs_9_at, 5)
pKCS7PDU = Attribute()
pKCS7PDU['type'] = pkcs_9_at_pkcs7PDU
pKCS7PDU['values'][0] = ContentInfo()
# PKCS #12 token
pkcs_9_at_userPKCS12 = _OID(2, 16, 840, 1, 113730, 3, 1, 216)
userPKCS12 = Attribute()
userPKCS12['type'] = pkcs_9_at_userPKCS12
userPKCS12['values'][0] = PFX()
# PKCS #15 token
pkcs_9_at_pkcs15Token = _OID(pkcs_9_at, 1)
# TODO: Once PKCS15Token can be imported, this can be included
#
# pKCS15Token = Attribute()
# pKCS15Token['type'] = pkcs_9_at_pkcs15Token
# pKCS15Token['values'][0] = PKCS15Token()
# PKCS #8 encrypted private key information
pkcs_9_at_encryptedPrivateKeyInfo = _OID(pkcs_9_at, 2)
encryptedPrivateKeyInfo = Attribute()
encryptedPrivateKeyInfo['type'] = pkcs_9_at_encryptedPrivateKeyInfo
encryptedPrivateKeyInfo['values'][0] = EncryptedPrivateKeyInfo()
# Electronic-mail address
pkcs_9_at_emailAddress = rfc5280.id_emailAddress
emailAddress = Attribute()
emailAddress['type'] = pkcs_9_at_emailAddress
emailAddress['values'][0] = EmailAddress()
# Unstructured name
pkcs_9_at_unstructuredName = _OID(pkcs_9, 2)
unstructuredName = Attribute()
unstructuredName['type'] = pkcs_9_at_unstructuredName
unstructuredName['values'][0] = PKCS9String()
# Unstructured address
pkcs_9_at_unstructuredAddress = _OID(pkcs_9, 8)
unstructuredAddress = Attribute()
unstructuredAddress['type'] = pkcs_9_at_unstructuredAddress
unstructuredAddress['values'][0] = DirectoryString()
# Date of birth
pkcs_9_at_dateOfBirth = _OID(ietf_at, 1)
dateOfBirth = SingleAttribute()
dateOfBirth['type'] = pkcs_9_at_dateOfBirth
dateOfBirth['values'][0] = useful.GeneralizedTime()
# Place of birth
pkcs_9_at_placeOfBirth = _OID(ietf_at, 2)
placeOfBirth = SingleAttribute()
placeOfBirth['type'] = pkcs_9_at_placeOfBirth
placeOfBirth['values'][0] = DirectoryString()
# Gender
class GenderString(char.PrintableString):
pass
GenderString.subtypeSpec = constraint.ValueSizeConstraint(1, 1)
GenderString.subtypeSpec = constraint.SingleValueConstraint("M", "F", "m", "f")
pkcs_9_at_gender = _OID(ietf_at, 3)
gender = SingleAttribute()
gender['type'] = pkcs_9_at_gender
gender['values'][0] = GenderString()
# Country of citizenship
pkcs_9_at_countryOfCitizenship = _OID(ietf_at, 4)
countryOfCitizenship = Attribute()
countryOfCitizenship['type'] = pkcs_9_at_countryOfCitizenship
countryOfCitizenship['values'][0] = X520countryName()
# Country of residence
pkcs_9_at_countryOfResidence = _OID(ietf_at, 5)
countryOfResidence = Attribute()
countryOfResidence['type'] = pkcs_9_at_countryOfResidence
countryOfResidence['values'][0] = X520countryName()
# Pseudonym
id_at_pseudonym = _OID(2, 5, 4, 65)
pseudonym = Attribute()
pseudonym['type'] = id_at_pseudonym
pseudonym['values'][0] = DirectoryString()
# Serial number
id_at_serialNumber = rfc5280.id_at_serialNumber
serialNumber = Attribute()
serialNumber['type'] = id_at_serialNumber
serialNumber['values'][0] = X520SerialNumber()
# Content type
pkcs_9_at_contentType = rfc5652.id_contentType
contentType = CMSSingleAttribute()
contentType['attrType'] = pkcs_9_at_contentType
contentType['attrValues'][0] = ContentType()
# Message digest
pkcs_9_at_messageDigest = rfc5652.id_messageDigest
messageDigest = CMSSingleAttribute()
messageDigest['attrType'] = pkcs_9_at_messageDigest
messageDigest['attrValues'][0] = MessageDigest()
# Signing time
pkcs_9_at_signingTime = rfc5652.id_signingTime
signingTime = CMSSingleAttribute()
signingTime['attrType'] = pkcs_9_at_signingTime
signingTime['attrValues'][0] = SigningTime()
# Random nonce
class RandomNonce(univ.OctetString):
pass
RandomNonce.subtypeSpec = constraint.ValueSizeConstraint(4, MAX)
pkcs_9_at_randomNonce = _OID(pkcs_9_at, 3)
randomNonce = CMSSingleAttribute()
randomNonce['attrType'] = pkcs_9_at_randomNonce
randomNonce['attrValues'][0] = RandomNonce()
# Sequence number
class SequenceNumber(univ.Integer):
pass
SequenceNumber.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
pkcs_9_at_sequenceNumber = _OID(pkcs_9_at, 4)
sequenceNumber = CMSSingleAttribute()
sequenceNumber['attrType'] = pkcs_9_at_sequenceNumber
sequenceNumber['attrValues'][0] = SequenceNumber()
# Countersignature
pkcs_9_at_counterSignature = rfc5652.id_countersignature
counterSignature = CMSAttribute()
counterSignature['attrType'] = pkcs_9_at_counterSignature
counterSignature['attrValues'][0] = Countersignature()
# Challenge password
pkcs_9_at_challengePassword = _OID(pkcs_9, 7)
challengePassword = SingleAttribute()
challengePassword['type'] = pkcs_9_at_challengePassword
challengePassword['values'][0] = DirectoryString()
# Extension request
class ExtensionRequest(Extensions):
pass
pkcs_9_at_extensionRequest = _OID(pkcs_9, 14)
extensionRequest = SingleAttribute()
extensionRequest['type'] = pkcs_9_at_extensionRequest
extensionRequest['values'][0] = ExtensionRequest()
# Extended-certificate attributes (deprecated)
class AttributeSet(univ.SetOf):
pass
AttributeSet.componentType = Attribute()
pkcs_9_at_extendedCertificateAttributes = _OID(pkcs_9, 9)
extendedCertificateAttributes = SingleAttribute()
extendedCertificateAttributes['type'] = pkcs_9_at_extendedCertificateAttributes
extendedCertificateAttributes['values'][0] = AttributeSet()
# Friendly name
class FriendlyName(char.BMPString):
pass
FriendlyName.subtypeSpec = constraint.ValueSizeConstraint(1, pkcs_9_ub_friendlyName)
pkcs_9_at_friendlyName = _OID(pkcs_9, 20)
friendlyName = SingleAttribute()
friendlyName['type'] = pkcs_9_at_friendlyName
friendlyName['values'][0] = FriendlyName()
# Local key identifier
pkcs_9_at_localKeyId = _OID(pkcs_9, 21)
localKeyId = SingleAttribute()
localKeyId['type'] = pkcs_9_at_localKeyId
localKeyId['values'][0] = univ.OctetString()
# Signing description
pkcs_9_at_signingDescription = _OID(pkcs_9, 13)
signingDescription = CMSSingleAttribute()
signingDescription['attrType'] = pkcs_9_at_signingDescription
signingDescription['attrValues'][0] = DirectoryString()
# S/MIME capabilities
class SMIMECapability(AlgorithmIdentifier):
pass
class SMIMECapabilities(univ.SequenceOf):
pass
SMIMECapabilities.componentType = SMIMECapability()
pkcs_9_at_smimeCapabilities = _OID(pkcs_9, 15)
smimeCapabilities = CMSSingleAttribute()
smimeCapabilities['attrType'] = pkcs_9_at_smimeCapabilities
smimeCapabilities['attrValues'][0] = SMIMECapabilities()
# Certificate Attribute Map
_certificateAttributesMapUpdate = {
# Attribute types for use with the "pkcsEntity" object class
pkcs_9_at_pkcs7PDU: ContentInfo(),
pkcs_9_at_userPKCS12: PFX(),
# TODO: Once PKCS15Token can be imported, this can be included
# pkcs_9_at_pkcs15Token: PKCS15Token(),
pkcs_9_at_encryptedPrivateKeyInfo: EncryptedPrivateKeyInfo(),
# Attribute types for use with the "naturalPerson" object class
pkcs_9_at_emailAddress: EmailAddress(),
pkcs_9_at_unstructuredName: PKCS9String(),
pkcs_9_at_unstructuredAddress: DirectoryString(),
pkcs_9_at_dateOfBirth: useful.GeneralizedTime(),
pkcs_9_at_placeOfBirth: DirectoryString(),
pkcs_9_at_gender: GenderString(),
pkcs_9_at_countryOfCitizenship: X520countryName(),
pkcs_9_at_countryOfResidence: X520countryName(),
id_at_pseudonym: DirectoryString(),
id_at_serialNumber: X520SerialNumber(),
# Attribute types for use with PKCS #10 certificate requests
pkcs_9_at_challengePassword: DirectoryString(),
pkcs_9_at_extensionRequest: ExtensionRequest(),
pkcs_9_at_extendedCertificateAttributes: AttributeSet(),
}
rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
# CMS Attribute Map
# Note: pkcs_9_at_smimeCapabilities is not included in the map because
# the definition in RFC 5751 is preferred, which produces the same
# encoding, but it allows different parameters for SMIMECapability
# and AlgorithmIdentifier.
_cmsAttributesMapUpdate = {
# Attribute types for use in PKCS #7 data (a.k.a. CMS)
pkcs_9_at_contentType: ContentType(),
pkcs_9_at_messageDigest: MessageDigest(),
pkcs_9_at_signingTime: SigningTime(),
pkcs_9_at_randomNonce: RandomNonce(),
pkcs_9_at_sequenceNumber: SequenceNumber(),
pkcs_9_at_counterSignature: Countersignature(),
# Attributes for use in PKCS #12 "PFX" PDUs or PKCS #15 tokens
pkcs_9_at_friendlyName: FriendlyName(),
pkcs_9_at_localKeyId: univ.OctetString(),
pkcs_9_at_signingDescription: DirectoryString(),
# pkcs_9_at_smimeCapabilities: SMIMECapabilities(),
}
rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
```
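A short, hedged sketch of how the attributes above are typically used: fill in an open-typed value, DER-encode it, and decode it back with pyasn1's standard codec. The `id_data` OID is assumed to come from `rfc5652`, as in other pyasn1_modules examples:
```python
from pyasn1.codec.der.decoder import decode
from pyasn1.codec.der.encoder import encode
from pyasn1_modules import rfc2985, rfc5652

# Build a contentType CMS attribute carrying the id-data OID.
attr = rfc2985.CMSSingleAttribute()
attr['attrType'] = rfc2985.pkcs_9_at_contentType
attr['attrValues'][0] = rfc5652.ContentType(rfc5652.id_data)

der = encode(attr)
decoded, rest = decode(der, asn1Spec=rfc2985.CMSSingleAttribute())
assert not rest
assert decoded['attrType'] == rfc2985.pkcs_9_at_contentType
```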
#### File: site-packages/pyasn1_modules/rfc3281.py
```python
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc3280
MAX = float('inf')
def _buildOid(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
class ObjectDigestInfo(univ.Sequence):
pass
ObjectDigestInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('digestedObjectType', univ.Enumerated(
namedValues=namedval.NamedValues(('publicKey', 0), ('publicKeyCert', 1), ('otherObjectTypes', 2)))),
namedtype.OptionalNamedType('otherObjectTypeID', univ.ObjectIdentifier()),
namedtype.NamedType('digestAlgorithm', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('objectDigest', univ.BitString())
)
class IssuerSerial(univ.Sequence):
pass
IssuerSerial.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', rfc3280.GeneralNames()),
namedtype.NamedType('serial', rfc3280.CertificateSerialNumber()),
namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier())
)
class TargetCert(univ.Sequence):
pass
TargetCert.componentType = namedtype.NamedTypes(
namedtype.NamedType('targetCertificate', IssuerSerial()),
namedtype.OptionalNamedType('targetName', rfc3280.GeneralName()),
namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo())
)
class Target(univ.Choice):
pass
Target.componentType = namedtype.NamedTypes(
namedtype.NamedType('targetName', rfc3280.GeneralName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('targetGroup', rfc3280.GeneralName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('targetCert',
TargetCert().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
class Targets(univ.SequenceOf):
pass
Targets.componentType = Target()
class ProxyInfo(univ.SequenceOf):
pass
ProxyInfo.componentType = Targets()
id_at_role = _buildOid(rfc3280.id_at, 72)
id_pe_aaControls = _buildOid(rfc3280.id_pe, 6)
id_ce_targetInformation = _buildOid(rfc3280.id_ce, 55)
id_pe_ac_auditIdentity = _buildOid(rfc3280.id_pe, 4)
class ClassList(univ.BitString):
pass
ClassList.namedValues = namedval.NamedValues(
('unmarked', 0),
('unclassified', 1),
('restricted', 2),
('confidential', 3),
('secret', 4),
('topSecret', 5)
)
class SecurityCategory(univ.Sequence):
pass
SecurityCategory.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class Clearance(univ.Sequence):
pass
Clearance.componentType = namedtype.NamedTypes(
namedtype.NamedType('policyId', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.DefaultedNamedType('classList',
ClassList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 1)).subtype(
value="unclassified")),
namedtype.OptionalNamedType('securityCategories', univ.SetOf(componentType=SecurityCategory()).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class AttCertVersion(univ.Integer):
pass
AttCertVersion.namedValues = namedval.NamedValues(
('v2', 1)
)
id_aca = _buildOid(rfc3280.id_pkix, 10)
id_at_clearance = _buildOid(2, 5, 1, 5, 55)
class AttrSpec(univ.SequenceOf):
pass
AttrSpec.componentType = univ.ObjectIdentifier()
class AAControls(univ.Sequence):
pass
AAControls.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('pathLenConstraint',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
namedtype.OptionalNamedType('permittedAttrs',
AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('excludedAttrs',
AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.DefaultedNamedType('permitUnSpecified', univ.Boolean().subtype(value=1))
)
class AttCertValidityPeriod(univ.Sequence):
pass
AttCertValidityPeriod.componentType = namedtype.NamedTypes(
namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
)
id_aca_authenticationInfo = _buildOid(id_aca, 1)
class V2Form(univ.Sequence):
pass
V2Form.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('issuerName', rfc3280.GeneralNames()),
namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class AttCertIssuer(univ.Choice):
pass
AttCertIssuer.componentType = namedtype.NamedTypes(
namedtype.NamedType('v1Form', rfc3280.GeneralNames()),
namedtype.NamedType('v2Form',
V2Form().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class Holder(univ.Sequence):
pass
Holder.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('entityName', rfc3280.GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
class AttributeCertificateInfo(univ.Sequence):
pass
AttributeCertificateInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', AttCertVersion()),
namedtype.NamedType('holder', Holder()),
namedtype.NamedType('issuer', AttCertIssuer()),
namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
namedtype.NamedType('attrCertValidityPeriod', AttCertValidityPeriod()),
namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
)
class AttributeCertificate(univ.Sequence):
pass
AttributeCertificate.componentType = namedtype.NamedTypes(
namedtype.NamedType('acinfo', AttributeCertificateInfo()),
namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('signatureValue', univ.BitString())
)
id_mod = _buildOid(rfc3280.id_pkix, 0)
id_mod_attribute_cert = _buildOid(id_mod, 12)
id_aca_accessIdentity = _buildOid(id_aca, 2)
class RoleSyntax(univ.Sequence):
pass
RoleSyntax.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('roleAuthority', rfc3280.GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('roleName',
rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_aca_chargingIdentity = _buildOid(id_aca, 3)
class ACClearAttrs(univ.Sequence):
pass
ACClearAttrs.componentType = namedtype.NamedTypes(
namedtype.NamedType('acIssuer', rfc3280.GeneralName()),
namedtype.NamedType('acSerial', univ.Integer()),
namedtype.NamedType('attrs', univ.SequenceOf(componentType=rfc3280.Attribute()))
)
id_aca_group = _buildOid(id_aca, 4)
id_pe_ac_proxying = _buildOid(rfc3280.id_pe, 10)
class SvceAuthInfo(univ.Sequence):
pass
SvceAuthInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('service', rfc3280.GeneralName()),
namedtype.NamedType('ident', rfc3280.GeneralName()),
namedtype.OptionalNamedType('authInfo', univ.OctetString())
)
class IetfAttrSyntax(univ.Sequence):
pass
IetfAttrSyntax.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType(
'policyAuthority', rfc3280.GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType(
'values', univ.SequenceOf(
componentType=univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('octets', univ.OctetString()),
namedtype.NamedType('oid', univ.ObjectIdentifier()),
namedtype.NamedType('string', char.UTF8String())
)
)
)
)
)
id_aca_encAttrs = _buildOid(id_aca, 6)
```
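A minimal sketch exercising one of the simpler types above; the timestamps are arbitrary example values:
```python
from pyasn1.codec.der.encoder import encode
from pyasn1.type import useful
from pyasn1_modules import rfc3281

period = rfc3281.AttCertValidityPeriod()
period['notBeforeTime'] = useful.GeneralizedTime('20240101000000Z')
period['notAfterTime'] = useful.GeneralizedTime('20250101000000Z')

print(encode(period).hex())  # DER-encoded SEQUENCE of two GeneralizedTimes
```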
#### File: site-packages/pyasn1_modules/rfc5990.py
```python
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1_modules import rfc5280
MAX = float('inf')
def _OID(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
# Imports from RFC 5280
AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
# Useful types and definitions
class NullParms(univ.Null):
pass
# Object identifier arcs
is18033_2 = _OID(1, 0, 18033, 2)
nistAlgorithm = _OID(2, 16, 840, 1, 101, 3, 4)
pkcs_1 = _OID(1, 2, 840, 113549, 1, 1)
x9_44 = _OID(1, 3, 133, 16, 840, 9, 44)
x9_44_components = _OID(x9_44, 1)
# Types for algorithm identifiers
class Camellia_KeyWrappingScheme(AlgorithmIdentifier):
pass
class DataEncapsulationMechanism(AlgorithmIdentifier):
pass
class KDF2_HashFunction(AlgorithmIdentifier):
pass
class KDF3_HashFunction(AlgorithmIdentifier):
pass
class KeyDerivationFunction(AlgorithmIdentifier):
pass
class KeyEncapsulationMechanism(AlgorithmIdentifier):
pass
class X9_SymmetricKeyWrappingScheme(AlgorithmIdentifier):
pass
# RSA-KEM Key Transport Algorithm
id_rsa_kem = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 14)
class GenericHybridParameters(univ.Sequence):
pass
GenericHybridParameters.componentType = namedtype.NamedTypes(
namedtype.NamedType('kem', KeyEncapsulationMechanism()),
namedtype.NamedType('dem', DataEncapsulationMechanism())
)
rsa_kem = AlgorithmIdentifier()
rsa_kem['algorithm'] = id_rsa_kem
rsa_kem['parameters'] = GenericHybridParameters()
# KEM-RSA Key Encapsulation Mechanism
id_kem_rsa = _OID(is18033_2, 2, 4)
class KeyLength(univ.Integer):
pass
KeyLength.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
class RsaKemParameters(univ.Sequence):
pass
RsaKemParameters.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyDerivationFunction', KeyDerivationFunction()),
namedtype.NamedType('keyLength', KeyLength())
)
kem_rsa = AlgorithmIdentifier()
kem_rsa['algorithm'] = id_kem_rsa
kem_rsa['parameters'] = RsaKemParameters()
# Key Derivation Functions
id_kdf_kdf2 = _OID(x9_44_components, 1)
id_kdf_kdf3 = _OID(x9_44_components, 2)
kdf2 = AlgorithmIdentifier()
kdf2['algorithm'] = id_kdf_kdf2
kdf2['parameters'] = KDF2_HashFunction()
kdf3 = AlgorithmIdentifier()
kdf3['algorithm'] = id_kdf_kdf3
kdf3['parameters'] = KDF3_HashFunction()
# Hash Functions
id_sha1 = _OID(1, 3, 14, 3, 2, 26)
id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4)
id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1)
id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2)
id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
sha1 = AlgorithmIdentifier()
sha1['algorithm'] = id_sha1
sha1['parameters'] = univ.Null("")
sha224 = AlgorithmIdentifier()
sha224['algorithm'] = id_sha224
sha224['parameters'] = univ.Null("")
sha256 = AlgorithmIdentifier()
sha256['algorithm'] = id_sha256
sha256['parameters'] = univ.Null("")
sha384 = AlgorithmIdentifier()
sha384['algorithm'] = id_sha384
sha384['parameters'] = univ.Null("")
sha512 = AlgorithmIdentifier()
sha512['algorithm'] = id_sha512
sha512['parameters'] = univ.Null("")
# Symmetric Key-Wrapping Schemes
id_aes128_Wrap = _OID(nistAlgorithm, 1, 5)
id_aes192_Wrap = _OID(nistAlgorithm, 1, 25)
id_aes256_Wrap = _OID(nistAlgorithm, 1, 45)
id_alg_CMS3DESwrap = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 6)
id_camellia128_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 2)
id_camellia192_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 3)
id_camellia256_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 4)
aes128_Wrap = AlgorithmIdentifier()
aes128_Wrap['algorithm'] = id_aes128_Wrap
# aes128_Wrap['parameters'] are absent
aes192_Wrap = AlgorithmIdentifier()
aes192_Wrap['algorithm'] = id_aes192_Wrap
# aes192_Wrap['parameters'] are absent
aes256_Wrap = AlgorithmIdentifier()
aes256_Wrap['algorithm'] = id_aes256_Wrap
# aes256_Wrap['parameters'] are absent
tdes_Wrap = AlgorithmIdentifier()
tdes_Wrap['algorithm'] = id_alg_CMS3DESwrap
tdes_Wrap['parameters'] = univ.Null("")
camellia128_Wrap = AlgorithmIdentifier()
camellia128_Wrap['algorithm'] = id_camellia128_Wrap
# camellia128_Wrap['parameters'] are absent
camellia192_Wrap = AlgorithmIdentifier()
camellia192_Wrap['algorithm'] = id_camellia192_Wrap
# camellia192_Wrap['parameters'] are absent
camellia256_Wrap = AlgorithmIdentifier()
camellia256_Wrap['algorithm'] = id_camellia256_Wrap
# camellia256_Wrap['parameters'] are absent
# Update the Algorithm Identifier map in rfc5280.py.
# Note that the ones that must not have parameters are not added to the map.
_algorithmIdentifierMapUpdate = {
id_rsa_kem: GenericHybridParameters(),
id_kem_rsa: RsaKemParameters(),
id_kdf_kdf2: KDF2_HashFunction(),
id_kdf_kdf3: KDF3_HashFunction(),
id_sha1: univ.Null(),
id_sha224: univ.Null(),
id_sha256: univ.Null(),
id_sha384: univ.Null(),
id_sha512: univ.Null(),
id_alg_CMS3DESwrap: univ.Null(),
}
rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
```
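To confirm the map update took effect, the registered parameter schema can be looked up by OID; a minimal sketch:
```python
from pyasn1_modules import rfc5280, rfc5990

# After importing rfc5990, its algorithm OIDs resolve to parameter schemas.
spec = rfc5280.algorithmIdentifierMap[rfc5990.id_kem_rsa]
print(type(spec).__name__)  # RsaKemParameters
print(rfc5990.id_kem_rsa)   # 1.0.18033.2.2.4
```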
#### File: requests_oauthlib/compliance_fixes/fitbit.py
```python
from json import loads, dumps
from oauthlib.common import to_unicode
def fitbit_compliance_fix(session):
def _missing_error(r):
token = loads(r.text)
if "errors" in token:
# Set the error to the first one we have
token["error"] = token["errors"][0]["errorType"]
r._content = to_unicode(dumps(token)).encode("UTF-8")
return r
session.register_compliance_hook("access_token_response", _missing_error)
session.register_compliance_hook("refresh_token_response", _missing_error)
return session
```
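Typical usage for this and the other compliance fixes in the package: wrap an `OAuth2Session` before fetching a token. The client ID and redirect URI below are placeholders:
```python
from requests_oauthlib import OAuth2Session
from requests_oauthlib.compliance_fixes import fitbit_compliance_fix

session = OAuth2Session(
    client_id="YOUR-CLIENT-ID",             # placeholder
    redirect_uri="https://example.com/cb",  # placeholder
)
session = fitbit_compliance_fix(session)
# Token responses now gain a standard "error" key, derived from Fitbit's
# non-standard "errors" list, before oauthlib validates them.
```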
#### File: requests_oauthlib/compliance_fixes/instagram.py
```python
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
from oauthlib.common import add_params_to_uri
def instagram_compliance_fix(session):
def _non_compliant_param_name(url, headers, data):
# If the user has already specified the token in the URL
# then there's nothing to do.
# If the specified token is different from ``session.access_token``,
# we assume the user intends to override the access token.
url_query = dict(parse_qs(urlparse(url).query))
token = url_query.get("access_token")
if token:
# Nothing to do, just return.
return url, headers, data
token = [("access_token", session.access_token)]
url = add_params_to_uri(url, token)
return url, headers, data
session.register_compliance_hook("protected_request", _non_compliant_param_name)
return session
```
#### File: requests_oauthlib/compliance_fixes/mailchimp.py
```python
import json
from oauthlib.common import to_unicode
def mailchimp_compliance_fix(session):
def _null_scope(r):
token = json.loads(r.text)
if "scope" in token and token["scope"] is None:
token.pop("scope")
r._content = to_unicode(json.dumps(token)).encode("utf-8")
return r
def _non_zero_expiration(r):
token = json.loads(r.text)
if "expires_in" in token and token["expires_in"] == 0:
token["expires_in"] = 3600
r._content = to_unicode(json.dumps(token)).encode("utf-8")
return r
session.register_compliance_hook("access_token_response", _null_scope)
session.register_compliance_hook("access_token_response", _non_zero_expiration)
return session
```
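The hooks act on the raw token response, so their effect can be seen by running them against a stub response object. This sketch assumes `OAuth2Session` keeps registered hooks in its `compliance_hook` mapping; the stub class is purely illustrative:
```python
import json

from requests_oauthlib import OAuth2Session
from requests_oauthlib.compliance_fixes import mailchimp_compliance_fix

session = mailchimp_compliance_fix(OAuth2Session(client_id="YOUR-CLIENT-ID"))

class StubResponse:
    """Stands in for requests.Response in this illustration."""
    def __init__(self, payload):
        self._content = json.dumps(payload).encode("utf-8")

    @property
    def text(self):
        return self._content.decode("utf-8")

r = StubResponse({"access_token": "t", "scope": None, "expires_in": 0})
for hook in session.compliance_hook["access_token_response"]:
    r = hook(r)
print(r.text)  # "scope" removed, "expires_in" bumped to 3600
```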
#### File: site-packages/rsa/_compat.py
```python
from struct import pack
def byte(num: int) -> bytes:
"""
Converts a number between 0 and 255 (both inclusive) to a base-256 (byte)
representation.
:param num:
An unsigned integer between 0 and 255 (both inclusive).
:returns:
A single byte.
"""
return pack("B", num)
def xor_bytes(b1: bytes, b2: bytes) -> bytes:
"""
Returns the bitwise XOR result between two bytes objects, b1 ^ b2.
Bitwise XOR is commutative, so the order of the parameters does not
affect the result. If the parameters differ in length, the extra
bytes of the longer one are ignored.
:param b1:
First bytes object.
:param b2:
Second bytes object.
:returns:
Bytes object, result of XOR operation.
"""
return bytes(x ^ y for x, y in zip(b1, b2))
```
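Both helpers are easy to verify directly:
```python
from rsa._compat import byte, xor_bytes

assert byte(65) == b"A"
assert xor_bytes(b"\x0f\x0f", b"\xff\x00") == b"\xf0\x0f"
# With unequal lengths, the tail of the longer input is ignored.
assert xor_bytes(b"\x01\x02\x03", b"\x01") == b"\x00"
```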
#### File: wrapper/locust_as_library/locust_as_library.py
```python
import gevent
from locust.env import Environment
from locust.stats import stats_printer, stats_history
from locust.log import setup_logging
setup_logging("INFO", None)
def create_env(user_class, ip_address="127.0.0.1"):
env = Environment(user_classes=[user_class])
env.create_local_runner()
env.create_web_ui(ip_address, 8089)
gevent.spawn(stats_printer(env.stats))
gevent.spawn(stats_history, env.runner)
return env
def start_test(env, user_count=1, spawn_rate=10, test_time=60):
env.runner.start(user_count, spawn_rate=spawn_rate)
gevent.spawn_later(test_time, lambda: env.runner.quit())
env.runner.greenlet.join()
env.web_ui.stop()
```
#### File: test/locust_as_library_test/locust_as_library_test.py
```python
from locust import HttpUser, task, between
from load_testing_je import create_env
from load_testing_je import start_test
class User(HttpUser):
wait_time = between(1, 3)
host = "https://docs.locust.io"
@task
def my_task(self):
self.client.get("/")
@task
def task_404(self):
self.client.get("/non-existing-path")
start_test(create_env(User))
```
#### File: writers/html5_polyglot/__init__.py
```python
"""
Plain HyperText Markup Language document tree Writer.
The output conforms to the `HTML 5` specification.
The cascading style sheet "minimal.css" is required for proper viewing,
the style sheet "plain.css" improves reading experience.
"""
__docformat__ = 'reStructuredText'
import os.path
import docutils
from docutils import frontend, nodes, writers, io
from docutils.transforms import writer_aux
from docutils.writers import _html_base
class Writer(writers._html_base.Writer):
supported = ('html', 'html5', 'html4', 'xhtml', 'xhtml10')
"""Formats this writer supports."""
default_stylesheets = ['minimal.css', 'plain.css']
default_stylesheet_dirs = ['.', os.path.abspath(os.path.dirname(__file__))]
default_template = 'template.txt'
default_template_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), default_template)
settings_spec = (
'HTML-Specific Options',
None,
(('Specify the template file (UTF-8 encoded). Default is "%s".'
% default_template_path,
['--template'],
{'default': default_template_path, 'metavar': '<file>'}),
('Comma separated list of stylesheet URLs. '
'Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'metavar': '<URL[,URL,...]>', 'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of stylesheet paths. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output HTML file. '
'Default: "%s"' % ','.join(default_stylesheets),
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheets}),
('Embed the stylesheet(s) in the output HTML file. The stylesheet '
'files must be accessible during processing. This is the default.',
['--embed-stylesheet'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Link to the stylesheet(s) in the output HTML file. '
'Default: embed stylesheets.',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "%s"' % default_stylesheet_dirs,
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheet_dirs}),
('Specify the initial header level. Default is 1 for "<h1>". '
'Does not affect document title & subtitle (see --no-doc-title).',
['--initial-header-level'],
{'choices': '1 2 3 4 5 6'.split(), 'default': '1',
'metavar': '<level>'}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "brackets".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'brackets',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Remove extra vertical whitespace between items of "simple" bullet '
'lists and enumerated lists. Default: enabled.',
['--compact-lists'],
{'default': True, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),
('Remove extra vertical whitespace between items of simple field '
'lists. Default: enabled.',
['--compact-field-lists'],
{'default': True, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple field lists.',
['--no-compact-field-lists'],
{'dest': 'compact_field_lists', 'action': 'store_false'}),
('Added to standard table classes. '
'Defined styles: borderless, booktabs, '
'align-left, align-center, align-right, colwidths-auto. '
'Default: ""',
['--table-style'],
{'default': ''}),
('Math output format (one of "MathML", "HTML", "MathJax", '
'or "LaTeX") and option(s). '
'Default: "HTML math.css"',
['--math-output'],
{'default': 'HTML math.css'}),
('Prepend an XML declaration. (Thwarts HTML5 conformance.) '
'Default: False',
['--xml-declaration'],
{'default': False, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Omit the XML declaration.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'action': 'store_false'}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),))
config_section = 'html5 writer'
def __init__(self):
self.parts = {}
self.translator_class = HTMLTranslator
class HTMLTranslator(writers._html_base.HTMLTranslator):
"""
This writer generates `polyglot markup`: HTML5 that is also valid XML.
Safe subclassing: when overriding, treat ``visit_*`` and ``depart_*``
methods as a unit to prevent breaks due to internal changes. See the
docstring of docutils.writers._html_base.HTMLTranslator for details
and examples.
"""
# <acronym> tag obsolete in HTML5. Use the <abbr> tag instead.
def visit_acronym(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'abbr', ''))
def depart_acronym(self, node):
self.body.append('</abbr>')
# no standard meta tag name in HTML5, use separate "author" meta tags
# https://www.w3.org/TR/html5/document-metadata.html#standard-metadata-names
def visit_authors(self, node):
self.visit_docinfo_item(node, 'authors', meta=False)
for subnode in node:
self.add_meta('<meta name="author" content="%s" />\n' %
self.attval(subnode.astext()))
def depart_authors(self, node):
self.depart_docinfo_item()
# no standard meta tag name in HTML5, use dcterms.rights
# see https://wiki.whatwg.org/wiki/MetaExtensions
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright', meta=False)
self.add_meta('<meta name="dcterms.rights" content="%s" />\n'
% self.attval(node.astext()))
def depart_copyright(self, node):
self.depart_docinfo_item()
# no standard meta tag name in HTML5, use dcterms.date
def visit_date(self, node):
self.visit_docinfo_item(node, 'date', meta=False)
self.add_meta('<meta name="dcterms.date" content="%s" />\n'
% self.attval(node.astext()))
def depart_date(self, node):
self.depart_docinfo_item()
# TODO: use HTML5 <footer> element?
# def visit_footer(self, node):
# def depart_footer(self, node):
# TODO: use the new HTML5 element <aside>? (Also for footnote text)
# def visit_footnote(self, node):
# def depart_footnote(self, node):
# Meta tags: 'lang' attribute replaced by 'xml:lang' in XHTML 1.1
# HTML5/polyglot recommends using both
def visit_meta(self, node):
if node.hasattr('lang'):
node['xml:lang'] = node['lang']
# del(node['lang'])
meta = self.emptytag(node, 'meta', **node.non_default_attributes())
self.add_meta(meta)
def depart_meta(self, node):
pass
# no standard meta tag name in HTML5
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization', meta=False)
def depart_organization(self, node):
self.depart_docinfo_item()
# TODO: use the new HTML5 element <section>?
# def visit_section(self, node):
# def depart_section(self, node):
```
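Output is produced through the usual docutils entry point; `writer_name="html5"` resolves to this writer via docutils' writer aliases:
```python
from docutils.core import publish_string

html = publish_string(
    source="Title\n=====\n\nHello, *world*.\n",
    writer_name="html5",
)
print(html.decode("utf-8")[:200])
```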
#### File: docutils/writers/pseudoxml.py
```python
__docformat__ = 'reStructuredText'
from docutils import writers
class Writer(writers.Writer):
supported = ('pprint', 'pformat', 'pseudoxml')
"""Formats this writer supports."""
config_section = 'pseudoxml writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
def translate(self):
self.output = self.document.pformat()
def supports(self, format):
"""This writer supports all format-specific elements."""
return True
```
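The same entry point works for this writer, which simply pretty-prints the document tree:
```python
from docutils.core import publish_string

tree = publish_string(source="Hello, *world*.\n", writer_name="pseudoxml")
print(tree.decode("utf-8"))
```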
#### File: site-packages/JELogSystem/Log_System.py
```python
import datetime
class Log_System():
'''
Default broadcast level: 3
Timestamps are recorded by default.
Normal : 0
Debug : 1
Info : 2
Warning : 3
Error : 4
Critical : 5
'''
def __init__(self):
self.Normal_Lv = 0
self.Debug_Lv = 1
self.Info_Lv = 2
self.Warning_Lv = 3
self.Error_Lv = 4
self.Critical_Lv = 5
self.BoardCast_Lv = 3
self.Time = True
# ----------------------------------------------------------------------------------------------
'''
Log message kinds
Normal : ordinary message
Debug : debugging message
Info : informational message
Warning : warning message
Error : error message
Critical : critical error message
'''
def Normal(self, *args, Except=False):
self.State(self.Normal_Lv, self.BoardCast_Lv, "Log", 'Normal', Except, *args)
def Debug(self, *args, Except=False):
self.State(self.Debug_Lv, self.BoardCast_Lv, "Log", 'Debug', Except, *args)
def Info(self, *args, Except=False):
self.State(self.Info_Lv, self.BoardCast_Lv, "Log", 'Info', Except, *args)
def Warning(self, *args, Except=False):
self.State(self.Warning_Lv, self.BoardCast_Lv, "Log", 'Warning', Except, *args)
def Error(self, *args, Except=False):
self.State(self.Error_Lv, self.BoardCast_Lv, "Log", 'Error', Except, *args)
def Critical(self, *args, Except=False):
self.State(self.Critical_Lv, self.BoardCast_Lv, "Log", 'Critical', Except, *args)
# ----------------------------------------------------------------------------------------------
# Set the level at which messages are broadcast
def Set_BoardCast_Lv(self, Lv):
if (Lv <= -1):
self.BoardCast_Lv = 3
else:
self.BoardCast_Lv = Lv
# Set whether timestamps are recorded
def Set_Time_Able(self, Time_Able):
self.Time = Time_Able
# ----------------------------------------------------------------------------------------------
# Print a message and, if it passes the level check, record it
def State(self, LV1, LV2, LogFileName, Error, Except=False, *args):
try:
if (LV1 >= LV2 and self.Time == True):
Text = ''
Text += (datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S') + '\t')
Text += ('\t' + str(Error) + ': ' + str(args))
print("Log in file "+Text)
self.Save_Log(Text, LogFileName)
elif (self.Critical_Lv >= self.BoardCast_Lv):
print("Not log in file ",str(Error) + ': ' + str(args))
except Exception as e:
if (Except and LV1 >= LV2):
print(e)
# ----------------------------------------------------------------------------------------------
'''
Write the message out to the log file
'''
def Save_Log(self, Error_Text, LogName):
with open(LogName, 'a')as File:
File.write(Error_Text + '\n')
def Clean_Log(self, LogName):
with open(LogName, 'w')as File:
File.write('')
```
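Usage follows directly from the class above; messages at or above the broadcast level are timestamped and appended to a file named "Log" in the working directory. The import path assumes the package installs as `JELogSystem`:
```python
from JELogSystem.Log_System import Log_System

log = Log_System()           # broadcast level defaults to 3 (Warning)
log.Set_BoardCast_Lv(2)      # also record Info-level messages
log.Info("service started")  # written to ./Log with a timestamp
log.Debug("too low")         # level 1 < 2, not written to the file
log.Clean_Log("Log")         # truncate the log file
```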
#### File: site-packages/rfc3986/misc.py
```python
import re
from . import abnf_regexp
# These are enumerated for the named tuple used as a superclass of
# URIReference
URI_COMPONENTS = ['scheme', 'authority', 'path', 'query', 'fragment']
important_characters = {
'generic_delimiters': abnf_regexp.GENERIC_DELIMITERS,
'sub_delimiters': abnf_regexp.SUB_DELIMITERS,
# We need to escape the '*' in this case
're_sub_delimiters': abnf_regexp.SUB_DELIMITERS_RE,
'unreserved_chars': abnf_regexp.UNRESERVED_CHARS,
# We need to escape the '-' in this case:
're_unreserved': abnf_regexp.UNRESERVED_RE,
}
# For details about delimiters and reserved characters, see:
# http://tools.ietf.org/html/rfc3986#section-2.2
GENERIC_DELIMITERS = abnf_regexp.GENERIC_DELIMITERS_SET
SUB_DELIMITERS = abnf_regexp.SUB_DELIMITERS_SET
RESERVED_CHARS = abnf_regexp.RESERVED_CHARS_SET
# For details about unreserved characters, see:
# http://tools.ietf.org/html/rfc3986#section-2.3
UNRESERVED_CHARS = abnf_regexp.UNRESERVED_CHARS_SET
NON_PCT_ENCODED = abnf_regexp.NON_PCT_ENCODED_SET
URI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE)
SUBAUTHORITY_MATCHER = re.compile((
'^(?:(?P<userinfo>{0})@)?' # userinfo
'(?P<host>{1})' # host
':?(?P<port>{2})?$' # port
).format(abnf_regexp.USERINFO_RE,
abnf_regexp.HOST_PATTERN,
abnf_regexp.PORT_RE))
HOST_MATCHER = re.compile('^' + abnf_regexp.HOST_RE + '$')
IPv4_MATCHER = re.compile('^' + abnf_regexp.IPv4_RE + '$')
IPv6_MATCHER = re.compile(r'^\[' + abnf_regexp.IPv6_ADDRZ_RFC4007_RE + r'\]$')
# Used by host validator
IPv6_NO_RFC4007_MATCHER = re.compile(r'^\[%s\]$' % (
abnf_regexp.IPv6_ADDRZ_RE
))
# Matcher used to validate path components
PATH_MATCHER = re.compile(abnf_regexp.PATH_RE)
# ##################################
# Query and Fragment Matcher Section
# ##################################
QUERY_MATCHER = re.compile(abnf_regexp.QUERY_RE)
FRAGMENT_MATCHER = QUERY_MATCHER
# Scheme validation, see: http://tools.ietf.org/html/rfc3986#section-3.1
SCHEME_MATCHER = re.compile('^{0}$'.format(abnf_regexp.SCHEME_RE))
RELATIVE_REF_MATCHER = re.compile(r'^%s(\?%s)?(#%s)?$' % (
abnf_regexp.RELATIVE_PART_RE,
abnf_regexp.QUERY_RE,
abnf_regexp.FRAGMENT_RE,
))
# See http://tools.ietf.org/html/rfc3986#section-4.3
ABSOLUTE_URI_MATCHER = re.compile(r'^%s:%s(\?%s)?$' % (
abnf_regexp.COMPONENT_PATTERN_DICT['scheme'],
abnf_regexp.HIER_PART_RE,
abnf_regexp.QUERY_RE[1:-1],
))
# ###############
# IRIs / RFC 3987
# ###############
IRI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE, re.UNICODE)
ISUBAUTHORITY_MATCHER = re.compile((
u'^(?:(?P<userinfo>{0})@)?' # iuserinfo
u'(?P<host>{1})' # ihost
u':?(?P<port>{2})?$' # port
).format(abnf_regexp.IUSERINFO_RE,
abnf_regexp.IHOST_RE,
abnf_regexp.PORT_RE), re.UNICODE)
# Path merger as defined in http://tools.ietf.org/html/rfc3986#section-5.2.3
def merge_paths(base_uri, relative_path):
"""Merge a base URI's path with a relative URI's path."""
if base_uri.path is None and base_uri.authority is not None:
return '/' + relative_path
else:
path = base_uri.path or ''
index = path.rfind('/')
return path[:index] + '/' + relative_path
UseExisting = object()
```
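A quick sketch of the matchers and the path merger in action:
```python
from rfc3986 import misc, uri_reference

m = misc.SUBAUTHORITY_MATCHER.match("user:pw@example.com:8080")
print(m.groupdict())  # {'userinfo': 'user:pw', 'host': 'example.com', 'port': '8080'}

base = uri_reference("http://example.com/a/b")
print(misc.merge_paths(base, "c"))  # '/a/c'
```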
#### File: OpenGLWrapper_JE/Core/Core.py
```python
import datetime
from Models.Window.GLUT_Window import GLUT_Window
from Models.Window.GLFW_Window import GLFW_Window
class Core:
def __init__(self):
try:
self.GLUT_Window = GLUT_Window()
self.GLFW_Window = GLFW_Window()
except Exception as Error:
raise Error
print(datetime.datetime.now(), self.__class__, 'Ready', sep=' ')
```
#### File: Models/Event/Observer.py
```python
class Base_Observer:
def __init__(self):
pass
class Observer:
def register(self, listener):
raise NotImplementedError("Must subclass me")
def deregister(self, listener):
raise NotImplementedError("Must subclass me")
def notify_listeners(self, event):
raise NotImplementedError("Must subclass me")
class Listener:
def __init__(self, name, Object):
self.name = name
Object.register(self)
def notify(self, event):
print(self.name, "received event", event)
class Object(Observer):
def __init__(self):
self.listeners = []
self.data = None
def action(self, event):
self.data = event
return self.data
def register(self, listener):
self.listeners.append(listener)
def deregister(self, listener):
self.listeners.remove(listener)
def notify_listeners(self, event):
for listener in self.listeners:
listener.notify(event)
```
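Wiring the pieces together: listeners register with an `Object` at construction time and receive whatever the subject broadcasts (this sketch assumes the classes above are in scope):
```python
subject = Object()
alice = Listener("alice", subject)
bob = Listener("bob", subject)

subject.notify_listeners("ping")  # both listeners print the event
subject.deregister(bob)
subject.notify_listeners("pong")  # only alice receives this one
```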
#### File: OpenGL/arrays/nones.py
```python
REGISTRY_NAME = 'nones'
import logging
_log = logging.getLogger( 'OpenGL.arrays.nones' )
from OpenGL import acceleratesupport
NoneHandler = None
if acceleratesupport.ACCELERATE_AVAILABLE:
try:
from OpenGL_accelerate.nones_formathandler import NoneHandler
except ImportError as err:
_log.warning(
"Unable to load nones_formathandler accelerator from OpenGL_accelerate"
)
if NoneHandler is None:
from OpenGL.arrays import formathandler
class NoneHandler( formathandler.FormatHandler ):
"""Numpy-specific data-type handler for OpenGL"""
HANDLED_TYPES = (type(None), )
def from_param( self, value, typeCode=None ):
"""Convert to a ctypes pointer value"""
return None
def dataPointer( self, value ):
"""return long for pointer value"""
return None
def voidDataPointer( cls, value ):
"""Given value in a known data-pointer type, return void_p for pointer"""
return None
def asArray( self, value, typeCode=None ):
"""Given a value, convert to array representation"""
return None
def arrayToGLType( self, value ):
"""Given a value, guess OpenGL type of the corresponding pointer"""
raise TypeError( """Can't guess type of a NULL pointer""" )
def arraySize( self, value, typeCode = None ):
"""Given a data-value, calculate dimensions for the array"""
return 0
def arrayByteCount( self, value, typeCode = None ):
"""Given a data-value, calculate number of bytes required to represent"""
return 0
def zeros( self, shape, typeCode= None ):
"""Create an array of given shape with given typeCode"""
raise TypeError( """Can't create NULL pointer filled with values""" )
def ones( self, shape, typeCode= None ):
"""Create an array of given shape with given typeCode"""
raise TypeError( """Can't create NULL pointer filled with values""" )
def unitSize( self, value, typeCode=None ):
"""Determine unit size of an array (if possible)"""
raise TypeError( """Can't determine unit size of a null pointer""" )
def dimensions( self, value, typeCode=None ):
"""Determine dimensions of the passed array value (if possible)"""
return (0,)
```
#### File: OpenGL/EGL/debug.py
```python
from OpenGL.EGL import *
import itertools
import logging

# `log` is used by debug_config below; define it here so the module is
# importable on its own.
log = logging.getLogger(__name__)
def eglErrorName(value):
"""Returns error constant if known, otherwise returns value"""
return KNOWN_ERRORS.get(value, value)
KNOWN_ERRORS = {
EGL_SUCCESS: EGL_SUCCESS,
EGL_NOT_INITIALIZED: EGL_NOT_INITIALIZED,
EGL_BAD_ACCESS: EGL_BAD_ACCESS,
EGL_BAD_ALLOC: EGL_BAD_ALLOC,
EGL_BAD_ATTRIBUTE: EGL_BAD_ATTRIBUTE,
EGL_BAD_CONTEXT: EGL_BAD_CONTEXT,
EGL_BAD_CONFIG: EGL_BAD_CONFIG,
EGL_BAD_CURRENT_SURFACE: EGL_BAD_CURRENT_SURFACE,
EGL_BAD_DISPLAY: EGL_BAD_DISPLAY,
EGL_BAD_SURFACE: EGL_BAD_SURFACE,
EGL_BAD_MATCH: EGL_BAD_MATCH,
EGL_BAD_PARAMETER: EGL_BAD_PARAMETER,
EGL_BAD_NATIVE_PIXMAP: EGL_BAD_NATIVE_PIXMAP,
EGL_BAD_NATIVE_WINDOW: EGL_BAD_NATIVE_WINDOW,
EGL_CONTEXT_LOST: EGL_CONTEXT_LOST,
}
def write_ppm(buf, filename):
"""Write height * width * 3-component buffer as ppm to filename
This lets us write a simple image format without
using any libraries that can be viewed on most
linux workstations.
"""
with open(filename, "w") as f:
h, w, c = buf.shape
print("P3", file=f)
print("# ascii ppm file created by pyopengl", file=f)
print("%i %i" % (w, h), file=f)
print("255", file=f)
for y in range(h - 1, -1, -1):
for x in range(w):
pixel = buf[y, x]
l = " %3d %3d %3d" % (pixel[0], pixel[1], pixel[2])
f.write(l)
f.write("\n")
def debug_config(display, config):
"""Get debug display for the given configuration"""
result = {}
value = EGLint()
for attr in CONFIG_ATTRS:
if not eglGetConfigAttrib(display, config, attr, value):
log.warning("Failed to get attribute %s from config", attr)
continue
if attr in BITMASK_FIELDS:
attr_value = {}
for subattr in BITMASK_FIELDS[attr]:
if value.value & subattr:
attr_value[subattr.name] = True
else:
attr_value = value.value
result[attr.name] = attr_value
return result
def debug_configs(display, configs=None, max_count=256):
"""Present a formatted list of configs for the display"""
if configs is None:
configs = (EGLConfig * max_count)()
num_configs = EGLint()
eglGetConfigs(display, configs, max_count, num_configs)
if not num_configs.value:
return []
configs = configs[: num_configs.value]
debug_configs = [debug_config(display, cfg) for cfg in configs]
return debug_configs
SURFACE_TYPE_BITS = [
EGL_MULTISAMPLE_RESOLVE_BOX_BIT,
EGL_PBUFFER_BIT,
EGL_PIXMAP_BIT,
EGL_SWAP_BEHAVIOR_PRESERVED_BIT,
EGL_VG_ALPHA_FORMAT_PRE_BIT,
EGL_VG_COLORSPACE_LINEAR_BIT,
EGL_WINDOW_BIT,
]
RENDERABLE_TYPE_BITS = [
EGL_OPENGL_BIT,
EGL_OPENGL_ES_BIT,
EGL_OPENGL_ES2_BIT,
EGL_OPENGL_ES3_BIT,
EGL_OPENVG_BIT,
]
CAVEAT_BITS = [
EGL_NONE,
EGL_SLOW_CONFIG,
EGL_NON_CONFORMANT_CONFIG,
]
TRANSPARENT_BITS = [
EGL_NONE,
EGL_TRANSPARENT_RGB,
]
CONFIG_ATTRS = [
EGL_CONFIG_ID,
EGL_RED_SIZE,
EGL_GREEN_SIZE,
EGL_BLUE_SIZE,
EGL_DEPTH_SIZE,
EGL_ALPHA_SIZE,
EGL_ALPHA_MASK_SIZE,
EGL_BUFFER_SIZE,
EGL_STENCIL_SIZE,
EGL_BIND_TO_TEXTURE_RGB,
EGL_BIND_TO_TEXTURE_RGBA,
EGL_COLOR_BUFFER_TYPE,
EGL_CONFIG_CAVEAT,
EGL_CONFORMANT,
EGL_LEVEL,
EGL_LUMINANCE_SIZE,
EGL_MAX_PBUFFER_WIDTH,
EGL_MAX_PBUFFER_HEIGHT,
EGL_MAX_PBUFFER_PIXELS,
EGL_MIN_SWAP_INTERVAL,
EGL_MAX_SWAP_INTERVAL,
EGL_NATIVE_RENDERABLE,
EGL_NATIVE_VISUAL_ID,
EGL_NATIVE_VISUAL_TYPE,
EGL_RENDERABLE_TYPE,
EGL_SAMPLE_BUFFERS,
EGL_SAMPLES,
EGL_SURFACE_TYPE,
EGL_TRANSPARENT_TYPE,
EGL_TRANSPARENT_RED_VALUE,
EGL_TRANSPARENT_GREEN_VALUE,
EGL_TRANSPARENT_BLUE_VALUE,
]
BITMASK_FIELDS = dict(
[
(EGL_SURFACE_TYPE, SURFACE_TYPE_BITS),
(EGL_RENDERABLE_TYPE, RENDERABLE_TYPE_BITS),
(EGL_CONFORMANT, RENDERABLE_TYPE_BITS),
(EGL_CONFIG_CAVEAT, CAVEAT_BITS),
(EGL_TRANSPARENT_TYPE, TRANSPARENT_BITS),
]
)
def bit_renderer(bit):
def render(value):
if bit.name in value:
return " Y"
else:
return " ."
return render
CONFIG_FORMAT = [
(EGL_CONFIG_ID, "0x%x", "id", "cfg"),
(EGL_BUFFER_SIZE, "%i", "sz", "bf"),
(EGL_LEVEL, "%i", "l", "lv"),
(EGL_RED_SIZE, "%i", "r", "cbuf"),
(EGL_GREEN_SIZE, "%i", "g", "cbuf"),
(EGL_BLUE_SIZE, "%i", "b", "cbuf"),
(EGL_ALPHA_SIZE, "%i", "a", "cbuf"),
(EGL_DEPTH_SIZE, "%i", "th", "dp"),
(EGL_STENCIL_SIZE, "%i", "t", "s"),
(EGL_SAMPLES, "%i", "ns", "mult"),
(EGL_SAMPLE_BUFFERS, "%i", "bu", "mult"),
(EGL_NATIVE_VISUAL_ID, "0x%x", "id", "visual"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENGL_BIT), "gl", "render"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENGL_ES_BIT), "es", "render"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENGL_ES2_BIT), "e2", "render"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENGL_ES3_BIT), "e3", "render"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENVG_BIT), "vg", "render"),
(EGL_SURFACE_TYPE, bit_renderer(EGL_WINDOW_BIT), "wn", "surface"),
(EGL_SURFACE_TYPE, bit_renderer(EGL_PBUFFER_BIT), "pb", "surface"),
(EGL_SURFACE_TYPE, bit_renderer(EGL_PIXMAP_BIT), "px", "surface"),
]
def format_debug_configs(debug_configs, formats=CONFIG_FORMAT):
"""Format config for compact debugging display
Produces a config summary display for a set of
debug_configs as a text-mode table.
Uses `formats` (default `CONFIG_FORMAT`) to determine
which fields are extracted and how they are formatted
along with the column/subcolum set to be rendered in
the overall header.
returns formatted ASCII table for display in debug
logs or utilities
"""
columns = []
for (key, format, subcol, col) in formats:
column = []
max_width = 0
for row in debug_configs:
if isinstance(row, EGLConfig):
raise TypeError(row, "Call debug_config(display,config)")
try:
value = row[key.name]
except KeyError:
formatted = "_"
else:
if isinstance(format, str):
formatted = format % (value)
else:
formatted = format(value)
max_width = max((len(formatted), max_width))
column.append(formatted)
columns.append(
{
"rows": column,
"key": key,
"format": format,
"subcol": subcol,
"col": col,
"width": max_width,
}
)
headers = []
subheaders = []
rows = [headers, subheaders]
last_column = None
last_column_width = 0
for header, subcols in itertools.groupby(columns, lambda x: x["col"]):
subcols = list(subcols)
width = sum([col["width"] for col in subcols]) + (len(subcols) - 1)
headers.append(header.center(width, ".")[:width])
for column in columns:
subheaders.append(column["subcol"].rjust(column["width"])[: column["width"]])
rows.extend(
zip(*[[v.rjust(col["width"], " ") for v in col["rows"]] for col in columns])
)
return "\n".join([" ".join(row) for row in rows])
```
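A small usage sketch for `write_ppm` above. It assumes numpy is available and that the module above is in scope; the gradient image is purely illustrative:
```python
import numpy as np

# 64x64 RGB test image: red ramps left-to-right, green top-to-bottom.
buf = np.zeros((64, 64, 3), dtype=np.uint8)
buf[:, :, 0] = (np.arange(64) * 4)[np.newaxis, :]
buf[:, :, 1] = (np.arange(64) * 4)[:, np.newaxis]

write_ppm(buf, 'test.ppm')  # viewable with most Linux image viewers
```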
#### File: GL/AMD/framebuffer_sample_positions.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.AMD.framebuffer_sample_positions import *
from OpenGL.raw.GL.AMD.framebuffer_sample_positions import _EXTENSION_NAME
def glInitFramebufferSamplePositionsAMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/AMD/occlusion_query_event.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.AMD.occlusion_query_event import *
from OpenGL.raw.GL.AMD.occlusion_query_event import _EXTENSION_NAME
def glInitOcclusionQueryEventAMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/APPLE/texture_range.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.APPLE.texture_range import *
from OpenGL.raw.GL.APPLE.texture_range import _EXTENSION_NAME
def glInitTextureRangeAPPLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glTextureRangeAPPLE.pointer size not checked against length
glTextureRangeAPPLE=wrapper.wrapper(glTextureRangeAPPLE).setInputArraySize(
'pointer', None
)
glGetTexParameterPointervAPPLE=wrapper.wrapper(glGetTexParameterPointervAPPLE).setOutput(
'params',size=(1,),orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/APPLE/vertex_array_object.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.APPLE.vertex_array_object import *
from OpenGL.raw.GL.APPLE.vertex_array_object import _EXTENSION_NAME
def glInitVertexArrayObjectAPPLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDeleteVertexArraysAPPLE.arrays size not checked against n
glDeleteVertexArraysAPPLE=wrapper.wrapper(glDeleteVertexArraysAPPLE).setInputArraySize(
'arrays', None
)
glGenVertexArraysAPPLE=wrapper.wrapper(glGenVertexArraysAPPLE).setOutput(
'arrays',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/APPLE/vertex_array_range.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.APPLE.vertex_array_range import *
from OpenGL.raw.GL.APPLE.vertex_array_range import _EXTENSION_NAME
def glInitVertexArrayRangeAPPLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glVertexArrayRangeAPPLE=wrapper.wrapper(glVertexArrayRangeAPPLE).setOutput(
'pointer',size=lambda x:(x,),pnameArg='length',orPassIn=True
)
glFlushVertexArrayRangeAPPLE=wrapper.wrapper(glFlushVertexArrayRangeAPPLE).setOutput(
'pointer',size=lambda x:(x,),pnameArg='length',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/clip_control.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.clip_control import *
from OpenGL.raw.GL.ARB.clip_control import _EXTENSION_NAME
def glInitClipControlARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/compressed_texture_pixel_storage.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.compressed_texture_pixel_storage import *
from OpenGL.raw.GL.ARB.compressed_texture_pixel_storage import _EXTENSION_NAME
def glInitCompressedTexturePixelStorageARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/compute_variable_group_size.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.compute_variable_group_size import *
from OpenGL.raw.GL.ARB.compute_variable_group_size import _EXTENSION_NAME
def glInitComputeVariableGroupSizeARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/depth_clamp.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.depth_clamp import *
from OpenGL.raw.GL.ARB.depth_clamp import _EXTENSION_NAME
def glInitDepthClampARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/draw_buffers.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.draw_buffers import *
from OpenGL.raw.GL.ARB.draw_buffers import _EXTENSION_NAME
def glInitDrawBuffersARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDrawBuffersARB.bufs size not checked against n
glDrawBuffersARB=wrapper.wrapper(glDrawBuffersARB).setInputArraySize(
'bufs', None
)
### END AUTOGENERATED SECTION
from OpenGL.lazywrapper import lazy as _lazy
@_lazy( glDrawBuffersARB )
def glDrawBuffersARB( baseOperation, n=None, bufs=None ):
"""glDrawBuffersARB( bufs ) -> bufs
Wrapper will calculate n from dims of bufs if only
one argument is provided...
"""
if bufs is None:
bufs = n
n = None
bufs = arrays.GLenumArray.asArray( bufs )
if n is None:
n = arrays.GLenumArray.arraySize( bufs )
return baseOperation( n,bufs )
```
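The lazy wrapper above accepts either calling convention. A hypothetical sketch follows (it requires a live GL context; the buffer enums shown are just common choices for a default framebuffer):
```python
from OpenGL.GL import GL_FRONT_LEFT, GL_BACK_LEFT

# n inferred from the array length by the wrapper:
glDrawBuffersARB([GL_FRONT_LEFT, GL_BACK_LEFT])
# n passed explicitly, matching the raw entry point:
glDrawBuffersARB(2, [GL_FRONT_LEFT, GL_BACK_LEFT])
```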
#### File: GL/ARB/half_float_vertex.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.half_float_vertex import *
from OpenGL.raw.GL.ARB.half_float_vertex import _EXTENSION_NAME
def glInitHalfFloatVertexARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/invalidate_subdata.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.invalidate_subdata import *
from OpenGL.raw.GL.ARB.invalidate_subdata import _EXTENSION_NAME
def glInitInvalidateSubdataARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glInvalidateFramebuffer.attachments size not checked against numAttachments
glInvalidateFramebuffer=wrapper.wrapper(glInvalidateFramebuffer).setInputArraySize(
'attachments', None
)
# INPUT glInvalidateSubFramebuffer.attachments size not checked against numAttachments
glInvalidateSubFramebuffer=wrapper.wrapper(glInvalidateSubFramebuffer).setInputArraySize(
'attachments', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/map_buffer_alignment.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.map_buffer_alignment import *
from OpenGL.raw.GL.ARB.map_buffer_alignment import _EXTENSION_NAME
def glInitMapBufferAlignmentARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/map_buffer_range.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.map_buffer_range import *
from OpenGL.raw.GL.ARB.map_buffer_range import _EXTENSION_NAME
def glInitMapBufferRangeARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/multisample.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.multisample import *
from OpenGL.raw.GL.ARB.multisample import _EXTENSION_NAME
def glInitMultisampleARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/occlusion_query2.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.occlusion_query2 import *
from OpenGL.raw.GL.ARB.occlusion_query2 import _EXTENSION_NAME
def glInitOcclusionQuery2ARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/shader_atomic_counter_ops.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_atomic_counter_ops import *
from OpenGL.raw.GL.ARB.shader_atomic_counter_ops import _EXTENSION_NAME
def glInitShaderAtomicCounterOpsARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/shader_ballot.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_ballot import *
from OpenGL.raw.GL.ARB.shader_ballot import _EXTENSION_NAME
def glInitShaderBallotARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/shader_bit_encoding.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_bit_encoding import *
from OpenGL.raw.GL.ARB.shader_bit_encoding import _EXTENSION_NAME
def glInitShaderBitEncodingARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/shader_clock.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_clock import *
from OpenGL.raw.GL.ARB.shader_clock import _EXTENSION_NAME
def glInitShaderClockARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/shader_draw_parameters.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_draw_parameters import *
from OpenGL.raw.GL.ARB.shader_draw_parameters import _EXTENSION_NAME
def glInitShaderDrawParametersARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/shader_subroutine.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_subroutine import *
from OpenGL.raw.GL.ARB.shader_subroutine import _EXTENSION_NAME
def glInitShaderSubroutineARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glGetActiveSubroutineUniformiv=wrapper.wrapper(glGetActiveSubroutineUniformiv).setOutput(
'values',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetActiveSubroutineUniformName=wrapper.wrapper(glGetActiveSubroutineUniformName).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'name',size=lambda x:(x,),pnameArg='bufsize',orPassIn=True
)
glGetActiveSubroutineName=wrapper.wrapper(glGetActiveSubroutineName).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'name',size=lambda x:(x,),pnameArg='bufsize',orPassIn=True
)
# INPUT glUniformSubroutinesuiv.indices size not checked against count
glUniformSubroutinesuiv=wrapper.wrapper(glUniformSubroutinesuiv).setInputArraySize(
'indices', None
)
glGetUniformSubroutineuiv=wrapper.wrapper(glGetUniformSubroutineuiv).setOutput(
'params',size=(1,),orPassIn=True
)
glGetProgramStageiv=wrapper.wrapper(glGetProgramStageiv).setOutput(
'values',size=(1,),orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/shader_texture_image_samples.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_texture_image_samples import *
from OpenGL.raw.GL.ARB.shader_texture_image_samples import _EXTENSION_NAME
def glInitShaderTextureImageSamplesARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/shading_language_100.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shading_language_100 import *
from OpenGL.raw.GL.ARB.shading_language_100 import _EXTENSION_NAME
def glInitShadingLanguage100ARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/sync.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.sync import *
from OpenGL.raw.GL.ARB.sync import _EXTENSION_NAME
def glInitSyncARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glGetInteger64v=wrapper.wrapper(glGetInteger64v).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetSynciv=wrapper.wrapper(glGetSynciv).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'values',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
### END AUTOGENERATED SECTION
from OpenGL.raw.GL._types import GLint
from OpenGL.arrays import GLintArray
def glGetSync( sync, pname, bufSize=1,length=None,values=None ):
"""Wrapper around glGetSynciv that auto-allocates buffers
sync -- the GLsync struct pointer (see glGetSynciv)
pname -- constant to retrieve (see glGetSynciv)
bufSize -- defaults to 1, maximum number of items to retrieve,
currently all constants are defined to return a single
value
length -- None or a GLint() instance (ONLY!), must be a byref()
capable object with a .value attribute which retrieves the
set value
values -- None or an array object, if None, will be a default
return-array-type of length bufSize
returns values[:length.value], i.e. an array with the values set
by the call, currently always a single-value array.
"""
if values is None:
values = GLintArray.zeros( (bufSize,) )
if length is None:
length = GLint()
glGetSynciv( sync, pname, bufSize, length, values )
written = length.value
return values[:written]
```
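A hypothetical usage of the `glGetSync` convenience wrapper above (requires a live context exposing ARB_sync; `glFenceSync`, `glDeleteSync` and the constants come from the same extension):
```python
sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0)
# One-element array; GL_UNSIGNALED until the GPU passes the fence.
status = glGetSync(sync, GL_SYNC_STATUS)[0]
glDeleteSync(sync)
```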
#### File: GL/ARB/texture_env_crossbar.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_env_crossbar import *
from OpenGL.raw.GL.ARB.texture_env_crossbar import _EXTENSION_NAME
def glInitTextureEnvCrossbarARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/texture_float.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_float import *
from OpenGL.raw.GL.ARB.texture_float import _EXTENSION_NAME
def glInitTextureFloatARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/texture_multisample.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_multisample import *
from OpenGL.raw.GL.ARB.texture_multisample import _EXTENSION_NAME
def glInitTextureMultisampleARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glGetMultisamplefv=wrapper.wrapper(glGetMultisamplefv).setOutput(
'val',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/texture_non_power_of_two.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_non_power_of_two import *
from OpenGL.raw.GL.ARB.texture_non_power_of_two import _EXTENSION_NAME
def glInitTextureNonPowerOfTwoARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/texture_rg.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_rg import *
from OpenGL.raw.GL.ARB.texture_rg import _EXTENSION_NAME
def glInitTextureRgARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
from OpenGL import images as _images
_images.COMPONENT_COUNTS.update( {
GL_R16:1,
GL_R16F:1,
GL_R16I:1,
GL_R16UI:1,
GL_R32F:1,
GL_R32I:1,
GL_R32UI:1,
GL_R8:1,
GL_R8I:1,
GL_R8UI:1,
GL_RG:2,
GL_RG16:2,
GL_RG16F:2,
GL_RG16I:2,
GL_RG16UI:2,
GL_RG32F:2,
GL_RG32I:2,
GL_RG32UI:2,
GL_RG8:2,
GL_RG8I:2,
GL_RG8UI:2,
GL_RG_INTEGER:2,
})
```
#### File: GL/ARB/texture_stencil8.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_stencil8 import *
from OpenGL.raw.GL.ARB.texture_stencil8 import _EXTENSION_NAME
def glInitTextureStencil8ARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/transform_feedback_instanced.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.transform_feedback_instanced import *
from OpenGL.raw.GL.ARB.transform_feedback_instanced import _EXTENSION_NAME
def glInitTransformFeedbackInstancedARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/vertex_shader.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.vertex_shader import *
from OpenGL.raw.GL.ARB.vertex_shader import _EXTENSION_NAME
def glInitVertexShaderARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glVertexAttrib1fvARB=wrapper.wrapper(glVertexAttrib1fvARB).setInputArraySize(
'v', 1
)
glVertexAttrib1svARB=wrapper.wrapper(glVertexAttrib1svARB).setInputArraySize(
'v', 1
)
glVertexAttrib1dvARB=wrapper.wrapper(glVertexAttrib1dvARB).setInputArraySize(
'v', 1
)
glVertexAttrib2fvARB=wrapper.wrapper(glVertexAttrib2fvARB).setInputArraySize(
'v', 2
)
glVertexAttrib2svARB=wrapper.wrapper(glVertexAttrib2svARB).setInputArraySize(
'v', 2
)
glVertexAttrib2dvARB=wrapper.wrapper(glVertexAttrib2dvARB).setInputArraySize(
'v', 2
)
glVertexAttrib3fvARB=wrapper.wrapper(glVertexAttrib3fvARB).setInputArraySize(
'v', 3
)
glVertexAttrib3svARB=wrapper.wrapper(glVertexAttrib3svARB).setInputArraySize(
'v', 3
)
glVertexAttrib3dvARB=wrapper.wrapper(glVertexAttrib3dvARB).setInputArraySize(
'v', 3
)
glVertexAttrib4fvARB=wrapper.wrapper(glVertexAttrib4fvARB).setInputArraySize(
'v', 4
)
glVertexAttrib4svARB=wrapper.wrapper(glVertexAttrib4svARB).setInputArraySize(
'v', 4
)
glVertexAttrib4dvARB=wrapper.wrapper(glVertexAttrib4dvARB).setInputArraySize(
'v', 4
)
glVertexAttrib4ivARB=wrapper.wrapper(glVertexAttrib4ivARB).setInputArraySize(
'v', 4
)
glVertexAttrib4bvARB=wrapper.wrapper(glVertexAttrib4bvARB).setInputArraySize(
'v', 4
)
glVertexAttrib4ubvARB=wrapper.wrapper(glVertexAttrib4ubvARB).setInputArraySize(
'v', 4
)
glVertexAttrib4usvARB=wrapper.wrapper(glVertexAttrib4usvARB).setInputArraySize(
'v', 4
)
glVertexAttrib4uivARB=wrapper.wrapper(glVertexAttrib4uivARB).setInputArraySize(
'v', 4
)
glVertexAttrib4NbvARB=wrapper.wrapper(glVertexAttrib4NbvARB).setInputArraySize(
'v', 4
)
glVertexAttrib4NsvARB=wrapper.wrapper(glVertexAttrib4NsvARB).setInputArraySize(
'v', 4
)
glVertexAttrib4NivARB=wrapper.wrapper(glVertexAttrib4NivARB).setInputArraySize(
'v', 4
)
glVertexAttrib4NubvARB=wrapper.wrapper(glVertexAttrib4NubvARB).setInputArraySize(
'v', 4
)
glVertexAttrib4NusvARB=wrapper.wrapper(glVertexAttrib4NusvARB).setInputArraySize(
'v', 4
)
glVertexAttrib4NuivARB=wrapper.wrapper(glVertexAttrib4NuivARB).setInputArraySize(
'v', 4
)
# INPUT glVertexAttribPointerARB.pointer size not checked against 'size,type,stride'
glVertexAttribPointerARB=wrapper.wrapper(glVertexAttribPointerARB).setInputArraySize(
'pointer', None
)
glGetActiveAttribARB=wrapper.wrapper(glGetActiveAttribARB).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'name',size=lambda x:(x,),pnameArg='maxLength',orPassIn=True
).setOutput(
'size',size=(1,),orPassIn=True
).setOutput(
'type',size=(1,),orPassIn=True
)
glGetVertexAttribdvARB=wrapper.wrapper(glGetVertexAttribdvARB).setOutput(
'params',size=(4,),orPassIn=True
)
glGetVertexAttribfvARB=wrapper.wrapper(glGetVertexAttribfvARB).setOutput(
'params',size=(4,),orPassIn=True
)
glGetVertexAttribivARB=wrapper.wrapper(glGetVertexAttribivARB).setOutput(
'params',size=(4,),orPassIn=True
)
glGetVertexAttribPointervARB=wrapper.wrapper(glGetVertexAttribPointervARB).setOutput(
'pointer',size=(1,),orPassIn=True
)
### END AUTOGENERATED SECTION
from OpenGL._bytes import bytes, _NULL_8_BYTE, as_8_bit
from OpenGL.lazywrapper import lazy as _lazy
from OpenGL.GL.ARB.shader_objects import glGetObjectParameterivARB
@_lazy( glGetActiveAttribARB )
def glGetActiveAttribARB(baseOperation, program, index):
"""Retrieve the name, size and type of the uniform of the index in the program"""
max_index = int(glGetObjectParameterivARB( program, GL_OBJECT_ACTIVE_ATTRIBUTES_ARB ))
length = int(glGetObjectParameterivARB( program, GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB))
if index < max_index and index >= 0 and length > 0:
length,name,size,type = baseOperation( program, index )
        if hasattr(name,'tobytes'):
            # numpy >= 2.0 removed tostring(); prefer tobytes() when available.
            name = name.tobytes().rstrip(b'\000')
        elif hasattr(name,'tostring'):
            name = name.tostring().rstrip(b'\000')
        elif hasattr(name,'value'):
            name = name.value
return name,size,type
raise IndexError('index out of range from zero to %i' % (max_index - 1, ))
@_lazy( glGetAttribLocationARB )
def glGetAttribLocationARB( baseOperation, program, name ):
"""Check that name is a string with a null byte at the end of it"""
if not name:
raise ValueError( """Non-null name required""" )
name = as_8_bit( name )
if name[-1] != _NULL_8_BYTE:
name = name + _NULL_8_BYTE
return baseOperation( program, name )
```
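A hypothetical sketch of the two lazy wrappers above (needs a live context and a linked ARB program object; `program` and the attribute name are placeholders):
```python
name, size, attrib_type = glGetActiveAttribARB(program, 0)
# The wrapper appends the trailing NUL byte if the caller omitted it.
loc = glGetAttribLocationARB(program, b'position')
```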
#### File: GL/ARB/vertex_type_2_10_10_10_rev.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.vertex_type_2_10_10_10_rev import *
from OpenGL.raw.GL.ARB.vertex_type_2_10_10_10_rev import _EXTENSION_NAME
def glInitVertexType2101010RevARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glVertexAttribP1uiv=wrapper.wrapper(glVertexAttribP1uiv).setInputArraySize(
'value', 1
)
glVertexAttribP2uiv=wrapper.wrapper(glVertexAttribP2uiv).setInputArraySize(
'value', 1
)
glVertexAttribP3uiv=wrapper.wrapper(glVertexAttribP3uiv).setInputArraySize(
'value', 1
)
glVertexAttribP4uiv=wrapper.wrapper(glVertexAttribP4uiv).setInputArraySize(
'value', 1
)
glVertexP2uiv=wrapper.wrapper(glVertexP2uiv).setInputArraySize(
'value', 1
)
glVertexP3uiv=wrapper.wrapper(glVertexP3uiv).setInputArraySize(
'value', 1
)
glVertexP4uiv=wrapper.wrapper(glVertexP4uiv).setInputArraySize(
'value', 1
)
glTexCoordP1uiv=wrapper.wrapper(glTexCoordP1uiv).setInputArraySize(
'coords', 1
)
glTexCoordP2uiv=wrapper.wrapper(glTexCoordP2uiv).setInputArraySize(
'coords', 1
)
glTexCoordP3uiv=wrapper.wrapper(glTexCoordP3uiv).setInputArraySize(
'coords', 1
)
glTexCoordP4uiv=wrapper.wrapper(glTexCoordP4uiv).setInputArraySize(
'coords', 1
)
glMultiTexCoordP1uiv=wrapper.wrapper(glMultiTexCoordP1uiv).setInputArraySize(
'coords', 1
)
glMultiTexCoordP2uiv=wrapper.wrapper(glMultiTexCoordP2uiv).setInputArraySize(
'coords', 1
)
glMultiTexCoordP3uiv=wrapper.wrapper(glMultiTexCoordP3uiv).setInputArraySize(
'coords', 1
)
glMultiTexCoordP4uiv=wrapper.wrapper(glMultiTexCoordP4uiv).setInputArraySize(
'coords', 1
)
glNormalP3uiv=wrapper.wrapper(glNormalP3uiv).setInputArraySize(
'coords', 1
)
glColorP3uiv=wrapper.wrapper(glColorP3uiv).setInputArraySize(
'color', 1
)
glColorP4uiv=wrapper.wrapper(glColorP4uiv).setInputArraySize(
'color', 1
)
glSecondaryColorP3uiv=wrapper.wrapper(glSecondaryColorP3uiv).setInputArraySize(
'color', 1
)
### END AUTOGENERATED SECTION
```
#### File: GL/ATI/texture_mirror_once.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ATI.texture_mirror_once import *
from OpenGL.raw.GL.ATI.texture_mirror_once import _EXTENSION_NAME
def glInitTextureMirrorOnceATI():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ATI/vertex_streams.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ATI.vertex_streams import *
from OpenGL.raw.GL.ATI.vertex_streams import _EXTENSION_NAME
def glInitVertexStreamsATI():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glVertexStream1svATI=wrapper.wrapper(glVertexStream1svATI).setInputArraySize(
'coords', 1
)
glVertexStream1ivATI=wrapper.wrapper(glVertexStream1ivATI).setInputArraySize(
'coords', 1
)
glVertexStream1fvATI=wrapper.wrapper(glVertexStream1fvATI).setInputArraySize(
'coords', 1
)
glVertexStream1dvATI=wrapper.wrapper(glVertexStream1dvATI).setInputArraySize(
'coords', 1
)
glVertexStream2svATI=wrapper.wrapper(glVertexStream2svATI).setInputArraySize(
'coords', 2
)
glVertexStream2ivATI=wrapper.wrapper(glVertexStream2ivATI).setInputArraySize(
'coords', 2
)
glVertexStream2fvATI=wrapper.wrapper(glVertexStream2fvATI).setInputArraySize(
'coords', 2
)
glVertexStream2dvATI=wrapper.wrapper(glVertexStream2dvATI).setInputArraySize(
'coords', 2
)
glVertexStream3svATI=wrapper.wrapper(glVertexStream3svATI).setInputArraySize(
'coords', 3
)
glVertexStream3ivATI=wrapper.wrapper(glVertexStream3ivATI).setInputArraySize(
'coords', 3
)
glVertexStream3fvATI=wrapper.wrapper(glVertexStream3fvATI).setInputArraySize(
'coords', 3
)
glVertexStream3dvATI=wrapper.wrapper(glVertexStream3dvATI).setInputArraySize(
'coords', 3
)
glVertexStream4svATI=wrapper.wrapper(glVertexStream4svATI).setInputArraySize(
'coords', 4
)
glVertexStream4ivATI=wrapper.wrapper(glVertexStream4ivATI).setInputArraySize(
'coords', 4
)
glVertexStream4fvATI=wrapper.wrapper(glVertexStream4fvATI).setInputArraySize(
'coords', 4
)
glVertexStream4dvATI=wrapper.wrapper(glVertexStream4dvATI).setInputArraySize(
'coords', 4
)
glNormalStream3bvATI=wrapper.wrapper(glNormalStream3bvATI).setInputArraySize(
'coords', 3
)
glNormalStream3svATI=wrapper.wrapper(glNormalStream3svATI).setInputArraySize(
'coords', 3
)
glNormalStream3ivATI=wrapper.wrapper(glNormalStream3ivATI).setInputArraySize(
'coords', 3
)
glNormalStream3fvATI=wrapper.wrapper(glNormalStream3fvATI).setInputArraySize(
'coords', 3
)
glNormalStream3dvATI=wrapper.wrapper(glNormalStream3dvATI).setInputArraySize(
'coords', 3
)
### END AUTOGENERATED SECTION
```
#### File: GL/DFX/tbuffer.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.DFX.tbuffer import *
from OpenGL.raw.GL.DFX.tbuffer import _EXTENSION_NAME
def glInitTbufferDFX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES1/APPLE/texture_format_BGRA8888.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.APPLE.texture_format_BGRA8888 import *
from OpenGL.raw.GLES1.APPLE.texture_format_BGRA8888 import _EXTENSION_NAME
def glInitTextureFormatBgra8888APPLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES1/NV/fence.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.NV.fence import *
from OpenGL.raw.GLES1.NV.fence import _EXTENSION_NAME
def glInitFenceNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDeleteFencesNV.fences size not checked against n
glDeleteFencesNV=wrapper.wrapper(glDeleteFencesNV).setInputArraySize(
'fences', None
)
glGenFencesNV=wrapper.wrapper(glGenFencesNV).setOutput(
'fences',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
glGetFenceivNV=wrapper.wrapper(glGetFenceivNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GLES1/OES/blend_equation_separate.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.blend_equation_separate import *
from OpenGL.raw.GLES1.OES.blend_equation_separate import _EXTENSION_NAME
def glInitBlendEquationSeparateOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES1/OES/EGL_image.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.EGL_image import *
from OpenGL.raw.GLES1.OES.EGL_image import _EXTENSION_NAME
def glInitEglImageOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES1/OES/query_matrix.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.query_matrix import *
from OpenGL.raw.GLES1.OES.query_matrix import _EXTENSION_NAME
def glInitQueryMatrixOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glQueryMatrixxOES=wrapper.wrapper(glQueryMatrixxOES).setOutput(
'exponent',size=(16,),orPassIn=True
).setOutput(
'mantissa',size=(16,),orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GLES1/OES/required_internalformat.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.required_internalformat import *
from OpenGL.raw.GLES1.OES.required_internalformat import _EXTENSION_NAME
def glInitRequiredInternalformatOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES1/QCOM/driver_control.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.QCOM.driver_control import *
from OpenGL.raw.GLES1.QCOM.driver_control import _EXTENSION_NAME
def glInitDriverControlQCOM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glGetDriverControlsQCOM.driverControls size not checked against size
glGetDriverControlsQCOM=wrapper.wrapper(glGetDriverControlsQCOM).setInputArraySize(
'driverControls', None
)
# INPUT glGetDriverControlStringQCOM.driverControlString size not checked against bufSize
glGetDriverControlStringQCOM=wrapper.wrapper(glGetDriverControlStringQCOM).setInputArraySize(
'driverControlString', None
)
### END AUTOGENERATED SECTION
```
#### File: GLES1/QCOM/tiled_rendering.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.QCOM.tiled_rendering import *
from OpenGL.raw.GLES1.QCOM.tiled_rendering import _EXTENSION_NAME
def glInitTiledRenderingQCOM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES1/VERSION/GLES1_1_0.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.VERSION.GLES1_1_0 import *
from OpenGL.raw.GLES1.VERSION.GLES1_1_0 import _EXTENSION_NAME
def glInitGles110VERSION():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glClipPlanef=wrapper.wrapper(glClipPlanef).setInputArraySize(
'eqn', 4
)
# INPUT glFogfv.params size not checked against 'pname'
glFogfv=wrapper.wrapper(glFogfv).setInputArraySize(
'params', None
)
glGetClipPlanef=wrapper.wrapper(glGetClipPlanef).setInputArraySize(
'equation', 4
)
glGetFloatv=wrapper.wrapper(glGetFloatv).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetLightfv=wrapper.wrapper(glGetLightfv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetMaterialfv=wrapper.wrapper(glGetMaterialfv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetTexEnvfv=wrapper.wrapper(glGetTexEnvfv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetTexParameterfv=wrapper.wrapper(glGetTexParameterfv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# INPUT glLightModelfv.params size not checked against 'pname'
glLightModelfv=wrapper.wrapper(glLightModelfv).setInputArraySize(
'params', None
)
# INPUT glLightfv.params size not checked against 'pname'
glLightfv=wrapper.wrapper(glLightfv).setInputArraySize(
'params', None
)
glLoadMatrixf=wrapper.wrapper(glLoadMatrixf).setInputArraySize(
'm', 16
)
# INPUT glMaterialfv.params size not checked against 'pname'
glMaterialfv=wrapper.wrapper(glMaterialfv).setInputArraySize(
'params', None
)
glMultMatrixf=wrapper.wrapper(glMultMatrixf).setInputArraySize(
'm', 16
)
# INPUT glPointParameterfv.params size not checked against 'pname'
glPointParameterfv=wrapper.wrapper(glPointParameterfv).setInputArraySize(
'params', None
)
# INPUT glTexEnvfv.params size not checked against 'pname'
glTexEnvfv=wrapper.wrapper(glTexEnvfv).setInputArraySize(
'params', None
)
# INPUT glTexParameterfv.params size not checked against 'pname'
glTexParameterfv=wrapper.wrapper(glTexParameterfv).setInputArraySize(
'params', None
)
# INPUT glBufferData.data size not checked against size
glBufferData=wrapper.wrapper(glBufferData).setInputArraySize(
'data', None
)
# INPUT glBufferSubData.data size not checked against size
glBufferSubData=wrapper.wrapper(glBufferSubData).setInputArraySize(
'data', None
)
glClipPlanex=wrapper.wrapper(glClipPlanex).setInputArraySize(
'equation', 4
)
# INPUT glColorPointer.pointer size not checked against 'size,type,stride'
glColorPointer=wrapper.wrapper(glColorPointer).setInputArraySize(
'pointer', None
)
# INPUT glCompressedTexImage2D.data size not checked against imageSize
glCompressedTexImage2D=wrapper.wrapper(glCompressedTexImage2D).setInputArraySize(
'data', None
)
# INPUT glCompressedTexSubImage2D.data size not checked against imageSize
glCompressedTexSubImage2D=wrapper.wrapper(glCompressedTexSubImage2D).setInputArraySize(
'data', None
)
# INPUT glDeleteBuffers.buffers size not checked against n
glDeleteBuffers=wrapper.wrapper(glDeleteBuffers).setInputArraySize(
'buffers', None
)
# INPUT glDeleteTextures.textures size not checked against n
glDeleteTextures=wrapper.wrapper(glDeleteTextures).setInputArraySize(
'textures', None
)
# INPUT glDrawElements.indices size not checked against 'count,type'
glDrawElements=wrapper.wrapper(glDrawElements).setInputArraySize(
'indices', None
)
# INPUT glFogxv.param size not checked against 'pname'
glFogxv=wrapper.wrapper(glFogxv).setInputArraySize(
'param', None
)
glGetBooleanv=wrapper.wrapper(glGetBooleanv).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetBufferParameteriv=wrapper.wrapper(glGetBufferParameteriv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetClipPlanex=wrapper.wrapper(glGetClipPlanex).setInputArraySize(
'equation', 4
)
glGenBuffers=wrapper.wrapper(glGenBuffers).setOutput(
'buffers',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
glGenTextures=wrapper.wrapper(glGenTextures).setOutput(
'textures',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
glGetIntegerv=wrapper.wrapper(glGetIntegerv).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# INPUT glGetLightxv.params size not checked against 'pname'
glGetLightxv=wrapper.wrapper(glGetLightxv).setInputArraySize(
'params', None
)
# INPUT glGetMaterialxv.params size not checked against 'pname'
glGetMaterialxv=wrapper.wrapper(glGetMaterialxv).setInputArraySize(
'params', None
)
glGetPointerv=wrapper.wrapper(glGetPointerv).setOutput(
'params',size=(1,),orPassIn=True
)
glGetTexEnviv=wrapper.wrapper(glGetTexEnviv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# INPUT glGetTexEnvxv.params size not checked against 'pname'
glGetTexEnvxv=wrapper.wrapper(glGetTexEnvxv).setInputArraySize(
'params', None
)
glGetTexParameteriv=wrapper.wrapper(glGetTexParameteriv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# INPUT glGetTexParameterxv.params size not checked against 'pname'
glGetTexParameterxv=wrapper.wrapper(glGetTexParameterxv).setInputArraySize(
'params', None
)
# INPUT glLightModelxv.param size not checked against 'pname'
glLightModelxv=wrapper.wrapper(glLightModelxv).setInputArraySize(
'param', None
)
# INPUT glLightxv.params size not checked against 'pname'
glLightxv=wrapper.wrapper(glLightxv).setInputArraySize(
'params', None
)
glLoadMatrixx=wrapper.wrapper(glLoadMatrixx).setInputArraySize(
'm', 16
)
# INPUT glMaterialxv.param size not checked against 'pname'
glMaterialxv=wrapper.wrapper(glMaterialxv).setInputArraySize(
'param', None
)
glMultMatrixx=wrapper.wrapper(glMultMatrixx).setInputArraySize(
'm', 16
)
# INPUT glNormalPointer.pointer size not checked against 'type,stride'
glNormalPointer=wrapper.wrapper(glNormalPointer).setInputArraySize(
'pointer', None
)
# INPUT glPointParameterxv.params size not checked against 'pname'
glPointParameterxv=wrapper.wrapper(glPointParameterxv).setInputArraySize(
'params', None
)
# OUTPUT glReadPixels.pixels COMPSIZE(format, type, width, height)
# INPUT glTexCoordPointer.pointer size not checked against 'size,type,stride'
glTexCoordPointer=wrapper.wrapper(glTexCoordPointer).setInputArraySize(
'pointer', None
)
# INPUT glTexEnviv.params size not checked against 'pname'
glTexEnviv=wrapper.wrapper(glTexEnviv).setInputArraySize(
'params', None
)
# INPUT glTexEnvxv.params size not checked against 'pname'
glTexEnvxv=wrapper.wrapper(glTexEnvxv).setInputArraySize(
'params', None
)
# INPUT glTexImage2D.pixels size not checked against 'format,type,width,height'
glTexImage2D=wrapper.wrapper(glTexImage2D).setInputArraySize(
'pixels', None
)
# INPUT glTexParameteriv.params size not checked against 'pname'
glTexParameteriv=wrapper.wrapper(glTexParameteriv).setInputArraySize(
'params', None
)
# INPUT glTexParameterxv.params size not checked against 'pname'
glTexParameterxv=wrapper.wrapper(glTexParameterxv).setInputArraySize(
'params', None
)
# INPUT glTexSubImage2D.pixels size not checked against 'format,type,width,height'
glTexSubImage2D=wrapper.wrapper(glTexSubImage2D).setInputArraySize(
'pixels', None
)
# INPUT glVertexPointer.pointer size not checked against 'size,type,stride'
glVertexPointer=wrapper.wrapper(glVertexPointer).setInputArraySize(
'pointer', None
)
### END AUTOGENERATED SECTION
```
#### File: GLES2/AMD/program_binary_Z400.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.AMD.program_binary_Z400 import *
from OpenGL.raw.GLES2.AMD.program_binary_Z400 import _EXTENSION_NAME
def glInitProgramBinaryZ400AMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/ANGLE/framebuffer_multisample.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ANGLE.framebuffer_multisample import *
from OpenGL.raw.GLES2.ANGLE.framebuffer_multisample import _EXTENSION_NAME
def glInitFramebufferMultisampleANGLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/ANGLE/pack_reverse_row_order.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ANGLE.pack_reverse_row_order import *
from OpenGL.raw.GLES2.ANGLE.pack_reverse_row_order import _EXTENSION_NAME
def glInitPackReverseRowOrderANGLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/ANGLE/program_binary.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ANGLE.program_binary import *
from OpenGL.raw.GLES2.ANGLE.program_binary import _EXTENSION_NAME
def glInitProgramBinaryANGLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/ANGLE/texture_usage.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ANGLE.texture_usage import *
from OpenGL.raw.GLES2.ANGLE.texture_usage import _EXTENSION_NAME
def glInitTextureUsageANGLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/ARM/mali_shader_binary.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ARM.mali_shader_binary import *
from OpenGL.raw.GLES2.ARM.mali_shader_binary import _EXTENSION_NAME
def glInitMaliShaderBinaryARM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/ARM/shader_framebuffer_fetch.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ARM.shader_framebuffer_fetch import *
from OpenGL.raw.GLES2.ARM.shader_framebuffer_fetch import _EXTENSION_NAME
def glInitShaderFramebufferFetchARM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/buffer_storage.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.buffer_storage import *
from OpenGL.raw.GLES2.EXT.buffer_storage import _EXTENSION_NAME
def glInitBufferStorageEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glBufferStorageEXT.data size not checked against size
glBufferStorageEXT=wrapper.wrapper(glBufferStorageEXT).setInputArraySize(
'data', None
)
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/clip_control.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.clip_control import *
from OpenGL.raw.GLES2.EXT.clip_control import _EXTENSION_NAME
def glInitClipControlEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/draw_buffers_indexed.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.draw_buffers_indexed import *
from OpenGL.raw.GLES2.EXT.draw_buffers_indexed import _EXTENSION_NAME
def glInitDrawBuffersIndexedEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/multi_draw_indirect.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.multi_draw_indirect import *
from OpenGL.raw.GLES2.EXT.multi_draw_indirect import _EXTENSION_NAME
def glInitMultiDrawIndirectEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glMultiDrawArraysIndirectEXT.indirect size not checked against 'drawcount,stride'
glMultiDrawArraysIndirectEXT=wrapper.wrapper(glMultiDrawArraysIndirectEXT).setInputArraySize(
'indirect', None
)
# INPUT glMultiDrawElementsIndirectEXT.indirect size not checked against 'drawcount,stride'
glMultiDrawElementsIndirectEXT=wrapper.wrapper(glMultiDrawElementsIndirectEXT).setInputArraySize(
'indirect', None
)
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/shader_pixel_local_storage.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.shader_pixel_local_storage import *
from OpenGL.raw.GLES2.EXT.shader_pixel_local_storage import _EXTENSION_NAME
def glInitShaderPixelLocalStorageEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/shader_texture_lod.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.shader_texture_lod import *
from OpenGL.raw.GLES2.EXT.shader_texture_lod import _EXTENSION_NAME
def glInitShaderTextureLodEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/sRGB_write_control.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.sRGB_write_control import *
from OpenGL.raw.GLES2.EXT.sRGB_write_control import _EXTENSION_NAME
def glInitSrgbWriteControlEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/texture_buffer.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.texture_buffer import *
from OpenGL.raw.GLES2.EXT.texture_buffer import _EXTENSION_NAME
def glInitTextureBufferEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/texture_compression_bptc.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.texture_compression_bptc import *
from OpenGL.raw.GLES2.EXT.texture_compression_bptc import _EXTENSION_NAME
def glInitTextureCompressionBptcEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/texture_mirror_clamp_to_edge.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.texture_mirror_clamp_to_edge import *
from OpenGL.raw.GLES2.EXT.texture_mirror_clamp_to_edge import _EXTENSION_NAME
def glInitTextureMirrorClampToEdgeEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/texture_sRGB_RG8.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.texture_sRGB_RG8 import *
from OpenGL.raw.GLES2.EXT.texture_sRGB_RG8 import _EXTENSION_NAME
def glInitTextureSrgbRg8EXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/IMG/program_binary.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.IMG.program_binary import *
from OpenGL.raw.GLES2.IMG.program_binary import _EXTENSION_NAME
def glInitProgramBinaryIMG():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/IMG/read_format.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.IMG.read_format import *
from OpenGL.raw.GLES2.IMG.read_format import _EXTENSION_NAME
def glInitReadFormatIMG():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/conservative_raster.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.conservative_raster import *
from OpenGL.raw.GLES2.NV.conservative_raster import _EXTENSION_NAME
def glInitConservativeRasterNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/fill_rectangle.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.fill_rectangle import *
from OpenGL.raw.GLES2.NV.fill_rectangle import _EXTENSION_NAME
def glInitFillRectangleNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/fragment_shader_barycentric.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.fragment_shader_barycentric import *
from OpenGL.raw.GLES2.NV.fragment_shader_barycentric import _EXTENSION_NAME
def glInitFragmentShaderBarycentricNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/image_formats.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.image_formats import *
from OpenGL.raw.GLES2.NV.image_formats import _EXTENSION_NAME
def glInitImageFormatsNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/mesh_shader.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.mesh_shader import *
from OpenGL.raw.GLES2.NV.mesh_shader import _EXTENSION_NAME
def glInitMeshShaderNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/non_square_matrices.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.non_square_matrices import *
from OpenGL.raw.GLES2.NV.non_square_matrices import _EXTENSION_NAME
def glInitNonSquareMatricesNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glUniformMatrix2x3fvNV.value size not checked against count*6
glUniformMatrix2x3fvNV=wrapper.wrapper(glUniformMatrix2x3fvNV).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix3x2fvNV.value size not checked against count*6
glUniformMatrix3x2fvNV=wrapper.wrapper(glUniformMatrix3x2fvNV).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix2x4fvNV.value size not checked against count*8
glUniformMatrix2x4fvNV=wrapper.wrapper(glUniformMatrix2x4fvNV).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix4x2fvNV.value size not checked against count*8
glUniformMatrix4x2fvNV=wrapper.wrapper(glUniformMatrix4x2fvNV).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix3x4fvNV.value size not checked against count*12
glUniformMatrix3x4fvNV=wrapper.wrapper(glUniformMatrix3x4fvNV).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix4x3fvNV.value size not checked against count*12
glUniformMatrix4x3fvNV=wrapper.wrapper(glUniformMatrix4x3fvNV).setInputArraySize(
'value', None
)
### END AUTOGENERATED SECTION
```
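The `setInputArraySize('value', None)` wrappers above register `value` as an unchecked input array: PyOpenGL converts whatever sequence is passed but does not verify it holds `count * N` elements. A hedged call sketch under that assumption (`location` is a hypothetical uniform location; a GLES2 context exposing GL_NV_non_square_matrices is assumed):
```python
# Illustrative only; 'location' would normally come from glGetUniformLocation.
import numpy as np
from OpenGL.GLES2.NV.non_square_matrices import glUniformMatrix2x3fvNV

location = 0
count = 2  # number of 2x3 matrices to upload
# Column-major, 6 floats per matrix; the wrapper does not length-check this,
# so the caller must supply count*6 elements.
value = np.zeros(count * 6, dtype=np.float32)
glUniformMatrix2x3fvNV(location, count, False, value)
```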
#### File: GLES2/NV/sample_locations.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.sample_locations import *
from OpenGL.raw.GLES2.NV.sample_locations import _EXTENSION_NAME
def glInitSampleLocationsNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NVX/blend_equation_advanced_multi_draw_buffers.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NVX.blend_equation_advanced_multi_draw_buffers import *
from OpenGL.raw.GLES2.NVX.blend_equation_advanced_multi_draw_buffers import _EXTENSION_NAME
def glInitBlendEquationAdvancedMultiDrawBuffersNVX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/fbo_render_mipmap.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.fbo_render_mipmap import *
from OpenGL.raw.GLES2.OES.fbo_render_mipmap import _EXTENSION_NAME
def glInitFboRenderMipmapOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/primitive_bounding_box.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.primitive_bounding_box import *
from OpenGL.raw.GLES2.OES.primitive_bounding_box import _EXTENSION_NAME
def glInitPrimitiveBoundingBoxOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/sample_variables.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.sample_variables import *
from OpenGL.raw.GLES2.OES.sample_variables import _EXTENSION_NAME
def glInitSampleVariablesOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/shader_multisample_interpolation.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.shader_multisample_interpolation import *
from OpenGL.raw.GLES2.OES.shader_multisample_interpolation import _EXTENSION_NAME
def glInitShaderMultisampleInterpolationOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/texture_float_linear.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.texture_float_linear import *
from OpenGL.raw.GLES2.OES.texture_float_linear import _EXTENSION_NAME
def glInitTextureFloatLinearOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/viewport_array.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.viewport_array import *
from OpenGL.raw.GLES2.OES.viewport_array import _EXTENSION_NAME
def glInitViewportArrayOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glViewportArrayvOES.v size not checked against 'count'
glViewportArrayvOES=wrapper.wrapper(glViewportArrayvOES).setInputArraySize(
'v', None
)
glViewportIndexedfvOES=wrapper.wrapper(glViewportIndexedfvOES).setInputArraySize(
'v', 4
)
# INPUT glScissorArrayvOES.v size not checked against 'count'
glScissorArrayvOES=wrapper.wrapper(glScissorArrayvOES).setInputArraySize(
'v', None
)
glScissorIndexedvOES=wrapper.wrapper(glScissorIndexedvOES).setInputArraySize(
'v', 4
)
# INPUT glGetFloati_vOES.data size not checked against 'target'
glGetFloati_vOES=wrapper.wrapper(glGetFloati_vOES).setInputArraySize(
'data', None
)
### END AUTOGENERATED SECTION
```
#### File: GLES2/OVR/multiview_multisampled_render_to_texture.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OVR.multiview_multisampled_render_to_texture import *
from OpenGL.raw.GLES2.OVR.multiview_multisampled_render_to_texture import _EXTENSION_NAME
def glInitMultiviewMultisampledRenderToTextureOVR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/QCOM/alpha_test.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.QCOM.alpha_test import *
from OpenGL.raw.GLES2.QCOM.alpha_test import _EXTENSION_NAME
def glInitAlphaTestQCOM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/cmyka.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.cmyka import *
from OpenGL.raw.GL.EXT.cmyka import _EXTENSION_NAME
def glInitCmykaEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
# Manual additions: register the per-pixel component counts for the CMYK
# formats so PyOpenGL's image helpers can size pixel arrays for them.
from OpenGL import images as _i
_i.COMPONENT_COUNTS[ GL_CMYK_EXT ] = 4
_i.COMPONENT_COUNTS[ GL_CMYKA_EXT ] = 5
```
#### File: GL/EXT/compiled_vertex_array.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.compiled_vertex_array import *
from OpenGL.raw.GL.EXT.compiled_vertex_array import _EXTENSION_NAME
def glInitCompiledVertexArrayEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/coordinate_frame.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.coordinate_frame import *
from OpenGL.raw.GL.EXT.coordinate_frame import _EXTENSION_NAME
def glInitCoordinateFrameEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glTangent3bvEXT=wrapper.wrapper(glTangent3bvEXT).setInputArraySize(
'v', 3
)
glTangent3dvEXT=wrapper.wrapper(glTangent3dvEXT).setInputArraySize(
'v', 3
)
glTangent3fvEXT=wrapper.wrapper(glTangent3fvEXT).setInputArraySize(
'v', 3
)
glTangent3ivEXT=wrapper.wrapper(glTangent3ivEXT).setInputArraySize(
'v', 3
)
glTangent3svEXT=wrapper.wrapper(glTangent3svEXT).setInputArraySize(
'v', 3
)
glBinormal3bvEXT=wrapper.wrapper(glBinormal3bvEXT).setInputArraySize(
'v', 3
)
glBinormal3dvEXT=wrapper.wrapper(glBinormal3dvEXT).setInputArraySize(
'v', 3
)
glBinormal3fvEXT=wrapper.wrapper(glBinormal3fvEXT).setInputArraySize(
'v', 3
)
glBinormal3ivEXT=wrapper.wrapper(glBinormal3ivEXT).setInputArraySize(
'v', 3
)
glBinormal3svEXT=wrapper.wrapper(glBinormal3svEXT).setInputArraySize(
'v', 3
)
# INPUT glTangentPointerEXT.pointer size not checked against 'type,stride'
glTangentPointerEXT=wrapper.wrapper(glTangentPointerEXT).setInputArraySize(
'pointer', None
)
# INPUT glBinormalPointerEXT.pointer size not checked against 'type,stride'
glBinormalPointerEXT=wrapper.wrapper(glBinormalPointerEXT).setInputArraySize(
'pointer', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/cull_vertex.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.cull_vertex import *
from OpenGL.raw.GL.EXT.cull_vertex import _EXTENSION_NAME
def glInitCullVertexEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glCullParameterdvEXT=wrapper.wrapper(glCullParameterdvEXT).setOutput(
'params',size=(4,),orPassIn=True
)
glCullParameterfvEXT=wrapper.wrapper(glCullParameterfvEXT).setOutput(
'params',size=(4,),orPassIn=True
)
### END AUTOGENERATED SECTION
```
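The `setOutput(..., orPassIn=True)` pattern used here (and throughout these modules) lets the caller omit the output parameter entirely; the wrapper then allocates an array of the declared size and returns it. A hedged sketch, assuming a legacy desktop context that advertises GL_EXT_cull_vertex:
```python
# Illustrative only; requires a current context exposing GL_EXT_cull_vertex.
from OpenGL.GL.EXT.cull_vertex import (
    glInitCullVertexEXT,
    glCullParameterdvEXT,
    GL_CULL_VERTEX_EYE_POSITION_EXT,
)

if glInitCullVertexEXT():
    # 'params' is a size-(4,) output with orPassIn=True, so it may be omitted
    # and the wrapper returns a freshly allocated 4-element array instead.
    eye_position = glCullParameterdvEXT(GL_CULL_VERTEX_EYE_POSITION_EXT)
```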
#### File: GL/EXT/framebuffer_multisample_blit_scaled.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.framebuffer_multisample_blit_scaled import *
from OpenGL.raw.GL.EXT.framebuffer_multisample_blit_scaled import _EXTENSION_NAME
def glInitFramebufferMultisampleBlitScaledEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/histogram.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.histogram import *
from OpenGL.raw.GL.EXT.histogram import _EXTENSION_NAME
def glInitHistogramEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# OUTPUT glGetHistogramEXT.values COMPSIZE(target, format, type)
glGetHistogramParameterfvEXT=wrapper.wrapper(glGetHistogramParameterfvEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetHistogramParameterivEXT=wrapper.wrapper(glGetHistogramParameterivEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# OUTPUT glGetMinmaxEXT.values COMPSIZE(target, format, type)
glGetMinmaxParameterfvEXT=wrapper.wrapper(glGetMinmaxParameterfvEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetMinmaxParameterivEXT=wrapper.wrapper(glGetMinmaxParameterivEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
# Manual overrides: each histogram parameter query yields a single value, so
# re-wrap the functions above with a fixed size-1 output instead of the
# pname-based size mapping used in the autogenerated section.
glGetHistogramParameterfvEXT = wrapper.wrapper(glGetHistogramParameterfvEXT).setOutput(
    "params",(1,), orPassIn=True
)
glGetHistogramParameterivEXT = wrapper.wrapper(glGetHistogramParameterivEXT).setOutput(
    "params",(1,), orPassIn=True
)
```
#### File: GL/EXT/index_func.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.index_func import *
from OpenGL.raw.GL.EXT.index_func import _EXTENSION_NAME
def glInitIndexFuncEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/light_texture.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.light_texture import *
from OpenGL.raw.GL.EXT.light_texture import _EXTENSION_NAME
def glInitLightTextureEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/multi_draw_arrays.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.multi_draw_arrays import *
from OpenGL.raw.GL.EXT.multi_draw_arrays import _EXTENSION_NAME
def glInitMultiDrawArraysEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glMultiDrawArraysEXT.count size not checked against 'primcount'
# INPUT glMultiDrawArraysEXT.first size not checked against 'primcount'
glMultiDrawArraysEXT=wrapper.wrapper(glMultiDrawArraysEXT).setInputArraySize(
'count', None
).setInputArraySize(
'first', None
)
# INPUT glMultiDrawElementsEXT.count size not checked against 'primcount'
# INPUT glMultiDrawElementsEXT.indices size not checked against 'primcount'
glMultiDrawElementsEXT=wrapper.wrapper(glMultiDrawElementsEXT).setInputArraySize(
'count', None
).setInputArraySize(
'indices', None
)
### END AUTOGENERATED SECTION
```
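As the `# INPUT ... not checked` comments note, neither `first` nor `count` is validated against `primcount`, so sizing is the caller's responsibility. A hedged draw sketch (assumes a current context exposing GL_EXT_multi_draw_arrays, with vertex arrays already configured elsewhere):
```python
# Illustrative only; vertex array setup is assumed to have happened elsewhere.
import numpy as np
from OpenGL.GL import GL_TRIANGLES
from OpenGL.GL.EXT.multi_draw_arrays import glMultiDrawArraysEXT

first = np.array([0, 3, 6], dtype=np.int32)  # start index of each run
count = np.array([3, 3, 3], dtype=np.int32)  # vertex count of each run
# Both arrays must hold at least primcount (here 3) elements, since the
# wrapper performs no length check.
glMultiDrawArraysEXT(GL_TRIANGLES, first, count, 3)
```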
#### File: GL/EXT/packed_float.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.packed_float import *
from OpenGL.raw.GL.EXT.packed_float import _EXTENSION_NAME
def glInitPackedFloatEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/paletted_texture.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.paletted_texture import *
from OpenGL.raw.GL.EXT.paletted_texture import _EXTENSION_NAME
def glInitPalettedTextureEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glColorTableEXT.table size not checked against 'format,type,width'
glColorTableEXT=wrapper.wrapper(glColorTableEXT).setInputArraySize(
'table', None
)
# OUTPUT glGetColorTableEXT.data COMPSIZE(target, format, type)
glGetColorTableParameterivEXT=wrapper.wrapper(glGetColorTableParameterivEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetColorTableParameterfvEXT=wrapper.wrapper(glGetColorTableParameterfvEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/polygon_offset.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.polygon_offset import *
from OpenGL.raw.GL.EXT.polygon_offset import _EXTENSION_NAME
def glInitPolygonOffsetEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/secondary_color.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.secondary_color import *
from OpenGL.raw.GL.EXT.secondary_color import _EXTENSION_NAME
def glInitSecondaryColorEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glSecondaryColor3bvEXT=wrapper.wrapper(glSecondaryColor3bvEXT).setInputArraySize(
'v', 3
)
glSecondaryColor3dvEXT=wrapper.wrapper(glSecondaryColor3dvEXT).setInputArraySize(
'v', 3
)
glSecondaryColor3fvEXT=wrapper.wrapper(glSecondaryColor3fvEXT).setInputArraySize(
'v', 3
)
glSecondaryColor3ivEXT=wrapper.wrapper(glSecondaryColor3ivEXT).setInputArraySize(
'v', 3
)
glSecondaryColor3svEXT=wrapper.wrapper(glSecondaryColor3svEXT).setInputArraySize(
'v', 3
)
glSecondaryColor3ubvEXT=wrapper.wrapper(glSecondaryColor3ubvEXT).setInputArraySize(
'v', 3
)
glSecondaryColor3uivEXT=wrapper.wrapper(glSecondaryColor3uivEXT).setInputArraySize(
'v', 3
)
glSecondaryColor3usvEXT=wrapper.wrapper(glSecondaryColor3usvEXT).setInputArraySize(
'v', 3
)
# INPUT glSecondaryColorPointerEXT.pointer size not checked against 'size,type,stride'
glSecondaryColorPointerEXT=wrapper.wrapper(glSecondaryColorPointerEXT).setInputArraySize(
'pointer', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/shader_framebuffer_fetch.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.shader_framebuffer_fetch import *
from OpenGL.raw.GL.EXT.shader_framebuffer_fetch import _EXTENSION_NAME
def glInitShaderFramebufferFetchEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/shared_texture_palette.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.shared_texture_palette import *
from OpenGL.raw.GL.EXT.shared_texture_palette import _EXTENSION_NAME
def glInitSharedTexturePaletteEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/stencil_wrap.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.stencil_wrap import *
from OpenGL.raw.GL.EXT.stencil_wrap import _EXTENSION_NAME
def glInitStencilWrapEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/texture_compression_latc.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.texture_compression_latc import *
from OpenGL.raw.GL.EXT.texture_compression_latc import _EXTENSION_NAME
def glInitTextureCompressionLatcEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/texture_env_combine.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.texture_env_combine import *
from OpenGL.raw.GL.EXT.texture_env_combine import _EXTENSION_NAME
def glInitTextureEnvCombineEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/texture_env_dot3.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.texture_env_dot3 import *
from OpenGL.raw.GL.EXT.texture_env_dot3 import _EXTENSION_NAME
def glInitTextureEnvDot3EXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/texture_integer.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.texture_integer import *
from OpenGL.raw.GL.EXT.texture_integer import _EXTENSION_NAME
def glInitTextureIntegerEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glTexParameterIivEXT.params size not checked against 'pname'
glTexParameterIivEXT=wrapper.wrapper(glTexParameterIivEXT).setInputArraySize(
'params', None
)
# INPUT glTexParameterIuivEXT.params size not checked against 'pname'
glTexParameterIuivEXT=wrapper.wrapper(glTexParameterIuivEXT).setInputArraySize(
'params', None
)
glGetTexParameterIivEXT=wrapper.wrapper(glGetTexParameterIivEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetTexParameterIuivEXT=wrapper.wrapper(glGetTexParameterIuivEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
```
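Here the output size is not fixed but looked up from `pname` through `_glgets._glget_size_mapping`. A hedged sketch (assumes a current desktop context exposing GL_EXT_texture_integer):
```python
# Illustrative only; requires a context exposing GL_EXT_texture_integer.
from OpenGL.GL import GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR
from OpenGL.GL.EXT.texture_integer import glGetTexParameterIivEXT

# The wrapper sizes 'params' from pname via the glGet size mapping, so the
# output array may be omitted and one of the matching length is returned.
border = glGetTexParameterIivEXT(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR)
```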
#### File: GL/EXT/texture_snorm.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.texture_snorm import *
from OpenGL.raw.GL.EXT.texture_snorm import _EXTENSION_NAME
def glInitTextureSnormEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/EXT/vertex_shader.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.vertex_shader import *
from OpenGL.raw.GL.EXT.vertex_shader import _EXTENSION_NAME
def glInitVertexShaderEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glSetInvariantEXT.addr size not checked against 'id,type'
glSetInvariantEXT=wrapper.wrapper(glSetInvariantEXT).setInputArraySize(
'addr', None
)
# INPUT glSetLocalConstantEXT.addr size not checked against 'id,type'
glSetLocalConstantEXT=wrapper.wrapper(glSetLocalConstantEXT).setInputArraySize(
'addr', None
)
# INPUT glVariantbvEXT.addr size not checked against 'id'
glVariantbvEXT=wrapper.wrapper(glVariantbvEXT).setInputArraySize(
'addr', None
)
# INPUT glVariantsvEXT.addr size not checked against 'id'
glVariantsvEXT=wrapper.wrapper(glVariantsvEXT).setInputArraySize(
'addr', None
)
# INPUT glVariantivEXT.addr size not checked against 'id'
glVariantivEXT=wrapper.wrapper(glVariantivEXT).setInputArraySize(
'addr', None
)
# INPUT glVariantfvEXT.addr size not checked against 'id'
glVariantfvEXT=wrapper.wrapper(glVariantfvEXT).setInputArraySize(
'addr', None
)
# INPUT glVariantdvEXT.addr size not checked against 'id'
glVariantdvEXT=wrapper.wrapper(glVariantdvEXT).setInputArraySize(
'addr', None
)
# INPUT glVariantubvEXT.addr size not checked against 'id'
glVariantubvEXT=wrapper.wrapper(glVariantubvEXT).setInputArraySize(
'addr', None
)
# INPUT glVariantusvEXT.addr size not checked against 'id'
glVariantusvEXT=wrapper.wrapper(glVariantusvEXT).setInputArraySize(
'addr', None
)
# INPUT glVariantuivEXT.addr size not checked against 'id'
glVariantuivEXT=wrapper.wrapper(glVariantuivEXT).setInputArraySize(
'addr', None
)
# INPUT glVariantPointerEXT.addr size not checked against 'id,type,stride'
glVariantPointerEXT=wrapper.wrapper(glVariantPointerEXT).setInputArraySize(
'addr', None
)
glGetVariantBooleanvEXT=wrapper.wrapper(glGetVariantBooleanvEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
glGetVariantIntegervEXT=wrapper.wrapper(glGetVariantIntegervEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
glGetVariantFloatvEXT=wrapper.wrapper(glGetVariantFloatvEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
glGetVariantPointervEXT=wrapper.wrapper(glGetVariantPointervEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
glGetInvariantBooleanvEXT=wrapper.wrapper(glGetInvariantBooleanvEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
glGetInvariantIntegervEXT=wrapper.wrapper(glGetInvariantIntegervEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
glGetInvariantFloatvEXT=wrapper.wrapper(glGetInvariantFloatvEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
glGetLocalConstantBooleanvEXT=wrapper.wrapper(glGetLocalConstantBooleanvEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
glGetLocalConstantIntegervEXT=wrapper.wrapper(glGetLocalConstantIntegervEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
glGetLocalConstantFloatvEXT=wrapper.wrapper(glGetLocalConstantFloatvEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='id',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: OpenGL/GL/framebufferobjects.py
```python
from OpenGL.extensions import alternate
from OpenGL.GL.ARB.framebuffer_object import *
from OpenGL.GL.EXT.framebuffer_object import *
from OpenGL.GL.EXT.framebuffer_multisample import *
from OpenGL.GL.EXT.framebuffer_blit import *
glBindFramebuffer = alternate(glBindFramebuffer,glBindFramebufferEXT)
glBindRenderbuffer = alternate( glBindRenderbuffer, glBindRenderbufferEXT )
glCheckFramebufferStatus = alternate( glCheckFramebufferStatus, glCheckFramebufferStatusEXT )
glDeleteFramebuffers = alternate( glDeleteFramebuffers, glDeleteFramebuffersEXT )
glDeleteRenderbuffers = alternate( glDeleteRenderbuffers, glDeleteRenderbuffersEXT )
glFramebufferRenderbuffer = alternate( glFramebufferRenderbuffer, glFramebufferRenderbufferEXT )
glFramebufferTexture1D = alternate( glFramebufferTexture1D, glFramebufferTexture1DEXT )
glFramebufferTexture2D = alternate( glFramebufferTexture2D, glFramebufferTexture2DEXT )
glFramebufferTexture3D = alternate( glFramebufferTexture3D, glFramebufferTexture3DEXT )
glGenFramebuffers = alternate( glGenFramebuffers, glGenFramebuffersEXT )
glGenRenderbuffers = alternate( glGenRenderbuffers, glGenRenderbuffersEXT )
glGenerateMipmap = alternate( glGenerateMipmap, glGenerateMipmapEXT )
glGetFramebufferAttachmentParameteriv = alternate( glGetFramebufferAttachmentParameteriv, glGetFramebufferAttachmentParameterivEXT )
glGetRenderbufferParameteriv = alternate( glGetRenderbufferParameteriv, glGetRenderbufferParameterivEXT )
glIsFramebuffer = alternate( glIsFramebuffer, glIsFramebufferEXT )
glIsRenderbuffer = alternate( glIsRenderbuffer, glIsRenderbufferEXT )
glRenderbufferStorage = alternate( glRenderbufferStorage, glRenderbufferStorageEXT )
glBlitFramebuffer = alternate( glBlitFramebuffer, glBlitFramebufferEXT )
glRenderbufferStorageMultisample = alternate( glRenderbufferStorageMultisample, glRenderbufferStorageMultisampleEXT )
# this entry point is new to the ARB version of the extension, so there is
# no EXT fallback to register as an alternate:
#glFramebufferTextureLayer = alternate( glFramebufferTextureLayer, glFramebufferTextureLayerEXT )
def checkFramebufferStatus():
"""Utility method to check status and raise errors"""
status = glCheckFramebufferStatus( GL_FRAMEBUFFER )
if status == GL_FRAMEBUFFER_COMPLETE:
return True
from OpenGL.error import GLError
    description = None
    # Match the raw status against the named constants so that str(status)
    # yields a readable symbolic name in the raised GLError.
    for error_constant in [
GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT,
GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT,
GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS,
GL_FRAMEBUFFER_INCOMPLETE_FORMATS,
GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER,
GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER,
GL_FRAMEBUFFER_UNSUPPORTED,
]:
if status == error_constant:
status = error_constant
description = str(status)
raise GLError(
err=status,
result=status,
baseOperation=glCheckFramebufferStatus,
description=description,
)
```
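A hedged usage sketch for the module above: because each name is an `alternate()`, the same call works whether the driver exposes the core/ARB or the EXT entry points, and `checkFramebufferStatus()` turns an incomplete framebuffer into a `GLError` carrying a readable description. The helper name and sizes below are illustrative; a current context is assumed:
```python
# Illustrative only; assumes a current OpenGL context.
from OpenGL.GL import (
    glGenTextures, glBindTexture, glTexImage2D,
    GL_TEXTURE_2D, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE,
    GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
)
from OpenGL.GL.framebufferobjects import (
    glGenFramebuffers, glBindFramebuffer, glFramebufferTexture2D,
    checkFramebufferStatus,
)

def make_color_fbo(width, height):
    """Create an FBO with one RGBA8 color attachment and validate it."""
    texture = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, texture)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, None)
    fbo = glGenFramebuffers(1)
    glBindFramebuffer(GL_FRAMEBUFFER, fbo)
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D, texture, 0)
    checkFramebufferStatus()  # raises GLError if the FBO is incomplete
    glBindFramebuffer(GL_FRAMEBUFFER, 0)
    return fbo, texture
```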
#### File: GL/HP/texture_lighting.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.HP.texture_lighting import *
from OpenGL.raw.GL.HP.texture_lighting import _EXTENSION_NAME
def glInitTextureLightingHP():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/IBM/multimode_draw_arrays.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.IBM.multimode_draw_arrays import *
from OpenGL.raw.GL.IBM.multimode_draw_arrays import _EXTENSION_NAME
def glInitMultimodeDrawArraysIBM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glMultiModeDrawArraysIBM.count size not checked against 'primcount'
# INPUT glMultiModeDrawArraysIBM.first size not checked against 'primcount'
# INPUT glMultiModeDrawArraysIBM.mode size not checked against 'primcount'
glMultiModeDrawArraysIBM=wrapper.wrapper(glMultiModeDrawArraysIBM).setInputArraySize(
'count', None
).setInputArraySize(
'first', None
).setInputArraySize(
'mode', None
)
# INPUT glMultiModeDrawElementsIBM.count size not checked against 'primcount'
# INPUT glMultiModeDrawElementsIBM.indices size not checked against 'primcount'
# INPUT glMultiModeDrawElementsIBM.mode size not checked against 'primcount'
glMultiModeDrawElementsIBM=wrapper.wrapper(glMultiModeDrawElementsIBM).setInputArraySize(
'count', None
).setInputArraySize(
'indices', None
).setInputArraySize(
'mode', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/IBM/vertex_array_lists.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.IBM.vertex_array_lists import *
from OpenGL.raw.GL.IBM.vertex_array_lists import _EXTENSION_NAME
def glInitVertexArrayListsIBM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glColorPointerListIBM.pointer size not checked against 'size,type,stride'
glColorPointerListIBM=wrapper.wrapper(glColorPointerListIBM).setInputArraySize(
'pointer', None
)
# INPUT glSecondaryColorPointerListIBM.pointer size not checked against 'size,type,stride'
glSecondaryColorPointerListIBM=wrapper.wrapper(glSecondaryColorPointerListIBM).setInputArraySize(
'pointer', None
)
# INPUT glEdgeFlagPointerListIBM.pointer size not checked against 'stride'
glEdgeFlagPointerListIBM=wrapper.wrapper(glEdgeFlagPointerListIBM).setInputArraySize(
'pointer', None
)
# INPUT glFogCoordPointerListIBM.pointer size not checked against 'type,stride'
glFogCoordPointerListIBM=wrapper.wrapper(glFogCoordPointerListIBM).setInputArraySize(
'pointer', None
)
# INPUT glIndexPointerListIBM.pointer size not checked against 'type,stride'
glIndexPointerListIBM=wrapper.wrapper(glIndexPointerListIBM).setInputArraySize(
'pointer', None
)
# INPUT glNormalPointerListIBM.pointer size not checked against 'type,stride'
glNormalPointerListIBM=wrapper.wrapper(glNormalPointerListIBM).setInputArraySize(
'pointer', None
)
# INPUT glTexCoordPointerListIBM.pointer size not checked against 'size,type,stride'
glTexCoordPointerListIBM=wrapper.wrapper(glTexCoordPointerListIBM).setInputArraySize(
'pointer', None
)
# INPUT glVertexPointerListIBM.pointer size not checked against 'size,type,stride'
glVertexPointerListIBM=wrapper.wrapper(glVertexPointerListIBM).setInputArraySize(
'pointer', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/INGR/color_clamp.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.INGR.color_clamp import *
from OpenGL.raw.GL.INGR.color_clamp import _EXTENSION_NAME
def glInitColorClampINGR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/INTEL/fragment_shader_ordering.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.INTEL.fragment_shader_ordering import *
from OpenGL.raw.GL.INTEL.fragment_shader_ordering import _EXTENSION_NAME
def glInitFragmentShaderOrderingINTEL():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/KHR/parallel_shader_compile.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.KHR.parallel_shader_compile import *
from OpenGL.raw.GL.KHR.parallel_shader_compile import _EXTENSION_NAME
def glInitParallelShaderCompileKHR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/KHR/robust_buffer_access_behavior.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.KHR.robust_buffer_access_behavior import *
from OpenGL.raw.GL.KHR.robust_buffer_access_behavior import _EXTENSION_NAME
def glInitRobustBufferAccessBehaviorKHR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/MESA/tile_raster_order.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.MESA.tile_raster_order import *
from OpenGL.raw.GL.MESA.tile_raster_order import _EXTENSION_NAME
def glInitTileRasterOrderMESA():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/MESA/ycbcr_texture.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.MESA.ycbcr_texture import *
from OpenGL.raw.GL.MESA.ycbcr_texture import _EXTENSION_NAME
def glInitYcbcrTextureMESA():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/blend_square.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.blend_square import *
from OpenGL.raw.GL.NV.blend_square import _EXTENSION_NAME
def glInitBlendSquareNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/conditional_render.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.conditional_render import *
from OpenGL.raw.GL.NV.conditional_render import _EXTENSION_NAME
def glInitConditionalRenderNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/copy_depth_to_color.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.copy_depth_to_color import *
from OpenGL.raw.GL.NV.copy_depth_to_color import _EXTENSION_NAME
def glInitCopyDepthToColorNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/draw_vulkan_image.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.draw_vulkan_image import *
from OpenGL.raw.GL.NV.draw_vulkan_image import _EXTENSION_NAME
def glInitDrawVulkanImageNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glGetVkProcAddrNV.name size not checked against 'name'
glGetVkProcAddrNV=wrapper.wrapper(glGetVkProcAddrNV).setInputArraySize(
'name', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/NV/fog_distance.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.fog_distance import *
from OpenGL.raw.GL.NV.fog_distance import _EXTENSION_NAME
def glInitFogDistanceNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/geometry_program4.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.geometry_program4 import *
from OpenGL.raw.GL.NV.geometry_program4 import _EXTENSION_NAME
def glInitGeometryProgram4NV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/pixel_data_range.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.pixel_data_range import *
from OpenGL.raw.GL.NV.pixel_data_range import _EXTENSION_NAME
def glInitPixelDataRangeNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glPixelDataRangeNV.pointer size not checked against length
glPixelDataRangeNV=wrapper.wrapper(glPixelDataRangeNV).setInputArraySize(
'pointer', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/NV/present_video.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.present_video import *
from OpenGL.raw.GL.NV.present_video import _EXTENSION_NAME
def glInitPresentVideoNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glGetVideoivNV=wrapper.wrapper(glGetVideoivNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetVideouivNV=wrapper.wrapper(glGetVideouivNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetVideoi64vNV=wrapper.wrapper(glGetVideoi64vNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetVideoui64vNV=wrapper.wrapper(glGetVideoui64vNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/NV/query_resource.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.query_resource import *
from OpenGL.raw.GL.NV.query_resource import _EXTENSION_NAME
def glInitQueryResourceNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/shader_atomic_counters.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.shader_atomic_counters import *
from OpenGL.raw.GL.NV.shader_atomic_counters import _EXTENSION_NAME
def glInitShaderAtomicCountersNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/shader_storage_buffer_object.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.shader_storage_buffer_object import *
from OpenGL.raw.GL.NV.shader_storage_buffer_object import _EXTENSION_NAME
def glInitShaderStorageBufferObjectNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/texture_barrier.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.texture_barrier import *
from OpenGL.raw.GL.NV.texture_barrier import _EXTENSION_NAME
def glInitTextureBarrierNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/texture_multisample.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.texture_multisample import *
from OpenGL.raw.GL.NV.texture_multisample import _EXTENSION_NAME
def glInitTextureMultisampleNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/vdpau_interop.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.vdpau_interop import *
from OpenGL.raw.GL.NV.vdpau_interop import _EXTENSION_NAME
def glInitVdpauInteropNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glVDPAURegisterVideoSurfaceNV.textureNames size not checked against numTextureNames
glVDPAURegisterVideoSurfaceNV=wrapper.wrapper(glVDPAURegisterVideoSurfaceNV).setInputArraySize(
'textureNames', None
)
# INPUT glVDPAURegisterOutputSurfaceNV.textureNames size not checked against numTextureNames
# glVDPAURegisterOutputSurfaceNV.vdpSurface is OUTPUT without known output size
glVDPAURegisterOutputSurfaceNV=wrapper.wrapper(glVDPAURegisterOutputSurfaceNV).setInputArraySize(
'textureNames', None
)
# glVDPAUGetSurfaceivNV.length is OUTPUT without known output size
glVDPAUGetSurfaceivNV=wrapper.wrapper(glVDPAUGetSurfaceivNV).setOutput(
'values',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
# INPUT glVDPAUMapSurfacesNV.surfaces size not checked against numSurfaces
glVDPAUMapSurfacesNV=wrapper.wrapper(glVDPAUMapSurfacesNV).setInputArraySize(
'surfaces', None
)
# INPUT glVDPAUUnmapSurfacesNV.surfaces size not checked against numSurface
glVDPAUUnmapSurfacesNV=wrapper.wrapper(glVDPAUUnmapSurfacesNV).setInputArraySize(
'surfaces', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/NV/vertex_program2.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.vertex_program2 import *
from OpenGL.raw.GL.NV.vertex_program2 import _EXTENSION_NAME
def glInitVertexProgram2NV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NV/vertex_program.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.vertex_program import *
from OpenGL.raw.GL.NV.vertex_program import _EXTENSION_NAME
def glInitVertexProgramNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glAreProgramsResidentNV.programs size not checked against n
glAreProgramsResidentNV=wrapper.wrapper(glAreProgramsResidentNV).setInputArraySize(
'programs', None
).setOutput(
'residences',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
# INPUT glDeleteProgramsNV.programs size not checked against n
glDeleteProgramsNV=wrapper.wrapper(glDeleteProgramsNV).setInputArraySize(
'programs', None
)
glExecuteProgramNV=wrapper.wrapper(glExecuteProgramNV).setInputArraySize(
'params', 4
)
glGenProgramsNV=wrapper.wrapper(glGenProgramsNV).setOutput(
'programs',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
glGetProgramParameterdvNV=wrapper.wrapper(glGetProgramParameterdvNV).setOutput(
'params',size=(4,),orPassIn=True
)
glGetProgramParameterfvNV=wrapper.wrapper(glGetProgramParameterfvNV).setOutput(
'params',size=(4,),orPassIn=True
)
glGetProgramivNV=wrapper.wrapper(glGetProgramivNV).setOutput(
'params',size=(4,),orPassIn=True
)
# OUTPUT glGetProgramStringNV.program COMPSIZE(id, pname)
glGetTrackMatrixivNV=wrapper.wrapper(glGetTrackMatrixivNV).setOutput(
'params',size=(1,),orPassIn=True
)
glGetVertexAttribdvNV=wrapper.wrapper(glGetVertexAttribdvNV).setOutput(
'params',size=(1,),orPassIn=True
)
glGetVertexAttribfvNV=wrapper.wrapper(glGetVertexAttribfvNV).setOutput(
'params',size=(1,),orPassIn=True
)
glGetVertexAttribivNV=wrapper.wrapper(glGetVertexAttribivNV).setOutput(
'params',size=(1,),orPassIn=True
)
glGetVertexAttribPointervNV=wrapper.wrapper(glGetVertexAttribPointervNV).setOutput(
'pointer',size=(1,),orPassIn=True
)
# INPUT glLoadProgramNV.program size not checked against len
glLoadProgramNV=wrapper.wrapper(glLoadProgramNV).setInputArraySize(
'program', None
)
glProgramParameter4dvNV=wrapper.wrapper(glProgramParameter4dvNV).setInputArraySize(
'v', 4
)
glProgramParameter4fvNV=wrapper.wrapper(glProgramParameter4fvNV).setInputArraySize(
'v', 4
)
# INPUT glProgramParameters4dvNV.v size not checked against count*4
glProgramParameters4dvNV=wrapper.wrapper(glProgramParameters4dvNV).setInputArraySize(
'v', None
)
# INPUT glProgramParameters4fvNV.v size not checked against count*4
glProgramParameters4fvNV=wrapper.wrapper(glProgramParameters4fvNV).setInputArraySize(
'v', None
)
# INPUT glRequestResidentProgramsNV.programs size not checked against n
glRequestResidentProgramsNV=wrapper.wrapper(glRequestResidentProgramsNV).setInputArraySize(
'programs', None
)
# INPUT glVertexAttribPointerNV.pointer size not checked against 'fsize,type,stride'
glVertexAttribPointerNV=wrapper.wrapper(glVertexAttribPointerNV).setInputArraySize(
'pointer', None
)
glVertexAttrib1dvNV=wrapper.wrapper(glVertexAttrib1dvNV).setInputArraySize(
'v', 1
)
glVertexAttrib1fvNV=wrapper.wrapper(glVertexAttrib1fvNV).setInputArraySize(
'v', 1
)
glVertexAttrib1svNV=wrapper.wrapper(glVertexAttrib1svNV).setInputArraySize(
'v', 1
)
glVertexAttrib2dvNV=wrapper.wrapper(glVertexAttrib2dvNV).setInputArraySize(
'v', 2
)
glVertexAttrib2fvNV=wrapper.wrapper(glVertexAttrib2fvNV).setInputArraySize(
'v', 2
)
glVertexAttrib2svNV=wrapper.wrapper(glVertexAttrib2svNV).setInputArraySize(
'v', 2
)
glVertexAttrib3dvNV=wrapper.wrapper(glVertexAttrib3dvNV).setInputArraySize(
'v', 3
)
glVertexAttrib3fvNV=wrapper.wrapper(glVertexAttrib3fvNV).setInputArraySize(
'v', 3
)
glVertexAttrib3svNV=wrapper.wrapper(glVertexAttrib3svNV).setInputArraySize(
'v', 3
)
glVertexAttrib4dvNV=wrapper.wrapper(glVertexAttrib4dvNV).setInputArraySize(
'v', 4
)
glVertexAttrib4fvNV=wrapper.wrapper(glVertexAttrib4fvNV).setInputArraySize(
'v', 4
)
glVertexAttrib4svNV=wrapper.wrapper(glVertexAttrib4svNV).setInputArraySize(
'v', 4
)
glVertexAttrib4ubvNV=wrapper.wrapper(glVertexAttrib4ubvNV).setInputArraySize(
'v', 4
)
# INPUT glVertexAttribs1dvNV.v size not checked against count
glVertexAttribs1dvNV=wrapper.wrapper(glVertexAttribs1dvNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs1fvNV.v size not checked against count
glVertexAttribs1fvNV=wrapper.wrapper(glVertexAttribs1fvNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs1svNV.v size not checked against count
glVertexAttribs1svNV=wrapper.wrapper(glVertexAttribs1svNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs2dvNV.v size not checked against count*2
glVertexAttribs2dvNV=wrapper.wrapper(glVertexAttribs2dvNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs2fvNV.v size not checked against count*2
glVertexAttribs2fvNV=wrapper.wrapper(glVertexAttribs2fvNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs2svNV.v size not checked against count*2
glVertexAttribs2svNV=wrapper.wrapper(glVertexAttribs2svNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs3dvNV.v size not checked against count*3
glVertexAttribs3dvNV=wrapper.wrapper(glVertexAttribs3dvNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs3fvNV.v size not checked against count*3
glVertexAttribs3fvNV=wrapper.wrapper(glVertexAttribs3fvNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs3svNV.v size not checked against count*3
glVertexAttribs3svNV=wrapper.wrapper(glVertexAttribs3svNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs4dvNV.v size not checked against count*4
glVertexAttribs4dvNV=wrapper.wrapper(glVertexAttribs4dvNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs4fvNV.v size not checked against count*4
glVertexAttribs4fvNV=wrapper.wrapper(glVertexAttribs4fvNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs4svNV.v size not checked against count*4
glVertexAttribs4svNV=wrapper.wrapper(glVertexAttribs4svNV).setInputArraySize(
'v', None
)
# INPUT glVertexAttribs4ubvNV.v size not checked against count*4
glVertexAttribs4ubvNV=wrapper.wrapper(glVertexAttribs4ubvNV).setInputArraySize(
'v', None
)
### END AUTOGENERATED SECTION
```
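The repeated `wrapper.wrapper(...)` calls above are what make these autogenerated entry points Pythonic: `setInputArraySize` declares how a plain Python sequence is converted to the C array the driver expects, and `setOutput(..., orPassIn=True)` turns an output pointer into a return value (or lets the caller pass a preallocated array). A minimal caller-side sketch, assuming a live GL context created elsewhere and a driver that actually exposes `GL_NV_vertex_program`:

```python
# Illustrative sketch only: assumes a current OpenGL context (GLUT, GLFW,
# pygame, ...) and NV_vertex_program support in the driver.
from OpenGL.GL.NV.vertex_program import (
    glInitVertexProgramNV, glGenProgramsNV, glVertexAttrib4fvNV,
)

if glInitVertexProgramNV():
    # setOutput(..., pnameArg='n', orPassIn=True): pass only n, the wrapper
    # allocates the 'programs' array and returns the generated IDs.
    programs = glGenProgramsNV(3)
    # setInputArraySize('v', 4): a plain 4-element Python sequence is
    # converted to the ctypes array the C entry point expects.
    glVertexAttrib4fvNV(0, [0.0, 0.0, 0.0, 1.0])
```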
#### File: GL/NVX/gpu_memory_info.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NVX.gpu_memory_info import *
from OpenGL.raw.GL.NVX.gpu_memory_info import _EXTENSION_NAME
def glInitGpuMemoryInfoNVX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/NVX/progress_fence.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NVX.progress_fence import *
from OpenGL.raw.GL.NVX.progress_fence import _EXTENSION_NAME
def glInitProgressFenceNVX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glSignalSemaphoreui64NVX.fenceValueArray size not checked against fenceObjectCount
# INPUT glSignalSemaphoreui64NVX.semaphoreArray size not checked against fenceObjectCount
glSignalSemaphoreui64NVX=wrapper.wrapper(glSignalSemaphoreui64NVX).setInputArraySize(
'fenceValueArray', None
).setInputArraySize(
'semaphoreArray', None
)
# INPUT glWaitSemaphoreui64NVX.fenceValueArray size not checked against fenceObjectCount
# INPUT glWaitSemaphoreui64NVX.semaphoreArray size not checked against fenceObjectCount
glWaitSemaphoreui64NVX=wrapper.wrapper(glWaitSemaphoreui64NVX).setInputArraySize(
'fenceValueArray', None
).setInputArraySize(
'semaphoreArray', None
)
# INPUT glClientWaitSemaphoreui64NVX.fenceValueArray size not checked against fenceObjectCount
# INPUT glClientWaitSemaphoreui64NVX.semaphoreArray size not checked against fenceObjectCount
glClientWaitSemaphoreui64NVX=wrapper.wrapper(glClientWaitSemaphoreui64NVX).setInputArraySize(
'fenceValueArray', None
).setInputArraySize(
'semaphoreArray', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/OES/byte_coordinates.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.OES.byte_coordinates import *
from OpenGL.raw.GL.OES.byte_coordinates import _EXTENSION_NAME
def glInitByteCoordinatesOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glMultiTexCoord1bvOES=wrapper.wrapper(glMultiTexCoord1bvOES).setInputArraySize(
'coords', 1
)
glMultiTexCoord2bvOES=wrapper.wrapper(glMultiTexCoord2bvOES).setInputArraySize(
'coords', 2
)
glMultiTexCoord3bvOES=wrapper.wrapper(glMultiTexCoord3bvOES).setInputArraySize(
'coords', 3
)
glMultiTexCoord4bvOES=wrapper.wrapper(glMultiTexCoord4bvOES).setInputArraySize(
'coords', 4
)
glTexCoord1bvOES=wrapper.wrapper(glTexCoord1bvOES).setInputArraySize(
'coords', 1
)
glTexCoord2bvOES=wrapper.wrapper(glTexCoord2bvOES).setInputArraySize(
'coords', 2
)
glTexCoord3bvOES=wrapper.wrapper(glTexCoord3bvOES).setInputArraySize(
'coords', 3
)
glTexCoord4bvOES=wrapper.wrapper(glTexCoord4bvOES).setInputArraySize(
'coords', 4
)
glVertex2bvOES=wrapper.wrapper(glVertex2bvOES).setInputArraySize(
'coords', 2
)
glVertex3bvOES=wrapper.wrapper(glVertex3bvOES).setInputArraySize(
'coords', 3
)
glVertex4bvOES=wrapper.wrapper(glVertex4bvOES).setInputArraySize(
'coords', 4
)
### END AUTOGENERATED SECTION
```
#### File: GL/OES/read_format.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.OES.read_format import *
from OpenGL.raw.GL.OES.read_format import _EXTENSION_NAME
def glInitReadFormatOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/SGIS/detail_texture.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIS.detail_texture import *
from OpenGL.raw.GL.SGIS.detail_texture import _EXTENSION_NAME
def glInitDetailTextureSGIS():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDetailTexFuncSGIS.points size not checked against n*2
glDetailTexFuncSGIS=wrapper.wrapper(glDetailTexFuncSGIS).setInputArraySize(
'points', None
)
glGetDetailTexFuncSGIS=wrapper.wrapper(glGetDetailTexFuncSGIS).setOutput(
'points',size=_glgets._glget_size_mapping,pnameArg='target',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/SGIS/point_parameters.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIS.point_parameters import *
from OpenGL.raw.GL.SGIS.point_parameters import _EXTENSION_NAME
def glInitPointParametersSGIS():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glPointParameterfvSGIS.params size not checked against 'pname'
glPointParameterfvSGIS=wrapper.wrapper(glPointParameterfvSGIS).setInputArraySize(
'params', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/SGIS/texture_color_mask.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIS.texture_color_mask import *
from OpenGL.raw.GL.SGIS.texture_color_mask import _EXTENSION_NAME
def glInitTextureColorMaskSGIS():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/SGIS/texture_select.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIS.texture_select import *
from OpenGL.raw.GL.SGIS.texture_select import _EXTENSION_NAME
def glInitTextureSelectSGIS():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/SGIX/pixel_texture.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIX.pixel_texture import *
from OpenGL.raw.GL.SGIX.pixel_texture import _EXTENSION_NAME
def glInitPixelTextureSGIX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/SGIX/shadow.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIX.shadow import *
from OpenGL.raw.GL.SGIX.shadow import _EXTENSION_NAME
def glInitShadowSGIX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/SGIX/ycrcba.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIX.ycrcba import *
from OpenGL.raw.GL.SGIX.ycrcba import _EXTENSION_NAME
def glInitYcrcbaSGIX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/SUN/mesh_array.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SUN.mesh_array import *
from OpenGL.raw.GL.SUN.mesh_array import _EXTENSION_NAME
def glInitMeshArraySUN():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/SUNX/constant_data.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SUNX.constant_data import *
from OpenGL.raw.GL.SUNX.constant_data import _EXTENSION_NAME
def glInitConstantDataSUNX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLU/EXT/nurbs_tessellator.py
```python
from OpenGL import extensions
from OpenGL.raw.GLU import constants
GLU_NURBS_BEGIN_EXT = constants.GLU_NURBS_BEGIN_EXT
GLU_NURBS_VERTEX_EXT = constants.GLU_NURBS_VERTEX_EXT
GLU_NURBS_COLOR_EXT = constants.GLU_NURBS_COLOR_EXT
GLU_NURBS_TEX_COORD_EXT = constants.GLU_NURBS_TEX_COORD_EXT
GLU_NURBS_END_EXT = constants.GLU_NURBS_END_EXT
GLU_NURBS_BEGIN_DATA_EXT = constants.GLU_NURBS_BEGIN_DATA_EXT
GLU_NURBS_VERTEX_DATA_EXT = constants.GLU_NURBS_VERTEX_DATA_EXT
GLU_NURBS_NORMAL_DATA_EXT = constants.GLU_NURBS_NORMAL_DATA_EXT
GLU_NURBS_COLOR_DATA_EXT = constants.GLU_NURBS_COLOR_DATA_EXT
GLU_NURBS_TEX_COORD_DATA_EXT = constants.GLU_NURBS_TEX_COORD_DATA_EXT
GLU_NURBS_END_DATA_EXT = constants.GLU_NURBS_END_DATA_EXT
GLU_NURBS_MODE_EXT = constants.GLU_NURBS_MODE_EXT
GLU_NURBS_TESSELLATOR_EXT = constants.GLU_NURBS_TESSELLATOR_EXT
GLU_NURBS_RENDERER_EXT = constants.GLU_NURBS_RENDERER_EXT
def gluInitNurbsTessellatorEXT():
'''Return boolean indicating whether this module is available'''
return extensions.hasGLUExtension( 'GLU_EXT_nurbs_tessellator' )
```
#### File: OpenGL/GLU/quadrics.py
```python
from OpenGL.raw import GLU as _simple
from OpenGL.platform import createBaseFunction, PLATFORM
import ctypes
class GLUQuadric( _simple.GLUquadric ):
"""Implementation class for GLUQuadric classes in PyOpenGL"""
FUNCTION_TYPE = PLATFORM.functionTypeFor(PLATFORM.GLU)
CALLBACK_TYPES = {
# mapping from "which" GLU enumeration to a ctypes function type
_simple.GLU_ERROR : FUNCTION_TYPE( None, _simple.GLenum )
}
def addCallback( self, which, function ):
"""Register a callback for the quadric object
At the moment only GLU_ERROR is supported by OpenGL, but
we allow for the possibility of more callbacks in the future...
"""
callbackType = self.CALLBACK_TYPES.get( which )
if not callbackType:
raise ValueError(
"""Don't have a registered callback type for %r"""%(
which,
)
)
if not isinstance( function, callbackType ):
cCallback = callbackType( function )
else:
cCallback = function
PLATFORM.GLU.gluQuadricCallback( self, which, cCallback )
# XXX catch errors!
if getattr( self, 'callbacks', None ) is None:
self.callbacks = {}
self.callbacks[ which ] = cCallback
return cCallback
GLUquadric = GLUQuadric
def gluQuadricCallback( quadric, which=_simple.GLU_ERROR, function=None ):
"""Set the GLU error callback function"""
return quadric.addCallback( which, function )
# Override to produce instances of the sub-class...
gluNewQuadric = createBaseFunction(
'gluNewQuadric', dll=PLATFORM.GLU, resultType=ctypes.POINTER(GLUQuadric),
argTypes=[],
doc="""gluNewQuadric( ) -> GLUQuadric
Create a new GLUQuadric object""",
argNames=[],
)
__all__ = (
'gluNewQuadric',
'gluQuadricCallback',
'GLUQuadric',
)
```
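A short usage sketch for the quadric machinery above, assuming a current GL context and an available GLU library; the Python callable is wrapped into the ctypes `FUNCTION_TYPE` and kept alive on the quadric by `addCallback`:

```python
# Sketch, not a complete program: a GL context must already be current.
from OpenGL.GLU import gluNewQuadric, gluQuadricCallback, gluSphere, GLU_ERROR

def on_error(error_code):
    # Receives the GLU error enum as an integer.
    print('GLU quadric error:', error_code)

quad = gluNewQuadric()                         # GLUQuadric instance (subclass above)
gluQuadricCallback(quad, GLU_ERROR, on_error)  # stores the ctypes callback ref
gluSphere(quad, 1.0, 32, 32)                   # render a unit sphere with the quadric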
#### File: GL/VERSION/GL_2_0.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.VERSION.GL_2_0 import *
from OpenGL.raw.GL.VERSION.GL_2_0 import _EXTENSION_NAME
def glInitGl20VERSION():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDrawBuffers.bufs size not checked against n
glDrawBuffers=wrapper.wrapper(glDrawBuffers).setInputArraySize(
'bufs', None
)
glGetActiveAttrib=wrapper.wrapper(glGetActiveAttrib).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'name',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
).setOutput(
'size',size=(1,),orPassIn=True
).setOutput(
'type',size=(1,),orPassIn=True
)
glGetActiveUniform=wrapper.wrapper(glGetActiveUniform).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'name',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
).setOutput(
'size',size=(1,),orPassIn=True
).setOutput(
'type',size=(1,),orPassIn=True
)
# glGetAttachedShaders.obj is OUTPUT without known output size
# INPUT glGetAttachedShaders.shaders size not checked against maxCount
glGetAttachedShaders=wrapper.wrapper(glGetAttachedShaders).setOutput(
'count',size=(1,),orPassIn=True
).setInputArraySize(
'shaders', None
)
glGetProgramiv=wrapper.wrapper(glGetProgramiv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetProgramInfoLog=wrapper.wrapper(glGetProgramInfoLog).setOutput(
'infoLog',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
).setOutput(
'length',size=(1,),orPassIn=True
)
glGetShaderiv=wrapper.wrapper(glGetShaderiv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetShaderInfoLog=wrapper.wrapper(glGetShaderInfoLog).setOutput(
'infoLog',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
).setOutput(
'length',size=(1,),orPassIn=True
)
glGetShaderSource=wrapper.wrapper(glGetShaderSource).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'source',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
# glGetUniformfv.params is OUTPUT without known output size
# glGetUniformiv.params is OUTPUT without known output size
glGetVertexAttribdv=wrapper.wrapper(glGetVertexAttribdv).setOutput(
'params',size=(4,),orPassIn=True
)
glGetVertexAttribfv=wrapper.wrapper(glGetVertexAttribfv).setOutput(
'params',size=(4,),orPassIn=True
)
glGetVertexAttribiv=wrapper.wrapper(glGetVertexAttribiv).setOutput(
'params',size=(4,),orPassIn=True
)
glGetVertexAttribPointerv=wrapper.wrapper(glGetVertexAttribPointerv).setOutput(
'pointer',size=(1,),orPassIn=True
)
# INPUT glShaderSource.length size not checked against count
# INPUT glShaderSource.string size not checked against count
glShaderSource=wrapper.wrapper(glShaderSource).setInputArraySize(
'length', None
).setInputArraySize(
'string', None
)
# INPUT glUniform1fv.value size not checked against count
glUniform1fv=wrapper.wrapper(glUniform1fv).setInputArraySize(
'value', None
)
# INPUT glUniform2fv.value size not checked against count*2
glUniform2fv=wrapper.wrapper(glUniform2fv).setInputArraySize(
'value', None
)
# INPUT glUniform3fv.value size not checked against count*3
glUniform3fv=wrapper.wrapper(glUniform3fv).setInputArraySize(
'value', None
)
# INPUT glUniform4fv.value size not checked against count*4
glUniform4fv=wrapper.wrapper(glUniform4fv).setInputArraySize(
'value', None
)
# INPUT glUniform1iv.value size not checked against count
glUniform1iv=wrapper.wrapper(glUniform1iv).setInputArraySize(
'value', None
)
# INPUT glUniform2iv.value size not checked against count*2
glUniform2iv=wrapper.wrapper(glUniform2iv).setInputArraySize(
'value', None
)
# INPUT glUniform3iv.value size not checked against count*3
glUniform3iv=wrapper.wrapper(glUniform3iv).setInputArraySize(
'value', None
)
# INPUT glUniform4iv.value size not checked against count*4
glUniform4iv=wrapper.wrapper(glUniform4iv).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix2fv.value size not checked against count*4
glUniformMatrix2fv=wrapper.wrapper(glUniformMatrix2fv).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix3fv.value size not checked against count*9
glUniformMatrix3fv=wrapper.wrapper(glUniformMatrix3fv).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix4fv.value size not checked against count*16
glUniformMatrix4fv=wrapper.wrapper(glUniformMatrix4fv).setInputArraySize(
'value', None
)
glVertexAttrib1dv=wrapper.wrapper(glVertexAttrib1dv).setInputArraySize(
'v', 1
)
glVertexAttrib1fv=wrapper.wrapper(glVertexAttrib1fv).setInputArraySize(
'v', 1
)
glVertexAttrib1sv=wrapper.wrapper(glVertexAttrib1sv).setInputArraySize(
'v', 1
)
glVertexAttrib2dv=wrapper.wrapper(glVertexAttrib2dv).setInputArraySize(
'v', 2
)
glVertexAttrib2fv=wrapper.wrapper(glVertexAttrib2fv).setInputArraySize(
'v', 2
)
glVertexAttrib2sv=wrapper.wrapper(glVertexAttrib2sv).setInputArraySize(
'v', 2
)
glVertexAttrib3dv=wrapper.wrapper(glVertexAttrib3dv).setInputArraySize(
'v', 3
)
glVertexAttrib3fv=wrapper.wrapper(glVertexAttrib3fv).setInputArraySize(
'v', 3
)
glVertexAttrib3sv=wrapper.wrapper(glVertexAttrib3sv).setInputArraySize(
'v', 3
)
glVertexAttrib4Nbv=wrapper.wrapper(glVertexAttrib4Nbv).setInputArraySize(
'v', 4
)
glVertexAttrib4Niv=wrapper.wrapper(glVertexAttrib4Niv).setInputArraySize(
'v', 4
)
glVertexAttrib4Nsv=wrapper.wrapper(glVertexAttrib4Nsv).setInputArraySize(
'v', 4
)
glVertexAttrib4Nubv=wrapper.wrapper(glVertexAttrib4Nubv).setInputArraySize(
'v', 4
)
glVertexAttrib4Nuiv=wrapper.wrapper(glVertexAttrib4Nuiv).setInputArraySize(
'v', 4
)
glVertexAttrib4Nusv=wrapper.wrapper(glVertexAttrib4Nusv).setInputArraySize(
'v', 4
)
glVertexAttrib4bv=wrapper.wrapper(glVertexAttrib4bv).setInputArraySize(
'v', 4
)
glVertexAttrib4dv=wrapper.wrapper(glVertexAttrib4dv).setInputArraySize(
'v', 4
)
glVertexAttrib4fv=wrapper.wrapper(glVertexAttrib4fv).setInputArraySize(
'v', 4
)
glVertexAttrib4iv=wrapper.wrapper(glVertexAttrib4iv).setInputArraySize(
'v', 4
)
glVertexAttrib4sv=wrapper.wrapper(glVertexAttrib4sv).setInputArraySize(
'v', 4
)
glVertexAttrib4ubv=wrapper.wrapper(glVertexAttrib4ubv).setInputArraySize(
'v', 4
)
glVertexAttrib4uiv=wrapper.wrapper(glVertexAttrib4uiv).setInputArraySize(
'v', 4
)
glVertexAttrib4usv=wrapper.wrapper(glVertexAttrib4usv).setInputArraySize(
'v', 4
)
# INPUT glVertexAttribPointer.pointer size not checked against 'size,type,stride'
glVertexAttribPointer=wrapper.wrapper(glVertexAttribPointer).setInputArraySize(
'pointer', None
)
### END AUTOGENERATED SECTION
import OpenGL
from OpenGL import _configflags
from OpenGL._bytes import bytes, _NULL_8_BYTE, as_8_bit
from OpenGL.raw.GL.ARB.shader_objects import GL_OBJECT_COMPILE_STATUS_ARB as GL_OBJECT_COMPILE_STATUS
from OpenGL.raw.GL.ARB.shader_objects import GL_OBJECT_LINK_STATUS_ARB as GL_OBJECT_LINK_STATUS
from OpenGL.raw.GL.ARB.shader_objects import GL_OBJECT_ACTIVE_UNIFORMS_ARB as GL_OBJECT_ACTIVE_UNIFORMS
from OpenGL.raw.GL.ARB.shader_objects import GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH_ARB as GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH
from OpenGL.lazywrapper import lazy as _lazy
from OpenGL.raw.GL import _errors
from OpenGL import converters, error, contextdata
from OpenGL.arrays.arraydatatype import ArrayDatatype, GLenumArray
GL_INFO_LOG_LENGTH = constant.Constant( 'GL_INFO_LOG_LENGTH', 0x8B84 )
glShaderSource = platform.createExtensionFunction(
'glShaderSource', dll=platform.PLATFORM.GL,
resultType=None,
argTypes=(_types.GLhandle, _types.GLsizei, ctypes.POINTER(ctypes.c_char_p), arrays.GLintArray,),
doc = 'glShaderSource( GLhandle(shaderObj),[bytes(string),...]) -> None',
argNames = ('shaderObj', 'count', 'string', 'length',),
extension = _EXTENSION_NAME,
)
conv = converters.StringLengths( name='string' )
glShaderSource = wrapper.wrapper(
glShaderSource
).setPyConverter(
'count' # number of strings
).setPyConverter(
'length' # lengths of strings
).setPyConverter(
'string', conv.stringArray
).setCResolver(
'string', conv.stringArrayForC,
).setCConverter(
'length', conv,
).setCConverter(
'count', conv.totalCount,
)
try:
del conv
except NameError as err:
pass
@_lazy( glGetShaderiv )
def glGetShaderiv( baseOperation, shader, pname, status=None ):
"""Retrieve the integer parameter for the given shader
shader -- shader ID to query
pname -- parameter name
status -- pointer to integer to receive status or None to
return the parameter as an integer value
returns
integer if status parameter is None
status if status parameter is not None
"""
if status is None:
status = arrays.GLintArray.zeros( (1,))
status[0] = 1
baseOperation(
shader, pname, status
)
return status[0]
else:
baseOperation(
shader, pname, status
)
return status
def _afterCheck( key ):
"""Generate an error-checking function for compilation operations"""
if key == GL_OBJECT_COMPILE_STATUS:
getter = glGetShaderiv
else:
getter = glGetProgramiv
def GLSLCheckError(
result,
baseOperation=None,
cArguments=None,
*args
):
result = _errors._error_checker.glCheckError( result, baseOperation, cArguments, *args )
status = ctypes.c_int()
getter( cArguments[0], key, ctypes.byref(status))
status = status.value
if not status:
raise error.GLError(
result = result,
baseOperation = baseOperation,
cArguments = cArguments,
description= glGetShaderInfoLog( cArguments[0] )
)
return result
return GLSLCheckError
if _configflags.ERROR_CHECKING:
glCompileShader.errcheck = _afterCheck( GL_OBJECT_COMPILE_STATUS )
if _configflags.ERROR_CHECKING:
glLinkProgram.errcheck = _afterCheck( GL_OBJECT_LINK_STATUS )
## Not sure why, but these give invalid operation :(
##if glValidateProgram and OpenGL.ERROR_CHECKING:
## glValidateProgram.errcheck = _afterCheck( GL_OBJECT_VALIDATE_STATUS )
@_lazy( glGetShaderInfoLog )
def glGetShaderInfoLog( baseOperation, obj ):
"""Retrieve the shader's error messages as a Python string
returns string which is '' if no message
"""
length = int(glGetShaderiv(obj, GL_INFO_LOG_LENGTH))
if length > 0:
log = ctypes.create_string_buffer(length)
baseOperation(obj, length, None, log)
return log.value.strip(_NULL_8_BYTE) # null-termination
return ''
@_lazy( glGetProgramInfoLog )
def glGetProgramInfoLog( baseOperation, obj ):
"""Retrieve the shader program's error messages as a Python string
returns string which is '' if no message
"""
length = int(glGetProgramiv(obj, GL_INFO_LOG_LENGTH))
if length > 0:
log = ctypes.create_string_buffer(length)
baseOperation(obj, length, None, log)
return log.value.strip(_NULL_8_BYTE) # null-termination
return ''
@_lazy( glGetAttachedShaders )
def glGetAttachedShaders( baseOperation, obj ):
"""Retrieve the attached objects as an array of GLhandle instances"""
length= glGetProgramiv( obj, GL_ATTACHED_SHADERS )
if length > 0:
storage = arrays.GLuintArray.zeros( (length,))
baseOperation( obj, length, None, storage )
return storage
return arrays.GLuintArray.zeros( (0,))
@_lazy( glGetShaderSource )
def glGetShaderSource( baseOperation, obj ):
"""Retrieve the program/shader's source code as a Python string
returns string which is '' if no source code
"""
length = int(glGetShaderiv(obj, GL_SHADER_SOURCE_LENGTH))
if length > 0:
source = ctypes.create_string_buffer(length)
baseOperation(obj, length, None, source)
return source.value.strip(_NULL_8_BYTE) # null-termination
return ''
@_lazy( glGetActiveAttrib )
def glGetActiveAttrib(baseOperation, program, index, bufSize=None,*args):
"""Retrieves information about the attribute variable.
program -- specifies the program to be queried
index -- index of the attribute to be queried
Following parameters are optional:
bufSize -- determines the size of the buffer (limits number of bytes written),
if not provided, will be GL_ACTIVE_ATTRIBUTE_MAX_LENGTH
length -- pointer-to-GLsizei that will hold the resulting length of the name
size -- pointer-to-GLint that will hold the size of the attribute
type -- pointer-to-GLenum that will hold the type constant of the attribute
name -- pointer-to-GLchar that will hold the (null-terminated) name string
returns (bytes) name, (int)size, (enum)type
"""
if bufSize is None:
bufSize = int(glGetProgramiv( program, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH))
if bufSize <= 0:
raise RuntimeError( 'Active attribute length reported', bufSize )
name,size,type = baseOperation( program, index, bufSize, *args )[1:]
if hasattr(name,'tostring'):
name = name.tostring().rstrip(b'\000')
elif hasattr(name,'value'):
name = name.value
return name,size,type
@_lazy( glGetActiveUniform )
def glGetActiveUniform(baseOperation,program, index,bufSize=None,*args):
"""Retrieve the name, size and type of the uniform of the index in the program
program -- specifies the program to be queried
index -- index of the uniform to be queried
Following parameters are optional:
bufSize -- determines the size of the buffer (limits number of bytes written),
if not provided, will be GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH
length -- pointer-to-GLsizei that will hold the resulting length of the name
size -- pointer-to-GLint that will hold the size of the attribute
type -- pointer-to-GLenum that will hold the type constant of the attribute
name -- pointer-to-GLchar that will hold the (null-terminated) name string
returns (bytes) name, (int)size, (enum)type
"""
max_index = int(glGetProgramiv( program, GL_ACTIVE_UNIFORMS ))
if bufSize is None:
bufSize = int(glGetProgramiv( program, GL_ACTIVE_UNIFORM_MAX_LENGTH))
if index < max_index and index >= 0:
length,name,size,type = baseOperation( program, index, bufSize, *args )
if hasattr(name,'tostring'):
name = name.tostring().rstrip(b'\000')
elif hasattr(name,'value'):
name = name.value
return name,size,type
raise IndexError( 'Index %s out of range 0 to %i' % (index, max_index - 1, ) )
@_lazy( glGetUniformLocation )
def glGetUniformLocation( baseOperation, program, name ):
"""Check that name is a string with a null byte at the end of it"""
if not name:
raise ValueError( """Non-null name required""" )
name = as_8_bit( name )
if name[-1] != _NULL_8_BYTE:
name = name + _NULL_8_BYTE
return baseOperation( program, name )
@_lazy( glGetAttribLocation )
def glGetAttribLocation( baseOperation, program, name ):
"""Check that name is a string with a null byte at the end of it"""
if not name:
raise ValueError( """Non-null name required""" )
name = as_8_bit( name )
if name[-1] != _NULL_8_BYTE:
name = name + _NULL_8_BYTE
return baseOperation( program, name )
@_lazy( glVertexAttribPointer )
def glVertexAttribPointer(
baseOperation, index, size, type,
normalized, stride, pointer,
):
"""Set an attribute pointer for a given shader (index)
index -- the index of the generic vertex to bind, see
glGetAttribLocation for retrieval of the value,
note that index is a global variable, not per-shader
size -- number of basic elements per record, 1,2,3, or 4
type -- enum constant for data-type
normalized -- whether to perform int to float
normalization on integer-type values
stride -- stride in machine units (bytes) between
consecutive records, normally used to create
"interleaved" arrays
pointer -- data-pointer which provides the data-values,
normally a vertex-buffer-object or offset into the
same.
This implementation stores a copy of the data-pointer
in the contextdata structure in order to prevent null-
reference errors in the renderer.
"""
array = ArrayDatatype.asArray( pointer, type )
key = ('vertex-attrib',index)
contextdata.setValue( key, array )
return baseOperation(
index, size, type,
normalized, stride,
ArrayDatatype.voidDataPointer( array )
)
@_lazy( glDrawBuffers )
def glDrawBuffers( baseOperation, n=None, bufs=None ):
"""glDrawBuffers( bufs ) -> bufs
Wrapper will calculate n from dims of bufs if only
one argument is provided...
"""
if bufs is None:
bufs = n
n = None
bufs = arrays.GLenumArray.asArray( bufs )
if n is None:
n = arrays.GLenumArray.arraySize( bufs )
return baseOperation( n,bufs )
```
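Taken together, the hand-written wrappers above make the GLSL entry points usable with plain Python strings and return values. A minimal sketch, assuming a current GL 2.0+ context; note that with `ERROR_CHECKING` enabled, a failed compile already raises `GLError` carrying the info log via the `_afterCheck` hook, so the explicit status check below matters mainly when error checking is off:

```python
# Sketch only: assumes a current OpenGL >= 2.0 context.
from OpenGL.GL import (
    glCreateShader, glShaderSource, glCompileShader,
    glGetShaderiv, glGetShaderInfoLog,
    GL_VERTEX_SHADER, GL_COMPILE_STATUS,
)

source = """
#version 120
void main() { gl_Position = gl_Vertex; }
"""
shader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(shader, source)       # wrapper accepts a str/bytes or a list of them
glCompileShader(shader)
if not glGetShaderiv(shader, GL_COMPILE_STATUS):  # wrapper returns a plain int
    print(glGetShaderInfoLog(shader))             # wrapper returns a Python string
```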
#### File: GL/VERSION/GL_3_1.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.VERSION.GL_3_1 import *
from OpenGL.raw.GL.VERSION.GL_3_1 import _EXTENSION_NAME
def glInitGl31VERSION():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDrawElementsInstanced.indices size not checked against 'count,type'
glDrawElementsInstanced=wrapper.wrapper(glDrawElementsInstanced).setInputArraySize(
'indices', None
)
# INPUT glGetUniformIndices.uniformNames size not checked against 'uniformCount'
glGetUniformIndices=wrapper.wrapper(glGetUniformIndices).setOutput(
'uniformIndices',size=_glgets._glget_size_mapping,pnameArg='uniformCount',orPassIn=True
).setInputArraySize(
'uniformNames', None
)
# OUTPUT glGetActiveUniformsiv.params COMPSIZE(uniformCount, pname)
# INPUT glGetActiveUniformsiv.uniformIndices size not checked against uniformCount
glGetActiveUniformsiv=wrapper.wrapper(glGetActiveUniformsiv).setInputArraySize(
'uniformIndices', None
)
glGetActiveUniformName=wrapper.wrapper(glGetActiveUniformName).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'uniformName',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
# INPUT glGetUniformBlockIndex.uniformBlockName size not checked against ''
glGetUniformBlockIndex=wrapper.wrapper(glGetUniformBlockIndex).setInputArraySize(
'uniformBlockName', None
)
# OUTPUT glGetActiveUniformBlockiv.params COMPSIZE(program, uniformBlockIndex, pname)
glGetActiveUniformBlockName=wrapper.wrapper(glGetActiveUniformBlockName).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'uniformBlockName',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
glGetIntegeri_v=wrapper.wrapper(glGetIntegeri_v).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='target',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/VERSION/GL_4_2.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.VERSION.GL_4_2 import *
from OpenGL.raw.GL.VERSION.GL_4_2 import _EXTENSION_NAME
def glInitGl42VERSION():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDrawElementsInstancedBaseInstance.indices size not checked against count
glDrawElementsInstancedBaseInstance=wrapper.wrapper(glDrawElementsInstancedBaseInstance).setInputArraySize(
'indices', None
)
# INPUT glDrawElementsInstancedBaseVertexBaseInstance.indices size not checked against count
glDrawElementsInstancedBaseVertexBaseInstance=wrapper.wrapper(glDrawElementsInstancedBaseVertexBaseInstance).setInputArraySize(
'indices', None
)
glGetInternalformativ=wrapper.wrapper(glGetInternalformativ).setOutput(
'params',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
glGetActiveAtomicCounterBufferiv=wrapper.wrapper(glGetActiveAtomicCounterBufferiv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
from OpenGL.GL.ARB.base_instance import *
from OpenGL.GL.ARB.shading_language_420pack import *
from OpenGL.GL.ARB.transform_feedback_instanced import *
from OpenGL.GL.ARB.compressed_texture_pixel_storage import *
from OpenGL.GL.ARB.conservative_depth import *
from OpenGL.GL.ARB.internalformat_query import *
from OpenGL.GL.ARB.map_buffer_alignment import *
from OpenGL.GL.ARB.shader_atomic_counters import *
from OpenGL.GL.ARB.shader_image_load_store import *
from OpenGL.GL.ARB.shading_language_packing import *
from OpenGL.GL.ARB.texture_storage import *
```
#### File: GL/WIN/specular_fog.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.WIN.specular_fog import *
from OpenGL.raw.GL.WIN.specular_fog import _EXTENSION_NAME
def glInitSpecularFogWIN():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLX/ARB/vertex_buffer_object.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.ARB.vertex_buffer_object import *
from OpenGL.raw.GLX.ARB.vertex_buffer_object import _EXTENSION_NAME
def glInitVertexBufferObjectARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLX/MESA/set_3dfx_mode.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.MESA.set_3dfx_mode import *
from OpenGL.raw.GLX.MESA.set_3dfx_mode import _EXTENSION_NAME
def glInitSet3DfxModeMESA():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: site-packages/OpenGL/images.py
```python
from OpenGL.raw.GL.VERSION import GL_1_1 as _simple
from OpenGL import arrays
from OpenGL import error
from OpenGL import _configflags
import ctypes
def SetupPixelRead( format, dims, type):
"""Setup transfer mode for a read into a numpy array return the array
Calls setupDefaultTransferMode, sets rankPacking and then
returns a createTargetArray for the parameters.
"""
setupDefaultTransferMode()
# XXX this is wrong? dims may grow or it may not, depends on whether
# the format can fit in the type or not, but rank is a property of the
# image itself? Don't know, should test.
rankPacking( len(dims)+1 )
return createTargetArray( format, dims, type )
def setupDefaultTransferMode( ):
"""Set pixel transfer mode to assumed internal structure of arrays
Basically OpenGL-ctypes (and PyOpenGL) assume that your image data is in
non-byte-swapped order, with big-endian ordering of bytes (though that
seldom matters in image data). These assumptions are normally correct
when dealing with Python libraries which expose byte-arrays.
"""
try:
_simple.glPixelStorei(_simple.GL_PACK_SWAP_BYTES, 0)
_simple.glPixelStorei(_simple.GL_PACK_LSB_FIRST, 0)
except error.GLError:
# GLES doesn't support pixel storage swapping...
pass
def rankPacking( rank ):
"""Set the pixel-transfer modes for a given image "rank" (# of dims)
Uses RANK_PACKINGS table to issue calls to glPixelStorei
"""
for func,which,arg in RANK_PACKINGS[rank]:
try:
func(which,arg)
except error.GLError:
pass
def createTargetArray( format, dims, type ):
"""Create storage array for given parameters
If storage type requires > 1 unit per format pixel, then dims will be
extended by 1, so in the common case of RGB and GL_UNSIGNED_BYTE you
will wind up with an array of dims + (3,) dimensions. See
COMPONENT_COUNTS for table which controls which formats produce
larger dimensions. The secondary table TIGHT_PACK_FORMATS overrides
this case, so that image formats registered as TIGHT_PACK_FORMATS
only ever return a dims-shaped value. TIGHT_PACK_FORMATS will raise
ValueErrors if they are used with a format that does not have the same
number of components as they define.
Note that the base storage type must provide a zeros method. The zeros
method relies on their being a registered default array-implementation for
the storage type. The default installation of OpenGL-ctypes will use
Numpy arrays for returning the result.
"""
# calculate the number of storage elements required to store
# a single pixel of format, that's the dimension of the resulting array
componentCount = formatToComponentCount( format )
if componentCount > 1:
if type not in TIGHT_PACK_FORMATS:
# requires multiple elements to store a single pixel (common)
# e.g. byte array (typeBits = 8) with RGB (24) or RGBA (32)
dims += (componentCount, )
elif TIGHT_PACK_FORMATS[ type ] < componentCount:
raise ValueError(
"""Image type: %s supports %s components, but format %s requires %s components"""%(
type,
TIGHT_PACK_FORMATS[ type ],
format,
componentCount,
)
)
arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ TYPE_TO_ARRAYTYPE.get(type,type) ]
return arrayType.zeros( dims )
def formatToComponentCount( format ):
"""Given an OpenGL image format specification, get components/pixel"""
size = COMPONENT_COUNTS.get( format )
if size is None:
raise ValueError( """Unrecognised image format: %r"""%(format,))
return size
def returnFormat( data, type ):
"""Perform compatibility conversion for PyOpenGL 2.x image-as string results
Uses OpenGL.UNSIGNED_BYTE_IMAGES_AS_STRING to control whether to perform the
conversions.
"""
if _configflags.UNSIGNED_BYTE_IMAGES_AS_STRING:
if type == _simple.GL_UNSIGNED_BYTE:
if hasattr( data, 'tostring' ):
return data.tostring()
elif hasattr( data, 'raw' ):
return data.raw
elif hasattr( data, '_type_' ):
s = ctypes.string_at( ctypes.cast( data, ctypes.c_voidp ), ctypes.sizeof( data ))
result = s[:] # copy into a new string
return result
return data
COMPONENT_COUNTS = {
# Image-format-constant: number-of-components (integer)
}
TYPE_TO_BITS = {
# GL-image-storage-type-constant: number-of-bits (integer)
}
TYPE_TO_ARRAYTYPE = {
# GL-image-storage-type-constant: GL-datatype (constant)
}
TIGHT_PACK_FORMATS = {
}
RANK_PACKINGS = {
# rank (integer): list of (function,**arg) to setup for that rank
}
```
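A sketch of how `createTargetArray` shapes a read target, assuming the lookup tables above have been populated (they are shown empty here; in a normal PyOpenGL installation they are filled in when the GL image helpers are imported) and the default numpy array handler is in use:

```python
# Sketch: importing OpenGL.GL is assumed to populate the format tables.
import OpenGL.GL
from OpenGL.GL import GL_RGB, GL_UNSIGNED_BYTE
from OpenGL import images

print(images.formatToComponentCount(GL_RGB))             # -> 3
# GL_RGB needs 3 components and GL_UNSIGNED_BYTE stores one per element,
# so the dims are extended by (3,) as described in the docstring above.
target = images.createTargetArray(GL_RGB, (64, 64), GL_UNSIGNED_BYTE)
print(target.shape)                                      # -> (64, 64, 3)
```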
#### File: EGL/MESA/drm_image.py
```python
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.EGL import _types as _cs
# End users want this...
from OpenGL.raw.EGL._types import *
from OpenGL.raw.EGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'EGL_MESA_drm_image'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.EGL,'EGL_MESA_drm_image',error_checker=_errors._error_checker)
EGL_DRM_BUFFER_FORMAT_ARGB32_MESA=_C('EGL_DRM_BUFFER_FORMAT_ARGB32_MESA',0x31D2)
EGL_DRM_BUFFER_FORMAT_MESA=_C('EGL_DRM_BUFFER_FORMAT_MESA',0x31D0)
EGL_DRM_BUFFER_MESA=_C('EGL_DRM_BUFFER_MESA',0x31D3)
EGL_DRM_BUFFER_STRIDE_MESA=_C('EGL_DRM_BUFFER_STRIDE_MESA',0x31D4)
EGL_DRM_BUFFER_USE_MESA=_C('EGL_DRM_BUFFER_USE_MESA',0x31D1)
EGL_DRM_BUFFER_USE_SCANOUT_MESA=_C('EGL_DRM_BUFFER_USE_SCANOUT_MESA',0x00000001)
EGL_DRM_BUFFER_USE_SHARE_MESA=_C('EGL_DRM_BUFFER_USE_SHARE_MESA',0x00000002)
@_f
@_p.types(_cs.EGLImageKHR,_cs.EGLDisplay,arrays.GLintArray)
def eglCreateDRMImageMESA(dpy,attrib_list):pass
@_f
@_p.types(_cs.EGLBoolean,_cs.EGLDisplay,_cs.EGLImageKHR,arrays.GLintArray,arrays.GLintArray,arrays.GLintArray)
def eglExportDRMImageMESA(dpy,image,name,handle,stride):pass
```
#### File: GLES1/OES/draw_texture.py
```python
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES1 import _types as _cs
# End users want this...
from OpenGL.raw.GLES1._types import *
from OpenGL.raw.GLES1 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES1_OES_draw_texture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES1,'GLES1_OES_draw_texture',error_checker=_errors._error_checker)
GL_TEXTURE_CROP_RECT_OES=_C('GL_TEXTURE_CROP_RECT_OES',0x8B9D)
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glDrawTexfOES(x,y,z,width,height):pass
@_f
@_p.types(None,arrays.GLfloatArray)
def glDrawTexfvOES(coords):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint)
def glDrawTexiOES(x,y,z,width,height):pass
@_f
@_p.types(None,arrays.GLintArray)
def glDrawTexivOES(coords):pass
@_f
@_p.types(None,_cs.GLshort,_cs.GLshort,_cs.GLshort,_cs.GLshort,_cs.GLshort)
def glDrawTexsOES(x,y,z,width,height):pass
@_f
@_p.types(None,arrays.GLshortArray)
def glDrawTexsvOES(coords):pass
@_f
@_p.types(None,_cs.GLfixed,_cs.GLfixed,_cs.GLfixed,_cs.GLfixed,_cs.GLfixed)
def glDrawTexxOES(x,y,z,width,height):pass
@_f
@_p.types(None,arrays.GLfixedArray)
def glDrawTexxvOES(coords):pass
```
#### File: GLES2/NV/clip_space_w_scaling.py
```python
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_NV_clip_space_w_scaling'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_NV_clip_space_w_scaling',error_checker=_errors._error_checker)
GL_VIEWPORT_POSITION_W_SCALE_NV=_C('GL_VIEWPORT_POSITION_W_SCALE_NV',0x937C)
GL_VIEWPORT_POSITION_W_SCALE_X_COEFF_NV=_C('GL_VIEWPORT_POSITION_W_SCALE_X_COEFF_NV',0x937D)
GL_VIEWPORT_POSITION_W_SCALE_Y_COEFF_NV=_C('GL_VIEWPORT_POSITION_W_SCALE_Y_COEFF_NV',0x937E)
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat)
def glViewportPositionWScaleNV(index,xcoeff,ycoeff):pass
```
#### File: GL/EXT/paletted_texture.py
```python
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_paletted_texture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_paletted_texture',error_checker=_errors._error_checker)
GL_COLOR_INDEX12_EXT=_C('GL_COLOR_INDEX12_EXT',0x80E6)
GL_COLOR_INDEX16_EXT=_C('GL_COLOR_INDEX16_EXT',0x80E7)
GL_COLOR_INDEX1_EXT=_C('GL_COLOR_INDEX1_EXT',0x80E2)
GL_COLOR_INDEX2_EXT=_C('GL_COLOR_INDEX2_EXT',0x80E3)
GL_COLOR_INDEX4_EXT=_C('GL_COLOR_INDEX4_EXT',0x80E4)
GL_COLOR_INDEX8_EXT=_C('GL_COLOR_INDEX8_EXT',0x80E5)
GL_TEXTURE_INDEX_SIZE_EXT=_C('GL_TEXTURE_INDEX_SIZE_EXT',0x80ED)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glColorTableEXT(target,internalFormat,width,format,type,table):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glGetColorTableEXT(target,format,type,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetColorTableParameterfvEXT(target,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetColorTableParameterivEXT(target,pname,params):pass
```
#### File: GLX/AMD/gpu_association.py
```python
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_AMD_gpu_association'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLX,'GLX_AMD_gpu_association',error_checker=_errors._error_checker)
GLX_GPU_CLOCK_AMD=_C('GLX_GPU_CLOCK_AMD',0x21A4)
GLX_GPU_FASTEST_TARGET_GPUS_AMD=_C('GLX_GPU_FASTEST_TARGET_GPUS_AMD',0x21A2)
GLX_GPU_NUM_PIPES_AMD=_C('GLX_GPU_NUM_PIPES_AMD',0x21A5)
GLX_GPU_NUM_RB_AMD=_C('GLX_GPU_NUM_RB_AMD',0x21A7)
GLX_GPU_NUM_SIMD_AMD=_C('GLX_GPU_NUM_SIMD_AMD',0x21A6)
GLX_GPU_NUM_SPI_AMD=_C('GLX_GPU_NUM_SPI_AMD',0x21A8)
GLX_GPU_OPENGL_VERSION_STRING_AMD=_C('GLX_GPU_OPENGL_VERSION_STRING_AMD',0x1F02)
GLX_GPU_RAM_AMD=_C('GLX_GPU_RAM_AMD',0x21A3)
GLX_GPU_RENDERER_STRING_AMD=_C('GLX_GPU_RENDERER_STRING_AMD',0x1F01)
GLX_GPU_VENDOR_AMD=_C('GLX_GPU_VENDOR_AMD',0x1F00)
@_f
@_p.types(None,_cs.GLXContext,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLbitfield,_cs.GLenum)
def glXBlitContextFramebufferAMD(dstCtx,srcX0,srcY0,srcX1,srcY1,dstX0,dstY0,dstX1,dstY1,mask,filter):pass
@_f
@_p.types(_cs.GLXContext,_cs.c_uint,_cs.GLXContext)
def glXCreateAssociatedContextAMD(id,share_list):pass
@_f
@_p.types(_cs.GLXContext,_cs.c_uint,_cs.GLXContext,ctypes.POINTER(_cs.c_int))
def glXCreateAssociatedContextAttribsAMD(id,share_context,attribList):pass
@_f
@_p.types(_cs.Bool,_cs.GLXContext)
def glXDeleteAssociatedContextAMD(ctx):pass
@_f
@_p.types(_cs.c_uint,_cs.GLXContext)
def glXGetContextGPUIDAMD(ctx):pass
@_f
@_p.types(_cs.GLXContext,)
def glXGetCurrentAssociatedContextAMD():pass
@_f
@_p.types(_cs.c_uint,_cs.c_uint,ctypes.POINTER(_cs.c_uint))
def glXGetGPUIDsAMD(maxCount,ids):pass
@_f
@_p.types(_cs.c_int,_cs.c_uint,_cs.c_int,_cs.GLenum,_cs.c_uint,ctypes.c_void_p)
def glXGetGPUInfoAMD(id,property,dataType,size,data):pass
@_f
@_p.types(_cs.Bool,_cs.GLXContext)
def glXMakeAssociatedContextCurrentAMD(ctx):pass
```
#### File: raw/GLX/_types.py
```python
from OpenGL import platform as _p, constant, extensions
from ctypes import *
from OpenGL.raw.GL._types import *
from OpenGL._bytes import as_8_bit
c_void = None
void = None
Bool = c_uint
class _GLXQuerier( extensions.ExtensionQuerier ):
prefix = as_8_bit('GLX_')
assumed_version = [1,1]
version_prefix = as_8_bit('GLX_VERSION_GLX_')
def getDisplay( self ):
from OpenGL.raw.GLX import _types
from OpenGL.platform import ctypesloader
import ctypes, os
X11 = ctypesloader.loadLibrary( ctypes.cdll, 'X11' )
XOpenDisplay = X11.XOpenDisplay
XOpenDisplay.restype = ctypes.POINTER(_types.Display)
return XOpenDisplay( os.environ.get( 'DISPLAY' ))
def getScreen( self, display ):
from OpenGL.platform import ctypesloader
from OpenGL.raw.GLX import _types
import ctypes, os
X11 = ctypesloader.loadLibrary( ctypes.cdll, 'X11' )
XDefaultScreen = X11.XDefaultScreen
XDefaultScreen.argtypes = [ctypes.POINTER(_types.Display)]
return XDefaultScreen( display )
def pullVersion( self ):
from OpenGL.GLX import glXQueryVersion
import ctypes
if glXQueryVersion:
display = self.getDisplay()
major,minor = ctypes.c_int(),ctypes.c_int()
glXQueryVersion(display, major, minor)
return [major.value,minor.value]
else:
return [1,1]
def pullExtensions( self ):
if self.getVersion() >= [1,2]:
from OpenGL.GLX import glXQueryExtensionsString
display = self.getDisplay()
screen = self.getScreen( display )
if glXQueryExtensionsString:
return glXQueryExtensionsString( display,screen ).split()
return []
GLXQuerier=_GLXQuerier()
class struct___GLXcontextRec(Structure):
__slots__ = [
]
struct___GLXcontextRec._fields_ = [
('_opaque_struct', c_int)
]
class struct___GLXcontextRec(Structure):
__slots__ = [
]
struct___GLXcontextRec._fields_ = [
('_opaque_struct', c_int)
]
GLXContext = POINTER(struct___GLXcontextRec) # /usr/include/GL/glx.h:178
XID = c_ulong # /usr/include/X11/X.h:66
GLXPixmap = XID # /usr/include/GL/glx.h:179
GLXDrawable = XID # /usr/include/GL/glx.h:180
class struct___GLXFBConfigRec(Structure):
__slots__ = [
]
struct___GLXFBConfigRec._fields_ = [
('_opaque_struct', c_int)
]
GLXFBConfig = POINTER(struct___GLXFBConfigRec) # /usr/include/GL/glx.h:182
GLXFBConfigID = XID # /usr/include/GL/glx.h:183
GLXContextID = XID # /usr/include/GL/glx.h:184
GLXWindow = XID # /usr/include/GL/glx.h:185
GLXPbuffer = XID # /usr/include/GL/glx.h:186
GLXPbufferSGIX = XID
GLXVideoSourceSGIX = XID
class struct_anon_103(Structure):
__slots__ = [
'visual',
'visualid',
'screen',
'depth',
'class',
'red_mask',
'green_mask',
'blue_mask',
'colormap_size',
'bits_per_rgb',
]
class struct_anon_18(Structure):
__slots__ = [
'ext_data',
'visualid',
'class',
'red_mask',
'green_mask',
'blue_mask',
'bits_per_rgb',
'map_entries',
]
class struct__XExtData(Structure):
__slots__ = [
'number',
'next',
'free_private',
'private_data',
]
XPointer = c_char_p # /usr/include/X11/Xlib.h:84
struct__XExtData._fields_ = [
('number', c_int),
('next', POINTER(struct__XExtData)),
('free_private', POINTER(CFUNCTYPE(c_int, POINTER(struct__XExtData)))),
('private_data', XPointer),
]
XExtData = struct__XExtData # /usr/include/X11/Xlib.h:163
VisualID = c_ulong # /usr/include/X11/X.h:76
struct_anon_18._fields_ = [
('ext_data', POINTER(XExtData)),
('visualid', VisualID),
('class', c_int),
('red_mask', c_ulong),
('green_mask', c_ulong),
('blue_mask', c_ulong),
('bits_per_rgb', c_int),
('map_entries', c_int),
]
Visual = struct_anon_18 # /usr/include/X11/Xlib.h:246
struct_anon_103._fields_ = [
('visual', POINTER(Visual)),
('visualid', VisualID),
('screen', c_int),
('depth', c_int),
('class', c_int),
('red_mask', c_ulong),
('green_mask', c_ulong),
('blue_mask', c_ulong),
('colormap_size', c_int),
('bits_per_rgb', c_int),
]
XVisualInfo = struct_anon_103 # /usr/include/X11/Xutil.h:294
class struct__XDisplay(Structure):
__slots__ = [
]
struct__XDisplay._fields_ = [
('_opaque_struct', c_int)
]
Display = struct__XDisplay # /usr/include/X11/Xlib.h:495
Pixmap = XID # /usr/include/X11/X.h:102
Font = XID # /usr/include/X11/X.h:100
Window = XID # /usr/include/X11/X.h:96
GLX_ARB_get_proc_address = constant.Constant( 'GLX_ARB_get_proc_address', 1 )
__GLXextFuncPtr = CFUNCTYPE(None) # /usr/include/GL/glx.h:330
# EXT_texture_from_pixmap (/usr/include/GL/glx.h:436)
class struct_anon_111(Structure):
__slots__ = [
'event_type',
'draw_type',
'serial',
'send_event',
'display',
'drawable',
'buffer_mask',
'aux_buffer',
'x',
'y',
'width',
'height',
'count',
]
struct_anon_111._fields_ = [
('event_type', c_int),
('draw_type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', GLXDrawable),
('buffer_mask', c_uint),
('aux_buffer', c_uint),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('count', c_int),
]
GLXPbufferClobberEvent = struct_anon_111 # /usr/include/GL/glx.h:502
class struct_anon_112(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'drawable',
'event_type',
'ust',
'msc',
'sbc',
]
struct_anon_112._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', GLXDrawable),
('event_type', c_int),
('ust', c_int64),
('msc', c_int64),
('sbc', c_int64),
]
GLXBufferSwapComplete = struct_anon_112 # /usr/include/GL/glx.h:514
class struct___GLXEvent(Union):
__slots__ = [
'glxpbufferclobber',
'glxbufferswapcomplete',
'pad',
]
struct___GLXEvent._fields_ = [
('glxpbufferclobber', GLXPbufferClobberEvent),
('glxbufferswapcomplete', GLXBufferSwapComplete),
('pad', c_long * 24),
]
GLXEvent = struct___GLXEvent # /usr/include/GL/glx.h:520
class GLXHyperpipeConfigSGIX( Structure ):
_fields_ = [
('pipeName', c_char * 80),
('channel',c_int),
('participationType',c_uint),
('timeSlice',c_int),
]
```
#### File: WGL/I3D/digital_video_control.py
```python
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.WGL import _types as _cs
# End users want this...
from OpenGL.raw.WGL._types import *
from OpenGL.raw.WGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'WGL_I3D_digital_video_control'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.WGL,'WGL_I3D_digital_video_control',error_checker=_errors._error_checker)
WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D=_C('WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D',0x2050)
WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D=_C('WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D',0x2051)
WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D=_C('WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D',0x2052)
WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D=_C('WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D',0x2053)
@_f
@_p.types(_cs.BOOL,_cs.HDC,_cs.c_int,ctypes.POINTER(_cs.c_int))
def wglGetDigitalVideoParametersI3D(hDC,iAttribute,piValue):pass
@_f
@_p.types(_cs.BOOL,_cs.HDC,_cs.c_int,ctypes.POINTER(_cs.c_int))
def wglSetDigitalVideoParametersI3D(hDC,iAttribute,piValue):pass
```
#### File: WGL/ARB/robustness_application_isolation.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.WGL import _types, _glgets
from OpenGL.raw.WGL.ARB.robustness_application_isolation import *
from OpenGL.raw.WGL.ARB.robustness_application_isolation import _EXTENSION_NAME
def glInitRobustnessApplicationIsolationARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: resolution/resolvelib/provider.py
```python
from pip._vendor.resolvelib.providers import AbstractProvider
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Optional, Sequence, Tuple, Union
from pip._internal.req.req_install import InstallRequirement
from .base import Requirement, Candidate
from .factory import Factory
class PipProvider(AbstractProvider):
def __init__(
self,
factory, # type: Factory
ignore_dependencies, # type: bool
):
# type: (...) -> None
self._factory = factory
self._ignore_dependencies = ignore_dependencies
def get_install_requirement(self, c):
# type: (Candidate) -> Optional[InstallRequirement]
return c.get_install_requirement()
def identify(self, dependency):
# type: (Union[Requirement, Candidate]) -> str
return dependency.name
def get_preference(
self,
resolution, # type: Optional[Candidate]
candidates, # type: Sequence[Candidate]
information # type: Sequence[Tuple[Requirement, Candidate]]
):
# type: (...) -> Any
# Use the "usual" value for now
return len(candidates)
def find_matches(self, requirement):
# type: (Requirement) -> Sequence[Candidate]
return requirement.find_matches()
def is_satisfied_by(self, requirement, candidate):
# type: (Requirement, Candidate) -> bool
return requirement.is_satisfied_by(candidate)
def get_dependencies(self, candidate):
# type: (Candidate) -> Sequence[Requirement]
if self._ignore_dependencies:
return []
return candidate.get_dependencies()
```
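The methods above are resolvelib's provider hooks. A toy illustration of the contract, with invented `Req`/`Cand` stand-ins instead of pip internals:
```python
# Req and Cand are invented stand-ins for Requirement/Candidate.
class Cand:
    def __init__(self, name, version):
        self.name, self.version = name, version

class Req:
    def __init__(self, name, versions):
        self.name, self.versions = name, versions
    def find_matches(self):
        return [Cand(self.name, v) for v in self.versions]
    def is_satisfied_by(self, cand):
        return cand.version in self.versions

req = Req("demo", [1, 2])
matches = req.find_matches()             # what find_matches() delegates to
print([c.version for c in matches])      # [1, 2]
print(req.is_satisfied_by(matches[0]))   # True
# get_preference() returns len(candidates): requirements with fewer
# remaining candidates get resolved first.
```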
#### File: Bot_Module/webCrawler/MinecraftWikiCrawler.py
```python
import requests
from bs4 import BeautifulSoup
class MinecraftWikiCrawler:
def __init__(self):
self.Prefix = 'https://minecraft-zh.gamepedia.com/'
def Search(self, Tag):
url = self.Prefix + Tag
res = requests.get(url)
soup = BeautifulSoup(res.content, 'html.parser')
Total = ''
for data in soup.select('#pageWrapper #bodyContent div.mw-parser-output p'):
# Append each paragraph's text once (the original re-added the whole
# running buffer on every iteration, duplicating earlier paragraphs)
Total += str(data.text)
if data.has_attr('href'):
Total += str(data['href'])
return Total
```
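A usage sketch for the crawler above (needs network access and assumes the wiki still serves this CSS path):
```python
crawler = MinecraftWikiCrawler()
print(crawler.Search('鑽石'))  # fetch and print the '鑽石' (Diamond) article text
```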
#### File: Bot_Module/webCrawler/WebCrawlerCore.py
```python
import datetime
from Bot_Module.webCrawler.MinecraftWikiCrawler import MinecraftWikiCrawler
class WebCrawlerCore():
def __init__(self):
try:
self.MinecraftWikiCrawler = MinecraftWikiCrawler()
except Exception as Errr:
raise Errr
print(datetime.datetime.now(), self.__class__, 'Ready', sep=' ')
```
#### File: site-packages/attr/exceptions.py
```python
from __future__ import absolute_import, division, print_function
class FrozenError(AttributeError):
"""
A frozen/immutable instance or attribute has been attempted to be
modified.
It mirrors the behavior of ``namedtuples`` by using the same error message
and subclassing `AttributeError`.
.. versionadded:: 20.1.0
"""
msg = "can't set attribute"
args = [msg]
class FrozenInstanceError(FrozenError):
"""
A frozen instance has been attempted to be modified.
.. versionadded:: 16.1.0
"""
class FrozenAttributeError(FrozenError):
"""
A frozen attribute has been attempted to be modified.
.. versionadded:: 20.1.0
"""
class AttrsAttributeNotFoundError(ValueError):
"""
An ``attrs`` function couldn't find an attribute that the user asked for.
.. versionadded:: 16.2.0
"""
class NotAnAttrsClassError(ValueError):
"""
A non-``attrs`` class has been passed into an ``attrs`` function.
.. versionadded:: 16.2.0
"""
class DefaultAlreadySetError(RuntimeError):
"""
A default has been set using ``attr.ib()`` and is attempted to be reset
using the decorator.
.. versionadded:: 17.1.0
"""
class UnannotatedAttributeError(RuntimeError):
"""
A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
annotation.
.. versionadded:: 17.3.0
"""
class PythonTooOldError(RuntimeError):
"""
It was attempted to use an ``attrs`` feature that requires a newer Python
version.
.. versionadded:: 18.2.0
"""
class NotCallableError(TypeError):
"""
An ``attr.ib()`` requiring a callable has been set with a value
that is not callable.
.. versionadded:: 19.2.0
"""
def __init__(self, msg, value):
super(TypeError, self).__init__(msg, value)
self.msg = msg
self.value = value
def __str__(self):
return str(self.msg)
```
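A minimal sketch of the frozen-instance behaviour, assuming the attrs package is installed:
```python
import attr

@attr.s(frozen=True)
class Point(object):
    x = attr.ib()

p = Point(1)
try:
    p.x = 2
except attr.exceptions.FrozenInstanceError as e:
    print(e.msg)  # "can't set attribute"
```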
#### File: site-packages/attr/_version_info.py
```python
from __future__ import absolute_import, division, print_function
from functools import total_ordering
from ._funcs import astuple
from ._make import attrib, attrs
@total_ordering
@attrs(eq=False, order=False, slots=True, frozen=True)
class VersionInfo(object):
"""
A version object that can be compared to tuple of length 1--4:
>>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
True
>>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
True
>>> vi = attr.VersionInfo(19, 2, 0, "final")
>>> vi < (19, 1, 1)
False
>>> vi < (19,)
False
>>> vi == (19, 2,)
True
>>> vi == (19, 2, 1)
False
.. versionadded:: 19.2
"""
year = attrib(type=int)
minor = attrib(type=int)
micro = attrib(type=int)
releaselevel = attrib(type=str)
@classmethod
def _from_version_string(cls, s):
"""
Parse *s* and return a _VersionInfo.
"""
v = s.split(".")
if len(v) == 3:
v.append("final")
return cls(
year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
)
def _ensure_tuple(self, other):
"""
Ensure *other* is a tuple of a valid length.
Returns a possibly transformed *other* and ourselves as a tuple of
the same length as *other*.
"""
if self.__class__ is other.__class__:
other = astuple(other)
if not isinstance(other, tuple):
raise NotImplementedError
if not (1 <= len(other) <= 4):
raise NotImplementedError
return astuple(self)[: len(other)], other
def __eq__(self, other):
try:
us, them = self._ensure_tuple(other)
except NotImplementedError:
return NotImplemented
return us == them
def __lt__(self, other):
try:
us, them = self._ensure_tuple(other)
except NotImplementedError:
return NotImplemented
# Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
# have to do anything special with releaselevel for now.
return us < them
```
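A quick check of the class above, assuming attrs >= 19.2 is installed (`attr.__version_info__` is an instance of it):
```python
import attr

print(attr.__version_info__ >= (19, 2))                 # True on any modern attrs
print(attr.VersionInfo._from_version_string("20.1.0"))  # releaselevel defaults to "final"
```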
#### File: bs4/tests/test_htmlparser.py
```python
from pdb import set_trace
import pickle
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
from bs4.builder import HTMLParserTreeBuilder
from bs4.builder._htmlparser import BeautifulSoupHTMLParser
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
default_builder = HTMLParserTreeBuilder
def test_namespaced_system_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_namespaced_public_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_builder_is_pickled(self):
"""Unlike most tree builders, HTMLParserTreeBuilder and will
be restored after pickling.
"""
tree = self.soup("<a><b>foo</a>")
dumped = pickle.dumps(tree, 2)
loaded = pickle.loads(dumped)
self.assertTrue(isinstance(loaded.builder, type(tree.builder)))
def test_redundant_empty_element_closing_tags(self):
self.assertSoupEquals('<br></br><br></br><br></br>', "<br/><br/><br/>")
self.assertSoupEquals('</br></br></br>', "")
def test_empty_element(self):
# This verifies that any buffered data present when the parser
# finishes working is handled.
self.assertSoupEquals("foo &# bar", "foo &# bar")
def test_tracking_line_numbers(self):
# The html.parser TreeBuilder keeps track of line number and
# position of each element.
markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
soup = self.soup(markup)
self.assertEqual(2, soup.p.sourceline)
self.assertEqual(3, soup.p.sourcepos)
self.assertEqual("sourceline", soup.p.find('sourceline').name)
# You can deactivate this behavior.
soup = self.soup(markup, store_line_numbers=False)
self.assertEqual("sourceline", soup.p.sourceline.name)
self.assertEqual("sourcepos", soup.p.sourcepos.name)
def test_on_duplicate_attribute(self):
# The html.parser tree builder has a variety of ways of
# handling a tag that contains the same attribute multiple times.
markup = '<a class="cls" href="url1" href="url2" href="url3" id="id">'
# If you don't provide any particular value for
# on_duplicate_attribute, later values replace earlier values.
soup = self.soup(markup)
self.assertEqual("url3", soup.a['href'])
self.assertEqual(["cls"], soup.a['class'])
self.assertEqual("id", soup.a['id'])
# You can also get this behavior explicitly.
def assert_attribute(on_duplicate_attribute, expected):
soup = self.soup(
markup, on_duplicate_attribute=on_duplicate_attribute
)
self.assertEqual(expected, soup.a['href'])
# Verify that non-duplicate attributes are treated normally.
self.assertEqual(["cls"], soup.a['class'])
self.assertEqual("id", soup.a['id'])
assert_attribute(None, "url3")
assert_attribute(BeautifulSoupHTMLParser.REPLACE, "url3")
# You can ignore subsequent values in favor of the first.
assert_attribute(BeautifulSoupHTMLParser.IGNORE, "url1")
# And you can pass in a callable that does whatever you want.
def accumulate(attrs, key, value):
if not isinstance(attrs[key], list):
attrs[key] = [attrs[key]]
attrs[key].append(value)
assert_attribute(accumulate, ["url1", "url2", "url3"])
class TestHTMLParserSubclass(SoupTest):
def test_error(self):
"""Verify that our HTMLParser subclass implements error() in a way
that doesn't cause a crash.
"""
parser = BeautifulSoupHTMLParser()
parser.error("don't crash")
```
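A sketch of the duplicate-attribute behaviour the tests above exercise (bs4 >= 4.9.1 assumed):
```python
from bs4 import BeautifulSoup
from bs4.builder._htmlparser import BeautifulSoupHTMLParser

markup = '<a href="url1" href="url2">'
print(BeautifulSoup(markup, 'html.parser').a['href'])  # url2 (later value wins)
soup = BeautifulSoup(markup, 'html.parser',
                     on_duplicate_attribute=BeautifulSoupHTMLParser.IGNORE)
print(soup.a['href'])  # url1 (first value wins)
```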
#### File: site-packages/multidict/_multidict_base.py
```python
from collections.abc import ItemsView, Iterable, KeysView, Set, ValuesView
def _abc_itemsview_register(view_cls):
ItemsView.register(view_cls)
def _abc_keysview_register(view_cls):
KeysView.register(view_cls)
def _abc_valuesview_register(view_cls):
ValuesView.register(view_cls)
def _viewbaseset_richcmp(view, other, op):
if op == 0: # <
if not isinstance(other, Set):
return NotImplemented
return len(view) < len(other) and view <= other
elif op == 1: # <=
if not isinstance(other, Set):
return NotImplemented
if len(view) > len(other):
return False
for elem in view:
if elem not in other:
return False
return True
elif op == 2: # ==
if not isinstance(other, Set):
return NotImplemented
return len(view) == len(other) and view <= other
elif op == 3: # !=
return not view == other
elif op == 4: # >
if not isinstance(other, Set):
return NotImplemented
return len(view) > len(other) and view >= other
elif op == 5: # >=
if not isinstance(other, Set):
return NotImplemented
if len(view) < len(other):
return False
for elem in other:
if elem not in view:
return False
return True
def _viewbaseset_and(view, other):
if not isinstance(other, Iterable):
return NotImplemented
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
other = set(iter(other))
if not isinstance(other, Set):
other = set(iter(other))
return view & other
def _viewbaseset_or(view, other):
if not isinstance(other, Iterable):
return NotImplemented
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
other = set(iter(other))
if not isinstance(other, Set):
other = set(iter(other))
return view | other
def _viewbaseset_sub(view, other):
if not isinstance(other, Iterable):
return NotImplemented
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
other = set(iter(other))
if not isinstance(other, Set):
other = set(iter(other))
return view - other
def _viewbaseset_xor(view, other):
if not isinstance(other, Iterable):
return NotImplemented
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
other = set(iter(other))
if not isinstance(other, Set):
other = set(iter(other))
return view ^ other
def _itemsview_isdisjoint(view, other):
"Return True if two sets have a null intersection."
for v in other:
if v in view:
return False
return True
def _itemsview_repr(view):
lst = []
for k, v in view:
lst.append("{!r}: {!r}".format(k, v))
body = ", ".join(lst)
return "{}({})".format(view.__class__.__name__, body)
def _keysview_isdisjoint(view, other):
"Return True if two sets have a null intersection."
for k in other:
if k in view:
return False
return True
def _keysview_repr(view):
lst = []
for k in view:
lst.append("{!r}".format(k))
body = ", ".join(lst)
return "{}({})".format(view.__class__.__name__, body)
def _valuesview_repr(view):
lst = []
for v in view:
lst.append("{!r}".format(v))
body = ", ".join(lst)
return "{}({})".format(view.__class__.__name__, body)
def _mdrepr(md):
lst = []
for k, v in md.items():
lst.append("'{}': {!r}".format(k, v))
body = ", ".join(lst)
return "<{}({})>".format(md.__class__.__name__, body)
```
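The helpers above give multidict's views the same set semantics as the builtin dict views; a quick illustration using the stdlib equivalents:
```python
small = {'a': 1}.keys()
large = {'a': 1, 'b': 2}.keys()
print(small < large)                      # True: proper subset
print(small <= large)                     # True
print(large.isdisjoint({'c': 3}.keys()))  # True: no common keys
```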
#### File: resolution/resolvelib/resolver.py
```python
import functools
import logging
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible
from pip._vendor.resolvelib import Resolver as RLResolver
from pip._internal.exceptions import InstallationError
from pip._internal.req.req_install import check_invalid_constraint_type
from pip._internal.req.req_set import RequirementSet
from pip._internal.resolution.base import BaseResolver
from pip._internal.resolution.resolvelib.provider import PipProvider
from pip._internal.utils.misc import dist_is_editable
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from .base import Constraint
from .factory import Factory
if MYPY_CHECK_RUNNING:
from typing import Dict, List, Optional, Set, Tuple
from pip._vendor.resolvelib.resolvers import Result
from pip._vendor.resolvelib.structs import Graph
from pip._internal.cache import WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.req_install import InstallRequirement
from pip._internal.resolution.base import InstallRequirementProvider
logger = logging.getLogger(__name__)
class Resolver(BaseResolver):
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(
self,
preparer, # type: RequirementPreparer
finder, # type: PackageFinder
wheel_cache, # type: Optional[WheelCache]
make_install_req, # type: InstallRequirementProvider
use_user_site, # type: bool
ignore_dependencies, # type: bool
ignore_installed, # type: bool
ignore_requires_python, # type: bool
force_reinstall, # type: bool
upgrade_strategy, # type: str
py_version_info=None, # type: Optional[Tuple[int, ...]]
lazy_wheel=False, # type: bool
):
super(Resolver, self).__init__()
if lazy_wheel:
logger.warning(
'pip is using lazily downloaded wheels using HTTP '
'range requests to obtain dependency information. '
'This experimental feature is enabled through '
'--use-feature=fast-deps and it is not ready for production.'
)
assert upgrade_strategy in self._allowed_strategies
self.factory = Factory(
finder=finder,
preparer=preparer,
make_install_req=make_install_req,
wheel_cache=wheel_cache,
use_user_site=use_user_site,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
py_version_info=py_version_info,
lazy_wheel=lazy_wheel,
)
self.ignore_dependencies = ignore_dependencies
self.upgrade_strategy = upgrade_strategy
self._result = None # type: Optional[Result]
def resolve(self, root_reqs, check_supported_wheels):
# type: (List[InstallRequirement], bool) -> RequirementSet
constraints = {} # type: Dict[str, Constraint]
user_requested = set() # type: Set[str]
requirements = []
for req in root_reqs:
if req.constraint:
# Ensure we only accept valid constraints
problem = check_invalid_constraint_type(req)
if problem:
raise InstallationError(problem)
if not req.match_markers():
continue
name = canonicalize_name(req.name)
if name in constraints:
constraints[name] &= req
else:
constraints[name] = Constraint.from_ireq(req)
else:
if req.user_supplied and req.name:
user_requested.add(canonicalize_name(req.name))
r = self.factory.make_requirement_from_install_req(
req, requested_extras=(),
)
if r is not None:
requirements.append(r)
provider = PipProvider(
factory=self.factory,
constraints=constraints,
ignore_dependencies=self.ignore_dependencies,
upgrade_strategy=self.upgrade_strategy,
user_requested=user_requested,
)
reporter = BaseReporter()
resolver = RLResolver(provider, reporter)
try:
try_to_avoid_resolution_too_deep = 2000000
self._result = resolver.resolve(
requirements, max_rounds=try_to_avoid_resolution_too_deep,
)
except ResolutionImpossible as e:
error = self.factory.get_installation_error(e)
six.raise_from(error, e)
req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
for candidate in self._result.mapping.values():
ireq = candidate.get_install_requirement()
if ireq is None:
continue
# Check if there is already an installation under the same name,
# and set a flag for later stages to uninstall it, if needed.
# * There isn't, good -- no uninstallation needed.
# * The --force-reinstall flag is set. Always reinstall.
# * The installation is different in version or editable-ness, so
# we need to uninstall it to install the new distribution.
# * The installed version is the same as the pending distribution.
# Skip this distribution altogether to save work.
installed_dist = self.factory.get_dist_to_uninstall(candidate)
if installed_dist is None:
ireq.should_reinstall = False
elif self.factory.force_reinstall:
ireq.should_reinstall = True
elif installed_dist.parsed_version != candidate.version:
ireq.should_reinstall = True
elif dist_is_editable(installed_dist) != candidate.is_editable:
ireq.should_reinstall = True
else:
continue
link = candidate.source_link
if link and link.is_yanked:
# The reason can contain non-ASCII characters, Unicode
# is required for Python 2.
msg = (
u'The candidate selected for download or install is a '
u'yanked version: {name!r} candidate (version {version} '
u'at {link})\nReason for being yanked: {reason}'
).format(
name=candidate.name,
version=candidate.version,
link=link,
reason=link.yanked_reason or u'<none given>',
)
logger.warning(msg)
req_set.add_named_requirement(ireq)
return req_set
def get_installation_order(self, req_set):
# type: (RequirementSet) -> List[InstallRequirement]
"""Get order for installation of requirements in RequirementSet.
The returned list contains a requirement before another that depends on
it. This helps ensure that the environment is kept consistent as they
get installed one-by-one.
The current implementation creates a topological ordering of the
dependency graph, while breaking any cycles in the graph at arbitrary
points. We make no guarantees about where the cycle would be broken,
other than they would be broken.
"""
assert self._result is not None, "must call resolve() first"
graph = self._result.graph
weights = get_topological_weights(graph)
sorted_items = sorted(
req_set.requirements.items(),
key=functools.partial(_req_set_item_sorter, weights=weights),
reverse=True,
)
return [ireq for _, ireq in sorted_items]
def get_topological_weights(graph):
# type: (Graph) -> Dict[Optional[str], int]
"""Assign weights to each node based on how "deep" they are.
This implementation may change at any point in the future without prior
notice.
We take the length of the longest path to any node from the root, ignoring
any paths that contain a single node twice (i.e. cycles). This is done
through a depth-first search through the graph, while keeping track of the
path to the node.
Cycles in the graph would result in a node being revisited while it is
already on its own path. In this case, take no action. This helps ensure we
don't get stuck in a cycle.
When assigning weight, the longer path (i.e. larger length) is preferred.
"""
path = set() # type: Set[Optional[str]]
weights = {} # type: Dict[Optional[str], int]
def visit(node):
# type: (Optional[str]) -> None
if node in path:
# We hit a cycle, so we'll break it here.
return
# Time to visit the children!
path.add(node)
for child in graph.iter_children(node):
visit(child)
path.remove(node)
last_known_parent_count = weights.get(node, 0)
weights[node] = max(last_known_parent_count, len(path))
# `None` is guaranteed to be the root node by resolvelib.
visit(None)
# Sanity checks
assert weights[None] == 0
assert len(weights) == len(graph)
return weights
def _req_set_item_sorter(
item, # type: Tuple[str, InstallRequirement]
weights, # type: Dict[Optional[str], int]
):
# type: (...) -> Tuple[int, str]
"""Key function used to sort install requirements for installation.
Based on the "weight" mapping calculated in ``get_installation_order()``.
The canonical package name is returned as the second member as a tie-
breaker to ensure the result is predictable, which is useful in tests.
"""
name = canonicalize_name(item[0])
return weights[name], name
```
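A self-contained sketch of the weighting idea in `get_topological_weights`, on a toy graph (a plain dict of node -> children; `None` plays the root role, as in resolvelib):
```python
def toy_topological_weights(children):
    path, weights = set(), {}
    def visit(node):
        if node in path:  # cycle: break it here, as above
            return
        path.add(node)
        for child in children.get(node, ()):
            visit(child)
        path.remove(node)
        weights[node] = max(weights.get(node, 0), len(path))
    visit(None)
    return weights

# 'a' depends on 'b', which cycles back to 'a'; the cycle is simply cut.
print(toy_topological_weights({None: ['a'], 'a': ['b'], 'b': ['a']}))
# {'b': 2, 'a': 1, None: 0}: deeper nodes get larger weights
```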
#### File: Python_FileIO/Models/Os_Detail.py
```python
import platform
import os
class Os_Detail():
def __init__(self):
pass
def Os_Name(self):
return os.name
def Os_System(self):
return platform.system()
def Os_Machine(self):
return platform.machine()
def Os_Platform(self):
return platform.platform()
def Os_Uname(self):
return platform.uname()
def Os_Version(self):
return platform.version()
def Os_MacVer(self):
return platform.mac_ver()
def Os_All_Detial(self):
print(f"system: {platform.system()}",
f"machine: {platform.machine()}",
f"platform: {platform.platform()}",
f"uname: {platform.uname()}",
f"version: {platform.version()}",
f"mac_ver: {platform.mac_ver()}",sep='\n')
def Python_Ver(self):
return platform.python_version()
```
#### File: Python_FileIO/Models/Os_Dir.py
```python
import os
import shutil
import pathlib
class Os_Dir():
def __init__(self):
self.WorkPath=os.getcwd()+'/'
# ----------------------------------------------------------------------------------------------
# Delete the directory at the given path
def Delete_Dir(self,Dir_Path):
try:
shutil.rmtree(Dir_Path)
except OSError as Err:
print(Err)
else:
print(Dir_Path,"The directory is deleted successfully",sep=' ')
# ----------------------------------------------------------------------------------------------
# Create the directory if it does not exist yet
def Create_Dir(self,Dir_Path):
if not os.path.isdir(Dir_Path):
os.mkdir(Dir_Path)
# ----------------------------------------------------------------------------------------------
# Check whether the directory exists
def Check_Dir(self,Dir_Path):
File = pathlib.Path(Dir_Path)
return File.is_dir()
# ----------------------------------------------------------------------------------------------
# Get the current working directory
def Get_WorkPath(self):
return os.getcwd()
```
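A usage sketch for the class above (creates and removes a scratch directory under the current working directory):
```python
d = Os_Dir()
d.Create_Dir('demo_dir')
print(d.Check_Dir('demo_dir'))  # True
d.Delete_Dir('demo_dir')
print(d.Check_Dir('demo_dir'))  # False
```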
#### File: Python_Mask_Bot_TaiwanOnly_JE/Core/Mask_Map_Core.py
```python
from Models.Map_Search import Map_Search
from Models.Mask_Map_Load import Mask_Map_Load
from Models.Time_difference import Time_difference
from Models.Mask_Search import Mask_Search
class Mask_Map_Core():
def __init__(self):
try:
self.Map_Search=Map_Search()
self.Mask_Map_Load=Mask_Map_Load()
self.Time_difference=Time_difference()
self.Mask_Search=Mask_Search()
except Exception as Errr:
raise Errr
```
#### File: je_old_repo/Python_Mask_Bot_TaiwanOnly_JE/LineBot_Main.py
```python
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage, AudienceRecipient, Limit, AgeFilter, RichMenu, RichMenuSize,
RichMenuArea, RichMenuBounds, URIAction,ImagemapSendMessage,
QuickReplyButton, MessageAction, ImageSendMessage, TemplateSendMessage, ImageCarouselTemplate, ImageCarouselColumn,
PostbackAction, CarouselTemplate, CarouselColumn, ConfirmTemplate, ButtonsTemplate, StickerSendMessage,
AudioSendMessage,QuickReply,ImageMessage, VideoMessage, AudioMessage,StickerMessage,LocationMessage,
LocationSendMessage, VideoSendMessage,BaseSize,Video,ExternalLink,MessageImagemapAction,ImagemapArea,FlexSendMessage,
URIImagemapAction,BubbleContainer,ImageComponent,FileMessage,MemberLeftEvent,MemberJoinedEvent,
FollowEvent,BeaconEvent,PostbackEvent,LeaveEvent,JoinEvent,UnfollowEvent
)
# For reading the config file
import configparser
# Main LINE message system
from Models.Line_MessageMain import Line_MessageMain
from Core.Mask_Map_Core import Mask_Map_Core
Mask=Mask_Map_Core()
# configparser setup
config = configparser.ConfigParser()
config.read('key.ini')
# Basic credentials for the LINE chat bot
line_bot_api = LineBotApi(config.get('line-bot', 'channel_access_token'))
handler = WebhookHandler(config.get('line-bot', 'channel_secret'))
Line_Main = Line_MessageMain(line_bot_api,MessageEvent, TextMessage, TextSendMessage, AudienceRecipient, Limit,
AgeFilter, RichMenu, RichMenuSize,
RichMenuArea, RichMenuBounds, URIAction,
QuickReplyButton, MessageAction, ImageSendMessage, TemplateSendMessage,
ImageCarouselTemplate, ImageCarouselColumn,
PostbackAction, CarouselTemplate,
CarouselColumn, ConfirmTemplate, ButtonsTemplate, StickerSendMessage, AudioSendMessage,
LocationSendMessage,
VideoSendMessage,QuickReply,ImagemapSendMessage,BaseSize,Video,ExternalLink,URIImagemapAction,
MessageImagemapAction,ImagemapArea,FlexSendMessage,BubbleContainer,ImageComponent)
app = Flask(__name__)
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
'''
Each event object contains the following attributes:
type: the kind of event, including Message (a user sends the bot a message), Follow (a user adds the bot as a friend), Unfollow (a user blocks the bot), Join (the bot is added to a group), Leave (the bot leaves a group), Postback (a user performs a postback action), and Beacon (a user enters or leaves the range of a LINE beacon).
replyToken: used when replying to this message; the program must pass this token when it replies.
source: the origin of the message, which can be a user, a group, or a room (chat room).
If the source is a user, source contains two attributes: type (the source kind) and userId (the id of the user who sent the message).
If the source is a group, source contains type and groupId; if the user has agreed to the Official Accounts Terms of Use there is an extra userId attribute.
If the source is a room, source contains type and roomId; if the user has agreed to the Official Accounts Terms of Use there is an extra userId attribute.
timestamp: the time the event was triggered.
message: the message that was sent.
For a text message, message contains three attributes: id (message id), type ('text'), and text (the message text).
For an image message, message contains two attributes: id (message id, usable to download the image) and type ('image').
For a video message, message contains id (usable to download the video) and type ('video').
For an audio message, message contains id (usable to download the audio) and type ('audio').
For a file message, message contains four attributes: id (usable to download the file), type ('file'), filename, and filesize.
For a location message, message contains six attributes: id, type ('location'), title (place name), address, latitude, and longitude.
For a sticker message, message contains four attributes: id, type ('sticker'), packageId, and stickerId (every sticker has its own packageId and stickerId).
'''
if(event.message.type=='text'):
text=event.message.text
#Line_Main.Reply_Message(event.reply_token, text)
print('Text!:\t',text)
@handler.add(MessageEvent, message=ImageMessage)
def handle_image_message(event):
if(event.message.type=='image'):
print('image!')
@handler.add(MessageEvent, message=VideoMessage)
def handle_video_message(event):
if(event.message.type=='video'):
print('video!')
@handler.add(MessageEvent, message=LocationMessage)
def handle_location_message(event):
if (event.message.type == 'location'):
print('location!')
lat=event.message.latitude
lng = event.message.longitude
Total=''
for i in Mask.Mask_Search.Return_Nearby(lng,lat):
Total+=str(i)+'\n'
Line_Main.Reply_Message(event.reply_token, Total)
@handler.add(MessageEvent, message=StickerMessage)
def handle_sticker_message(event):
if (event.message.type == 'sticker'):
print('sticker!')
@handler.add(MessageEvent, message=AudioMessage)
def handle_audio_message(event):
if (event.message.type == 'audio'):
print('audio!')
@handler.add(MessageEvent, message=FileMessage)
def handle_file_message(event):
if (event.message.type == 'file'):
print('file!')
@handler.add(FollowEvent)
def handle_follow(event):
print('FollowEvent')
@handler.add(UnfollowEvent)
def handle_unfollow(event):
print('UnfollowEvent')
@handler.add(JoinEvent)
def handle_join(event):
print('JoinEvent')
@handler.add(LeaveEvent)
def handle_leave(event):
print('LeaveEvent')
@handler.add(PostbackEvent)
def handle_postback(event):
print('PostbackEvent')
@handler.add(BeaconEvent)
def handle_beacon(event):
print('BeaconEvent')
@handler.add(MemberJoinedEvent)
def handle_member_joined(event):
print('MemberJoinedEvent')
@handler.add(MemberLeftEvent)
def handle_member_left(event):
print('MemberLeftEvent')
if __name__ == "__main__":
app.run()
```
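The `config.get` calls above imply a `key.ini` of roughly the following shape (placeholder values; the real token and secret come from the LINE developers console):
```ini
[line-bot]
channel_access_token = YOUR_CHANNEL_ACCESS_TOKEN
channel_secret = YOUR_CHANNEL_SECRET
```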
#### File: Python_Mask_Bot_TaiwanOnly_JE/Models/Map_Search.py
```python
import webbrowser
import folium  # import the folium package
from geopy.geocoders import Nominatim
from math import radians, cos, sin, asin, sqrt
class Map_Search():
def __init__(self):
self.File_Name=".html"
self.geolocator = Nominatim(user_agent="Test")
def Create_Fmap(self,list_location=[22.632082,120.299156],zoom_start=15):
# Create the map and set its starting location
self.fmap = folium.Map(location=list_location, zoom_start=zoom_start)
# ----------------------------------------------------------------------------------------------
'''
Configure an icon (map pin).
icon type :
exclamation mark : 'info-sign'
colour : 'green'
cloud : 'cloud'
'''
#
def Set_Icon(self,list_location,width,height,coord_str='22.632082,120.299156',Icon_Text='六合夜市\nlat/lon: ',icon_color='green'):
# Text shown when the icon is clicked (the parameter was named 'str'
# and shadowed the builtin, so it is renamed to coord_str here)
Icon_Text = Icon_Text+coord_str
# Frame that pops up when the icon is clicked
iframe = folium.IFrame(Icon_Text, width=width, height=height)
popup = folium.Popup(iframe, max_width=250)
# Place the marker
Place = folium.Marker(location=list_location, popup=popup,
icon=folium.Icon(icon_color=icon_color))
# Add it to the map
self.fmap.add_child(Place)
# Place additional icons
def Make_More_Nail(self,location=[22.632082,120.299156],popup='六合夜市',icon_color='green'):
folium.Marker(
location=location,
popup=popup,
icon=folium.Icon(color=icon_color)
).add_to(self.fmap)
# Clicking a point on the map shows its latitude/longitude
def Set_Fmap_Get_location(self):
self.fmap.add_child(folium.LatLngPopup())
# Clicking a point on the map drops a marker
def Realtime_Nail(self):
self.fmap.add_child(folium.ClickForMarker(popup='Waypoint'))
# ----------------------------------------------------------------------------------------------
# Extension for the HTML file to save
def Set_File_Name(self,File_Name):
self.File_Name=File_Name
# Save the map to a file
def Save_Map(self,Map_Name):
self.fmap.save(Map_Name+self.File_Name)
return Map_Name+".html"
# Open the saved map
def Open_Map(self,Map_Name):
webbrowser.open_new_tab(Map_Name+self.File_Name)
return Map_Name+".html"
# Save and open
def Save_Open(self,Map_Name):
self.fmap.save(Map_Name+self.File_Name)
webbrowser.open_new_tab(Map_Name + self.File_Name)
# ----------------------------------------------------------------------------------------------
# Set the place to geocode
def Set_Geocode(self,Geocode_Text="六合夜市"):
self.location = self.geolocator.geocode(Geocode_Text)
if(self.location==None):
# the original called self.Log.Debug here, but no Log attribute exists
print("Wrong Address")
return "A more detailed address is needed"
else:
return self.location
# Get the address
def Get_Address(self):
print(self.location.address)
return self.location.address
# Get the latitude and longitude
def Get_Lat_Lon(self):
print((self.location.latitude, self.location.longitude))
return self.location.latitude, self.location.longitude
# Get the raw geocoder data
def Get_Raw(self):
print(self.location.raw)
return self.location.raw
# Print all details
def Print_All_Detail(self):
print(self.location.address)
print((self.location.latitude, self.location.longitude))
print(self.location.raw)
# ----------------------------------------------------------------------------------------------
# Draw a polyline
def Map_Draw_Line(self,Fill=True,*args):
'''
[22.73264868398435, 120.28450012207031],
[22.72837380478485, 120.28450012207031],
[22.723307108275556, 120.28604507446288]
'''
folium.PolyLine(locations=args,fill=Fill).add_to(self.fmap)
# Draw a polygon area
def Map_Draw_Polygon(self,Fill=True,*args):
'''
locations=[
[22.73264868398435, 120.28450012207031],
[22.72837380478485, 120.28450012207031],
[22.723307108275556, 120.28604507446288]
]
'''
folium.Polygon(locations=args,fill=Fill).add_to(self.fmap)
# Draw a rectangular area
def Map_Draw_Rectangle(self,Fill=True,*args):
'''
[
[22.727344647244575, 120.27111053466797],
[22.739219071089853, 120.29419898986816]
]
'''
# folium.Rectangle takes its corner points as 'bounds', not 'locations'
folium.Rectangle(bounds=list(args),fill=Fill).add_to(self.fmap)
# Draw a circular area
# Uses real-world size (metres)
def Map_Draw_Circle(self,Radius=10,Fill=True,*args):
'''
[22.73444963475145, 120.28458595275877],
'''
# folium.Circle takes a single point as 'location'
folium.Circle(location=args[0],radius=Radius,fill=Fill).add_to(self.fmap)
# Draw a circle-marker area
# Uses map (pixel) size
def Map_Draw_CircleMaker(self,Radius=10,Fill=True,*args):
'''
[22.73444963475145, 120.28458595275877]
'''
folium.CircleMarker(location=args[0],radius=Radius,fill=Fill).add_to(self.fmap)
# ----------------------------------------------------------------------------------------------
# Overlay an image on a map area
def Map_ImageOverlay(self,ImageUrl= 'https://opendata.cwb.gov.tw/fileapi/opendata/MSC/O-B0028-003.jpg',*args):
'''
imageBounds = [
[18.600625745, 115.976888855],
[27.79937425, 126.02300114]
]
'''
# list(args) keeps the two corner points at the expected nesting level
ImageBounds = list(args)
folium.raster_layers.ImageOverlay(
ImageUrl,
ImageBounds,
opacity=0.4
).add_to(self.fmap)
# Overlay a video on a map area
def Map_VideoOverlay(self, VideoUrl = 'https://www.mapbox.com/bites/00188/patricia_nasa.webm',*args):
# videoBounds = [[32, -130], [13, -100]]
VideoBounds = list(args)
folium.raster_layers.VideoOverlay(
VideoUrl,
VideoBounds
).add_to(self.fmap)
# the original referenced self.myMap, which does not exist on this class
self.fmap.fit_bounds(VideoBounds)
# ----------------------------------------------------------------------------------------------
# Compute the distance between two places
def Get_Haversine(self,lon1, lat1, lon2, lat2): # lon1, lat1, lon2, lat2 (decimal degrees)
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# Haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
r = 6371 # mean Earth radius in kilometres
return c * r
```
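A self-contained sanity check of the haversine helper above: two points one degree of latitude apart on the same meridian are about 111 km apart (6371 km x pi / 180):
```python
from math import radians, cos, sin, asin, sqrt

def haversine(lon1, lat1, lon2, lat2):
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * asin(sqrt(a)) * 6371

print(round(haversine(120.0, 22.0, 120.0, 23.0), 1))  # ~111.2 km
```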
#### File: Python_MiniGame_Fighter/Core/Python_Game.py
```python
import tkinter as tk
from tkinter import messagebox
from Models.Enviroment.Canvas_Thread import Canvas_Thread
from Models.Music.Music_Thread import Music_Thread
# Music playback thread for the menu
Menu_Music = Music_Thread(r"..\Source\Music\Astral_-_Astral_Travel.mp3", id="Menu_Music")
# Callback for the Play button
def game_start_button():
# Destroy the Play button
Start_Button.destroy()
# Destroy the Explain button
Explain_Button.destroy()
# Add a canvas
Canvas = tk.Canvas()
# Make the canvas fill the whole window
Canvas.place(x=0, y=0, relwidth=1, relheight=1)
# Start the game thread
thread_ready(Canvas_Thread(window=window, Canvas=Canvas, id="Game_Start"))
# Change the window title
window.title("Game-Playing")
# Stop the menu music
Menu_Music.Dead()
# Callback for the Explain button
def explain_button():
tk.messagebox.showinfo(title="How to Play", message="Use the arrow keys to dodge incoming enemy planes\nPress space to shoot and down enemy planes\nColliding with an enemy ends the game\nTurn the sound on: there are 2 theme songs")
# Initial setup
window = tk.Tk()
# Window size 500x500
window.geometry("500x500")
# Black window background
window.configure(background='black')
# Disable resizing
window.resizable(0, 0)
# Set the title
window.title("JE-Chen-Game-Start-Scene")
# Load the background image
BackGround_Photo = tk.PhotoImage(file="../Source/Picture/back_ground.png")
# Place the background in a label
BackGround = tk.Label(window, image=BackGround_Photo)
# Fill the whole window
BackGround.place(x=0, y=0, relwidth=1, relheight=1)
# Place the Play button, padded 5 below the widget above
Start_Button = tk.Button(text="Play", font=('Arial', 12), command=game_start_button)
Start_Button.pack(anchor='center', pady=5)
# Place the Explain button, padded 5 below the widget above
Explain_Button = tk.Button(text="Explain", font=('Arial', 12), command=explain_button)
Explain_Button.pack(anchor='center', pady=5)
# Helper to start a thread
def thread_ready(Thread):
# Daemonize so threads die together with the main program
Thread.setDaemon(True)
# Start the thread
Thread.start()
# Runs whenever the module is executed rather than imported
def main():
# Start playing the menu music
thread_ready(Menu_Music)
# Show the instructions
tk.messagebox.showinfo(title="How to Play", message="Use the arrow keys to dodge incoming enemy planes\nPress space to shoot and down enemy planes\nColliding with an enemy ends the game\nTurn the sound on: there are 2 theme songs")
# Called only when run directly
if __name__ == "__main__":
main()
window.mainloop()
```
#### File: Models/Enviroment/Canvas_Thread.py
```python
import threading
import time
# import pyttsx3
from datetime import datetime
from tkinter import messagebox
from PIL import Image, ImageTk
from Models.Enemy.Enemy import Enemy
from Models.Enviroment.BackGround import BackGround
from Models.Music.Music_Thread import Music_Thread
from Models.Objects.Bullet import Bullet
from Models.Player.Player import Player
from Models.Rule.Collision import Collision
# Canvas_Thread inherits from threading.Thread
class Canvas_Thread(threading.Thread):
# Runs when an instance is created
def __init__(self, window, Canvas, X=0, Y=0, Right_X=500, Bottom_Y=500, id="id", img=None):
# Initialize the thread
threading.Thread.__init__(self)
# Flag: is this thread still running
self.Alive = True
# Image
self.img = img
# ID
self.id = id
# Window
self.window = window
# Canvas
self.Canvas = Canvas
# Left X
self.X = X
# Top Y
self.Y = Y
# Right X
self.Right_X = Right_X
# Bottom Y
self.Bottom_Y = Bottom_Y
# Collision detection
self.Collision = Collision()
self.Bullet_Collision = Collision()
'''
# Speech synthesis engine
self.engine = pyttsx3.init()
# Get the list of available voices
voices = self.engine.getProperty('voices')
# Set the voice
self.engine.setProperty('voice', voices[1].id)
# Set the speaking rate
self.engine.setProperty('rate', 125)
'''
# Current time, used to scale difficulty (the longer you survive, the harder it gets)
self.Time = datetime.now()
# Difficulty setting: maximum number of enemies
self.Hard = 5
# Difficulty setting: maximum enemy speed
self.Speed = 10
# Player thread
self.Player_Born = None
# Background thread
self.BG = None
# Counter used to number enemies
self.Enemy_Count = 0
# Enemy list
self.Enemy_List = []
# Bullet list
self.Bullet_List = []
# Background image
self.Image_Open = Image.open("../Source/Picture/back_ground.png")
# Resize the image
self.Image_Open = self.Image_Open.resize((500, 500), Image.ANTIALIAS)
# Convert to a Tk image
self.Background_Png = ImageTk.PhotoImage(self.Image_Open)
# Player image
self.Image_Open = Image.open("../Source/Picture/air_01_blue.png")
# Resize the image
self.Image_Open = self.Image_Open.resize((50, 50), Image.ANTIALIAS)
# Convert to a Tk image
self.Player_Png = ImageTk.PhotoImage(self.Image_Open)
# Enemy image
self.Image_Open = Image.open("../Source/Picture/enemy_type_1.png")
# Resize the image
self.Image_Open = self.Image_Open.resize((50, 50), Image.ANTIALIAS)
# Convert to a Tk image
self.Enemy_Png = ImageTk.PhotoImage(self.Image_Open)
# Bullet image
self.Image_Open = Image.open("../Source/Picture/bullet_length.png")
# Resize the image
self.Image_Open = self.Image_Open.resize((10, 40), Image.ANTIALIAS)
# Convert to a Tk image
self.Player_Bullet_Png = ImageTk.PhotoImage(self.Image_Open)
# Music playback thread
self.Music = Music_Thread(r"..\Source\Music\Battle-Legendary.mp3", id="Start_Music")
# Helper that starts the thread
self.Thread_Ready(self.Music)
# Used to compute the score
self.Score_Time = datetime.now()
# Returned when str() is called on the instance
def __str__(self):
# str() wrappers added: the coordinates are ints and cannot be
# concatenated to strings directly
return str(self.X) + " self.X\n" + \
str(self.Y) + " self.Y\n" + \
str(self.Right_X) + " self.Right_X\n" + \
str(self.Bottom_Y) + " self.Bottom_Y\n" + \
str(self.id) + " self.id\n" + \
str(self.img) + " self.img"
# run() is called once the thread is started
def run(self):
# When the state is "game start"
if (self.id == "Game_Start"):
# Background thread
self.BG = BackGround(self.Canvas, self.X, self.Y, self.Right_X, self.Bottom_Y, id="BackGround",
img=self.Background_Png)
self.Thread_Ready(self.BG)
# Player thread
self.Player_Born = Player(self.Canvas, self.X, self.Y, self.Right_X, self.Bottom_Y, id="Player",
img=self.Player_Png)
self.Thread_Ready(self.Player_Born)
'''
# Synthesize speech and say 'Game Start'
self.engine.say('Game Start')
self.engine.runAndWait()
'''
# While this instance is alive (Alive flag is True) and this thread is the game-start thread
while self.Alive and self.id == "Game_Start":
# Pause 0.1 s on every tick
time.sleep(0.1)
# Remove dead enemies from the list
self.clean_enemy()
# Remove dead bullets from the list
self.clean_bullet()
# Spawn enemies until the count reaches the difficulty cap
if (len(self.Enemy_List) < self.Hard):
Enemy_Born = Enemy(self.Canvas, self.X, self.Y, self.Right_X, self.Bottom_Y,
id="Enemy" + str(self.Enemy_Count), img=self.Enemy_Png, Speed=self.Speed)
# Increment the enemy counter
self.Enemy_Count += 1
# Spawn the enemy
self.Thread_Ready(Enemy_Born)
# Add it to the list
self.Enemy_List.append(Enemy_Born)
# Walk the enemy list
for enemy in range(len(self.Enemy_List)):
# If the enemy is still alive
if (self.Enemy_List[enemy].Alive == True):
# Check for a collision with the player
if (self.Collision.Is_Collision(Object_X=self.Enemy_List[enemy].X,
Object_Y=self.Enemy_List[enemy].Bottom_Y,
Object_Width=self.Enemy_List[enemy].Width,
Object_Height=self.Enemy_List[enemy].Height,
Collision_X=self.Player_Born.X,
Collision_Y=self.Player_Born.Bottom_Y,
Collision_Width=self.Player_Born.Width,
Collision_Heihgt=self.Player_Born.Height)):
# Reset to False, ready for the next collision
self.Collision.Now_Collision = False
# Colliding with an enemy is an immediate game over
self.Game_Over()
# Walk the bullet list
for bullet in range(len(self.Bullet_List)):
# If the bullet is alive, check whether it hit this enemy
if (self.Bullet_Collision.Is_Collision(Object_X=self.Bullet_List[bullet].X,
Object_Y=self.Bullet_List[bullet].Bottom_Y,
Object_Width=self.Bullet_List[bullet].Width,
Object_Height=self.Bullet_List[bullet].Height,
Collision_X=self.Enemy_List[enemy].X,
Collision_Y=self.Enemy_List[enemy].Bottom_Y,
Collision_Width=self.Enemy_List[enemy].Width,
Collision_Heihgt=self.Enemy_List[enemy].Height)):
# An enemy hit by a bullet dies
self.Enemy_List[enemy].Dead()
# The bullet that hit it dies as well
self.Bullet_List[bullet].Dead()
# Reset to False, ready for the next collision
self.Bullet_Collision.Now_Collision = False
# If the player's shoot flag is set
if (self.Player_Born.Shoot == True):
# Spawn a bullet
Bullet_Born = Bullet(self.Canvas, self.Player_Born.X,
(self.Player_Born.Bottom_Y - (
self.Player_Born.Height) * 2) + self.Player_Born.Height, self.Right_X,
self.Bottom_Y - self.Player_Born.Height, id="Player_Bullet",
img=self.Player_Bullet_Png)
# Start the bullet thread
self.Thread_Ready(Bullet_Born)
# Add it to the bullet list
self.Bullet_List.append(Bullet_Born)
# Clear the player's shoot flag
self.Player_Born.Shoot = False
# Time reference for difficulty scaling
Last_Time = datetime.now()
# Increase the difficulty every 10 seconds
if ((Last_Time - self.Time).seconds > 10):
if (self.Hard < 20):
self.Hard += 1
else:
print("Hard Over")
if (self.Speed < 40):
self.Speed += 0.5
else:
print("Speed Over")
self.Time = datetime.now()
# Thread start helper
def Thread_Ready(self, Thread):
Thread.setDaemon(True)
Thread.start()
# Remove bullets that have died
def clean_bullet(self):
# A list comprehension replaces the original remove-while-indexing
# recursion, which could raise and retry indefinitely
self.Bullet_List = [bullet for bullet in self.Bullet_List if bullet.Alive]
# Remove enemies that have died
def clean_enemy(self):
self.Enemy_List = [enemy for enemy in self.Enemy_List if enemy.Alive]
# Return whether this thread is alive
def Return_Alive(self):
return self.Alive
# Die
def Dead(self):
self.Canvas.delete("all")
print(self.id + " Dead")
self.Alive = False
del self
# Game-over event
def Game_Over(self):
self.Alive = False
# Clear every list, the background, and the player
for Dead_All in range(len(self.Enemy_List)):
self.Enemy_List[Dead_All].Dead()
for Dead_All in range(len(self.Bullet_List)):
self.Bullet_List[Dead_All].Dead()
self.BG.Dead()
self.Player_Born.Dead()
# Clear the canvas
self.Canvas.delete("all")
# Stop the music
self.Music.Dead()
'''
# After a 0.3 s pause, say 'Game Over' and show the score
time.sleep(0.3)
self.engine.say('Game Over')
self.engine.runAndWait()
'''
# Compute the score
Last_Time = datetime.now()
print("Score: ", (Last_Time - self.Score_Time).seconds)
# Show the score
if (messagebox.askyesno("Your Score", "Score: " + str((Last_Time - self.Score_Time).seconds))):
self.window.destroy()
else:
self.window.destroy()
```
#### File: Models/Objects/Bullet.py
```python
import threading
import time
# Bullet inherits from threading.Thread
class Bullet(threading.Thread):
# Runs when an instance is created
def __init__(self, Canvas, X=0, Y=0, Right_X=500, Bottom_Y=500, id="id", Bullet_id="Player1", img=None, Height=50,
Width=50):
# Initialize the thread
threading.Thread.__init__(self)
# Flag: is this thread still running
self.Alive = True
# Image
self.img = img
# Id of this thread
self.id = id
# Bullet id
self.Bullet_id = Bullet_id
# Canvas
self.Canvas = Canvas
# Left X
self.X = X
# Top Y
self.Y = Y
# Height
self.Height = Height
# Width
self.Width = Width
# Right X
self.Right_X = Right_X
# Bottom Y
self.Bottom_Y = Bottom_Y
# run() is called once the thread is started
def run(self):
# While alive
while self.Alive:
# Bullets fired by an enemy move down; bullets fired by the player move up
if (self.id.startswith("Enemy")):
self.Y += 5
elif (self.id.startswith("Player")):
self.Y -= 5
# Kill the bullet once it leaves the 500x500 playfield (the original
# repeated these four checks in both branches)
if (self.X >= 500 or self.X <= 0 or self.Y >= 500 or self.Y <= 0):
self.Dead()
try:
# Redraw every 0.05 seconds
time.sleep(0.05)
self.Canvas.delete(self.id)
item = self.Canvas.create_image(self.X, self.Y, image=self.img)
self.Canvas.itemconfig(item, tag=self.id)
except AttributeError:
print(self.id + " No Canvas")
print(self.id + " Break")
break
except:
print(self.id + " critical error")
print(self.id + " Break")
break
# Erase the sprite once dead
if (not self.Alive):
self.Canvas.delete(self.id)
def Return_Alive(self):
return self.Alive
def Dead(self):
try:
self.Canvas.delete(self.id)
print(self.id + " Dead")
self.Alive = False
del self
except:
print(self.id + " Canvas Error")
```
#### File: Models/Window/Window_Multi_Toplevel.py
```python
import threading
import tkinter as tk
# Used to spawn additional top-level windows
class Window_Multi_Toplevel(threading.Thread):
def __init__(self, window, Title="JE-Chen-Game", Str_Height_Width='500x500', id="id", event=None):
threading.Thread.__init__(self)
self.window = tk.Toplevel(window)
self.Canvas = None
self.Alive = True
self.id = id
self.Str_Height_Width = Str_Height_Width
self.Title = Title
# Set the title
self.window.title(self.Title)
# Set the size
self.window.geometry(self.Str_Height_Width)
# Disable resizing
self.window.resizable(False, False)
# Set the handler to run when the window is closed
if (event == None):
self.window.protocol("WM_DELETE_WINDOW", self.Close)
else:
self.event = event
self.window.protocol("WM_DELETE_WINDOW", self.event)
# (the original called self.Close() at this point, destroying the
# window as soon as it was created; that stray call is removed)
# Update the window
def run(self):
while self.Alive:
self.window.update()
def Dead(self):
self.Alive = False
self.window.destroy()
del self
# Handler run when the window is closed
def Close(self):
self.window.destroy()
print(self.id + ' Closed')
del self
# Add a label
def Add_Widget_Label(self, text="Hello World", font=('Arial', 12), fill='x', side='left', anchor='center'):
self.Label = tk.Label(self.window, text=text, font=font)
self.Label.pack(fill=fill, side=side, anchor=anchor)
# Add a button
def Add_Widget_Button(self, command, text="Hello World", font=('Arial', 12), fill='x', side='left', anchor='center'):
self.Button = tk.Button(self.window, command=command, text=text, font=font)
self.Button.pack(fill=fill, side=side, anchor=anchor)
# Add a canvas
def Add_Widget_Canvas(self, bg='white', height=500, width=500):
self.Canvas = tk.Canvas(self.window, bg=bg, height=height, width=width)
self.Canvas.pack()
# Return the canvas
def Return_Widget_Canvas(self):
if (self.Canvas != None):
return self.Canvas
else:
print("No Canvas")
# Return the window
def Return_Widget_Window(self):
if (self.Canvas != None):
return self.window
else:
print("No window")
```
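A usage sketch for the class above (opens real Tk windows, so it needs a desktop session; note that tkinter is not thread-safe in general, and the threaded update loop here simply mirrors the class's own design):
```python
import tkinter as tk

root = tk.Tk()
top = Window_Multi_Toplevel(root, Title="Demo", id="demo")
top.Add_Widget_Label(text="Hello")
top.start()      # runs the update loop in its own thread
root.mainloop()
```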
#### File: pygame/examples/blend_fill.py
```python
import os
import pygame as pg
from pygame import K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9
def usage():
print("Press R, G, B to increase the color channel values,")
print("1-9 to set the step range for the increment,")
print("A - ADD, S- SUB, M- MULT, - MIN, + MAX")
print(" to change the blend modes")
main_dir = os.path.split(os.path.abspath(__file__))[0]
data_dir = os.path.join(main_dir, "data")
def main():
color = [0, 0, 0]
changed = False
blendtype = 0
step = 5
pg.init()
screen = pg.display.set_mode((640, 480), 0, 32)
screen.fill((100, 100, 100))
image = pg.image.load(os.path.join(data_dir, "liquid.bmp")).convert()
blendimage = pg.image.load(os.path.join(data_dir, "liquid.bmp")).convert()
screen.blit(image, (10, 10))
screen.blit(blendimage, (200, 10))
pg.display.flip()
pg.key.set_repeat(500, 30)
usage()
going = True
while going:
for event in pg.event.get():
if event.type == pg.QUIT:
going = False
if event.type == pg.KEYDOWN:
usage()
if event.key == pg.K_ESCAPE:
going = False
if event.key == pg.K_r:
color[0] += step
if color[0] > 255:
color[0] = 0
changed = True
elif event.key == pg.K_g:
color[1] += step
if color[1] > 255:
color[1] = 0
changed = True
elif event.key == pg.K_b:
color[2] += step
if color[2] > 255:
color[2] = 0
changed = True
elif event.key == pg.K_a:
blendtype = pg.BLEND_ADD
changed = True
elif event.key == pg.K_s:
blendtype = pg.BLEND_SUB
changed = True
elif event.key == pg.K_m:
blendtype = pg.BLEND_MULT
changed = True
elif event.key == pg.K_PLUS:
blendtype = pg.BLEND_MAX
changed = True
elif event.key == pg.K_MINUS:
blendtype = pg.BLEND_MIN
changed = True
elif event.key in (K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9):
step = int(event.unicode)
if changed:
screen.fill((100, 100, 100))
screen.blit(image, (10, 10))
blendimage.blit(image, (0, 0))
# blendimage.fill (color, (0, 0, 20, 20), blendtype)
blendimage.fill(color, None, blendtype)
screen.blit(blendimage, (200, 10))
print(
"Color: %s, Pixel (0,0): %s"
% (tuple(color), [blendimage.get_at((0, 0))])
)
changed = False
pg.display.flip()
pg.quit()
if __name__ == "__main__":
main()
```
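A headless sanity check of the fill-with-blend-mode call the example loop uses (no window required):
```python
import pygame as pg

surf = pg.Surface((1, 1))
surf.fill((100, 100, 100))
surf.fill((30, 0, 0), None, pg.BLEND_ADD)   # additive blend over the whole surface
print(surf.get_at((0, 0)))                  # (130, 100, 100, 255)
```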
#### File: pygame/examples/glcube.py
```python
import math
import ctypes
import pygame as pg
try:
import OpenGL.GL as GL
import OpenGL.GLU as GLU
except ImportError:
print("pyopengl missing. The GLCUBE example requires: pyopengl numpy")
raise SystemExit
try:
from numpy import array, dot, eye, zeros, float32, uint32
except ImportError:
print("numpy missing. The GLCUBE example requires: pyopengl numpy")
raise SystemExit
# do we want to use the 'modern' OpenGL API or the old one?
# This example shows you how to do both.
USE_MODERN_GL = True
# Some simple data for a colored cube here we have the 3D point position
# and color for each corner. A list of indices describes each face, and a
# list of indices describes each edge.
CUBE_POINTS = (
(0.5, -0.5, -0.5),
(0.5, 0.5, -0.5),
(-0.5, 0.5, -0.5),
(-0.5, -0.5, -0.5),
(0.5, -0.5, 0.5),
(0.5, 0.5, 0.5),
(-0.5, -0.5, 0.5),
(-0.5, 0.5, 0.5),
)
# colors are 0-1 floating values
CUBE_COLORS = (
(1, 0, 0),
(1, 1, 0),
(0, 1, 0),
(0, 0, 0),
(1, 0, 1),
(1, 1, 1),
(0, 0, 1),
(0, 1, 1),
)
CUBE_QUAD_VERTS = (
(0, 1, 2, 3),
(3, 2, 7, 6),
(6, 7, 5, 4),
(4, 5, 1, 0),
(1, 5, 7, 2),
(4, 0, 3, 6),
)
CUBE_EDGES = (
(0, 1),
(0, 3),
(0, 4),
(2, 1),
(2, 3),
(2, 7),
(6, 3),
(6, 4),
(6, 7),
(5, 1),
(5, 4),
(5, 7),
)
def translate(matrix, x=0.0, y=0.0, z=0.0):
"""
Translate (move) a matrix in the x, y and z axes.
:param matrix: Matrix to translate.
:param x: direction and magnitude to translate in x axis. Defaults to 0.
:param y: direction and magnitude to translate in y axis. Defaults to 0.
:param z: direction and magnitude to translate in z axis. Defaults to 0.
:return: The translated matrix.
"""
translation_matrix = array(
[
[1.0, 0.0, 0.0, x],
[0.0, 1.0, 0.0, y],
[0.0, 0.0, 1.0, z],
[0.0, 0.0, 0.0, 1.0],
],
dtype=matrix.dtype,
).T
matrix[...] = dot(matrix, translation_matrix)
return matrix
def frustum(left, right, bottom, top, znear, zfar):
"""
Build a perspective matrix from the clipping planes, or camera 'frustum'
volume.
:param left: left position of the near clipping plane.
:param right: right position of the near clipping plane.
:param bottom: bottom position of the near clipping plane.
:param top: top position of the near clipping plane.
:param znear: z depth of the near clipping plane.
:param zfar: z depth of the far clipping plane.
:return: A perspective matrix.
"""
perspective_matrix = zeros((4, 4), dtype=float32)
perspective_matrix[0, 0] = +2.0 * znear / (right - left)
perspective_matrix[2, 0] = (right + left) / (right - left)
perspective_matrix[1, 1] = +2.0 * znear / (top - bottom)
perspective_matrix[3, 1] = (top + bottom) / (top - bottom)
perspective_matrix[2, 2] = -(zfar + znear) / (zfar - znear)
perspective_matrix[3, 2] = -2.0 * znear * zfar / (zfar - znear)
perspective_matrix[2, 3] = -1.0
return perspective_matrix
def perspective(fovy, aspect, znear, zfar):
"""
Build a perspective matrix from field of view, aspect ratio and depth
planes.
:param fovy: the field of view angle in the y axis.
:param aspect: aspect ratio of our view port.
:param znear: z depth of the near clipping plane.
:param zfar: z depth of the far clipping plane.
:return: A perspective matrix.
"""
h = math.tan(fovy / 360.0 * math.pi) * znear
w = h * aspect
return frustum(-w, w, -h, h, znear, zfar)
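# Quick check (not part of the original example): for fovy=45 and aspect=1
# the matrix returned above has [0][0] == [1][1] == 1/tan(22.5 deg) ~= 2.414,
# the focal-length terms of a symmetric frustum.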
def rotate(matrix, angle, x, y, z):
"""
Rotate a matrix around an axis.
:param matrix: The matrix to rotate.
:param angle: The angle to rotate by.
:param x: x of axis to rotate around.
:param y: y of axis to rotate around.
:param z: z of axis to rotate around.
:return: The rotated matrix
"""
angle = math.pi * angle / 180
c, s = math.cos(angle), math.sin(angle)
n = math.sqrt(x * x + y * y + z * z)
x, y, z = x / n, y / n, z / n
cx, cy, cz = (1 - c) * x, (1 - c) * y, (1 - c) * z
rotation_matrix = array(
[
[cx * x + c, cy * x - z * s, cz * x + y * s, 0],
[cx * y + z * s, cy * y + c, cz * y - x * s, 0],
[cx * z - y * s, cy * z + x * s, cz * z + c, 0],
[0, 0, 0, 1],
],
dtype=matrix.dtype,
).T
matrix[...] = dot(matrix, rotation_matrix)
return matrix
class Rotation:
"""
Data class that stores rotation angles in three axes.
"""
def __init__(self):
self.theta = 20
self.phi = 40
self.psi = 25
def drawcube_old():
"""
Draw the cube using the old open GL methods pre 3.2 core context.
"""
allpoints = list(zip(CUBE_POINTS, CUBE_COLORS))
GL.glBegin(GL.GL_QUADS)
for face in CUBE_QUAD_VERTS:
for vert in face:
pos, color = allpoints[vert]
GL.glColor3fv(color)
GL.glVertex3fv(pos)
GL.glEnd()
GL.glColor3f(1.0, 1.0, 1.0)
GL.glBegin(GL.GL_LINES)
for line in CUBE_EDGES:
for vert in line:
pos, color = allpoints[vert]
GL.glVertex3fv(pos)
GL.glEnd()
def init_gl_stuff_old():
"""
Initialise open GL, prior to core context 3.2
"""
GL.glEnable(GL.GL_DEPTH_TEST) # use our zbuffer
# setup the camera
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
GLU.gluPerspective(45.0, 640 / 480.0, 0.1, 100.0) # setup lens
GL.glTranslatef(0.0, 0.0, -3.0) # move back
GL.glRotatef(25, 1, 0, 0) # orbit higher
def init_gl_modern(display_size):
"""
Initialise open GL in the 'modern' open GL style for open GL versions
greater than 3.1.
:param display_size: Size of the window/viewport.
"""
# Create shaders
# --------------------------------------
vertex_code = """
#version 150
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform vec4 colour_mul;
uniform vec4 colour_add;
in vec4 vertex_colour; // vertex colour in
in vec3 vertex_position;
out vec4 vertex_color_out; // vertex colour out
void main()
{
vertex_color_out = (colour_mul * vertex_colour) + colour_add;
gl_Position = projection * view * model * vec4(vertex_position, 1.0);
}
"""
fragment_code = """
#version 150
in vec4 vertex_color_out; // vertex colour from vertex shader
out vec4 fragColor;
void main()
{
fragColor = vertex_color_out;
}
"""
program = GL.glCreateProgram()
vertex = GL.glCreateShader(GL.GL_VERTEX_SHADER)
fragment = GL.glCreateShader(GL.GL_FRAGMENT_SHADER)
GL.glShaderSource(vertex, vertex_code)
GL.glCompileShader(vertex)
# this logs issues the shader compiler finds.
log = GL.glGetShaderInfoLog(vertex)
if isinstance(log, bytes):
log = log.decode()
for line in log.split("\n"):
print(line)
GL.glAttachShader(program, vertex)
GL.glShaderSource(fragment, fragment_code)
GL.glCompileShader(fragment)
# this logs issues the shader compiler finds.
log = GL.glGetShaderInfoLog(fragment)
if isinstance(log, bytes):
log = log.decode()
for line in log.split("\n"):
print(line)
GL.glAttachShader(program, fragment)
GL.glValidateProgram(program)
GL.glLinkProgram(program)
GL.glDetachShader(program, vertex)
GL.glDetachShader(program, fragment)
GL.glUseProgram(program)
# Create vertex buffers and shader constants
# ------------------------------------------
# Cube Data
vertices = zeros(
8, [("vertex_position", float32, 3), ("vertex_colour", float32, 4)]
)
vertices["vertex_position"] = [
[1, 1, 1],
[-1, 1, 1],
[-1, -1, 1],
[1, -1, 1],
[1, -1, -1],
[1, 1, -1],
[-1, 1, -1],
[-1, -1, -1],
]
vertices["vertex_colour"] = [
[0, 1, 1, 1],
[0, 0, 1, 1],
[0, 0, 0, 1],
[0, 1, 0, 1],
[1, 1, 0, 1],
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 0, 0, 1],
]
filled_cube_indices = array(
[
0,
1,
2,
0,
2,
3,
0,
3,
4,
0,
4,
5,
0,
5,
6,
0,
6,
1,
1,
6,
7,
1,
7,
2,
7,
4,
3,
7,
3,
2,
4,
7,
6,
4,
6,
5,
],
dtype=uint32,
)
outline_cube_indices = array(
[0, 1, 1, 2, 2, 3, 3, 0, 4, 7, 7, 6, 6, 5, 5, 4, 0, 5, 1, 6, 2, 7, 3, 4],
dtype=uint32,
)
shader_data = {"buffer": {}, "constants": {}}
GL.glBindVertexArray(GL.glGenVertexArrays(1)) # Have to do this first
shader_data["buffer"]["vertices"] = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, shader_data["buffer"]["vertices"])
GL.glBufferData(GL.GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL.GL_DYNAMIC_DRAW)
stride = vertices.strides[0]
offset = ctypes.c_void_p(0)
loc = GL.glGetAttribLocation(program, "vertex_position")
GL.glEnableVertexAttribArray(loc)
GL.glVertexAttribPointer(loc, 3, GL.GL_FLOAT, False, stride, offset)
offset = ctypes.c_void_p(vertices.dtype["vertex_position"].itemsize)
loc = GL.glGetAttribLocation(program, "vertex_colour")
GL.glEnableVertexAttribArray(loc)
GL.glVertexAttribPointer(loc, 4, GL.GL_FLOAT, False, stride, offset)
shader_data["buffer"]["filled"] = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, shader_data["buffer"]["filled"])
GL.glBufferData(
GL.GL_ELEMENT_ARRAY_BUFFER,
filled_cube_indices.nbytes,
filled_cube_indices,
GL.GL_STATIC_DRAW,
)
shader_data["buffer"]["outline"] = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, shader_data["buffer"]["outline"])
GL.glBufferData(
GL.GL_ELEMENT_ARRAY_BUFFER,
outline_cube_indices.nbytes,
outline_cube_indices,
GL.GL_STATIC_DRAW,
)
shader_data["constants"]["model"] = GL.glGetUniformLocation(program, "model")
GL.glUniformMatrix4fv(shader_data["constants"]["model"], 1, False, eye(4))
shader_data["constants"]["view"] = GL.glGetUniformLocation(program, "view")
view = translate(eye(4), z=-6)
GL.glUniformMatrix4fv(shader_data["constants"]["view"], 1, False, view)
shader_data["constants"]["projection"] = GL.glGetUniformLocation(
program, "projection"
)
GL.glUniformMatrix4fv(shader_data["constants"]["projection"], 1, False, eye(4))
# This colour is multiplied with the base vertex colour in producing
# the final output
shader_data["constants"]["colour_mul"] = GL.glGetUniformLocation(
program, "colour_mul"
)
GL.glUniform4f(shader_data["constants"]["colour_mul"], 1, 1, 1, 1)
# This colour is added on to the base vertex colour in producing
# the final output
shader_data["constants"]["colour_add"] = GL.glGetUniformLocation(
program, "colour_add"
)
GL.glUniform4f(shader_data["constants"]["colour_add"], 0, 0, 0, 0)
# Set GL drawing data
# -------------------
GL.glClearColor(0, 0, 0, 0)
GL.glPolygonOffset(1, 1)
GL.glEnable(GL.GL_LINE_SMOOTH)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glDepthFunc(GL.GL_LESS)
GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)
GL.glLineWidth(1.0)
projection = perspective(45.0, display_size[0] / float(display_size[1]), 2.0, 100.0)
GL.glUniformMatrix4fv(shader_data["constants"]["projection"], 1, False, projection)
return shader_data, filled_cube_indices, outline_cube_indices
def draw_cube_modern(shader_data, filled_cube_indices, outline_cube_indices, rotation):
"""
    Draw a cube in the 'modern' OpenGL style, for post-3.1 versions of
    OpenGL.
    :param shader_data: compiled vertex & pixel shader data for drawing a cube.
:param filled_cube_indices: the indices to draw the 'filled' cube.
:param outline_cube_indices: the indices to draw the 'outline' cube.
:param rotation: the current rotations to apply.
"""
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
# Filled cube
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
GL.glUniform4f(shader_data["constants"]["colour_mul"], 1, 1, 1, 1)
GL.glUniform4f(shader_data["constants"]["colour_add"], 0, 0, 0, 0.0)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, shader_data["buffer"]["filled"])
GL.glDrawElements(
GL.GL_TRIANGLES, len(filled_cube_indices), GL.GL_UNSIGNED_INT, None
)
# Outlined cube
GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)
GL.glEnable(GL.GL_BLEND)
GL.glUniform4f(shader_data["constants"]["colour_mul"], 0, 0, 0, 0.0)
GL.glUniform4f(shader_data["constants"]["colour_add"], 1, 1, 1, 1.0)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, shader_data["buffer"]["outline"])
GL.glDrawElements(GL.GL_LINES, len(outline_cube_indices), GL.GL_UNSIGNED_INT, None)
# Rotate cube
# rotation.theta += 1.0 # degrees
rotation.phi += 1.0 # degrees
# rotation.psi += 1.0 # degrees
model = eye(4, dtype=float32)
# rotate(model, rotation.theta, 0, 0, 1)
rotate(model, rotation.phi, 0, 1, 0)
rotate(model, rotation.psi, 1, 0, 0)
GL.glUniformMatrix4fv(shader_data["constants"]["model"], 1, False, model)
def main():
"""run the demo """
# initialize pygame and setup an opengl display
pg.init()
gl_version = (3, 0) # GL Version number (Major, Minor)
if USE_MODERN_GL:
gl_version = (3, 2) # GL Version number (Major, Minor)
        # By setting these attributes we can choose which OpenGL profile
        # to use; profiles 3.2 and above use a different rendering path
pg.display.gl_set_attribute(pg.GL_CONTEXT_MAJOR_VERSION, gl_version[0])
pg.display.gl_set_attribute(pg.GL_CONTEXT_MINOR_VERSION, gl_version[1])
pg.display.gl_set_attribute(
pg.GL_CONTEXT_PROFILE_MASK, pg.GL_CONTEXT_PROFILE_CORE
)
fullscreen = False # start in windowed mode
display_size = (640, 480)
pg.display.set_mode(display_size, pg.OPENGL | pg.DOUBLEBUF | pg.RESIZABLE)
if USE_MODERN_GL:
gpu, f_indices, o_indices = init_gl_modern(display_size)
rotation = Rotation()
else:
init_gl_stuff_old()
going = True
while going:
        # check for quit events
events = pg.event.get()
for event in events:
if event.type == pg.QUIT or (
event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE
):
going = False
elif event.type == pg.KEYDOWN and event.key == pg.K_f:
if not fullscreen:
print("Changing to FULLSCREEN")
pg.display.set_mode(
(640, 480), pg.OPENGL | pg.DOUBLEBUF | pg.FULLSCREEN
)
else:
print("Changing to windowed mode")
pg.display.set_mode((640, 480), pg.OPENGL | pg.DOUBLEBUF)
fullscreen = not fullscreen
if gl_version[0] >= 4 or (gl_version[0] == 3 and gl_version[1] >= 2):
gpu, f_indices, o_indices = init_gl_modern(display_size)
rotation = Rotation()
else:
init_gl_stuff_old()
if USE_MODERN_GL:
draw_cube_modern(gpu, f_indices, o_indices, rotation)
else:
# clear screen and move camera
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
# orbit camera around by 1 degree
GL.glRotatef(1, 0, 1, 0)
drawcube_old()
pg.display.flip()
pg.time.wait(10)
if __name__ == "__main__":
main()
```
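The demo above prints the shader info log but never checks the compile status, so a broken shader continues silently. A minimal sketch of a stricter helper (assumes PyOpenGL and a current GL context; `compile_shader` is a hypothetical name, not part of the example):

```python
from OpenGL import GL

def compile_shader(source, shader_type):
    """Compile a GLSL shader; raise RuntimeError with the info log on failure."""
    shader = GL.glCreateShader(shader_type)
    GL.glShaderSource(shader, source)
    GL.glCompileShader(shader)
    # GL_COMPILE_STATUS is GL_TRUE only when compilation succeeded.
    if GL.glGetShaderiv(shader, GL.GL_COMPILE_STATUS) != GL.GL_TRUE:
        log = GL.glGetShaderInfoLog(shader)
        raise RuntimeError(log.decode() if isinstance(log, bytes) else log)
    return shader
```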
#### File: pygame/examples/sprite_texture.py
```python
import os
import pygame as pg
if pg.get_sdl_version()[0] < 2:
raise SystemExit("This example requires pygame 2 and SDL2.")
from pygame._sdl2 import Window, Texture, Image, Renderer
data_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], "data")
def load_img(file):
return pg.image.load(os.path.join(data_dir, file))
pg.display.init()
pg.key.set_repeat(10, 10)
win = Window("asdf", resizable=True)
renderer = Renderer(win)
tex = Texture.from_surface(renderer, load_img("alien1.gif"))
class Something(pg.sprite.Sprite):
def __init__(self, img):
pg.sprite.Sprite.__init__(self)
self.rect = img.get_rect()
self.image = img
self.rect.w *= 5
self.rect.h *= 5
img.origin = self.rect.w / 2, self.rect.h / 2
sprite = Something(Image(tex, (0, 0, tex.width / 2, tex.height / 2)))
sprite.rect.x = 250
sprite.rect.y = 50
# sprite2 = Something(Image(sprite.image))
sprite2 = Something(Image(tex))
sprite2.rect.x = 250
sprite2.rect.y = 250
sprite2.rect.w /= 2
sprite2.rect.h /= 2
group = pg.sprite.Group()
group.add(sprite2)
group.add(sprite)
import math
t = 0
running = True
clock = pg.time.Clock()
renderer.draw_color = (255, 0, 0, 255)
while running:
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
elif event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
running = False
elif event.key == pg.K_LEFT:
sprite.rect.x -= 5
elif event.key == pg.K_RIGHT:
sprite.rect.x += 5
elif event.key == pg.K_DOWN:
sprite.rect.y += 5
elif event.key == pg.K_UP:
sprite.rect.y -= 5
renderer.clear()
t += 1
img = sprite.image
img.angle += 1
img.flipX = t % 50 < 25
img.flipY = t % 100 < 50
img.color[0] = int(255.0 * (0.5 + math.sin(0.5 * t + 10.0) / 2.0))
img.alpha = int(255.0 * (0.5 + math.sin(0.1 * t) / 2.0))
# img.draw(dstrect=(x, y, 5 * img.srcrect['w'], 5 * img.srcrect['h']))
group.draw(renderer)
renderer.present()
clock.tick(60)
    win.title = "FPS: {}".format(clock.get_fps())
```
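A condensed sketch of the `pygame._sdl2` draw loop used above (this API is experimental and may change between pygame releases; the window title and sizes are arbitrary):

```python
import pygame as pg
from pygame._sdl2 import Window, Renderer, Texture

pg.init()
win = Window("minimal _sdl2 demo", size=(320, 240))
renderer = Renderer(win)
# Any surface can be uploaded to the GPU as a texture.
tex = Texture.from_surface(renderer, pg.Surface((32, 32)))
renderer.clear()
tex.draw(dstrect=pg.Rect(10, 10, 64, 64))  # draw scaled up to 64x64
renderer.present()
```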
#### File: site-packages/pygame/ftfont.py
```python
__all__ = ['Font', 'init', 'quit', 'get_default_font', 'get_init', 'SysFont',
"match_font", "get_fonts"]
from pygame._freetype import init, Font as _Font, get_default_resolution
from pygame._freetype import quit, get_default_font, get_init as _get_init
from pygame._freetype import __PYGAMEinit__
from pygame.sysfont import match_font, get_fonts, SysFont as _SysFont
from pygame import encode_file_path
from pygame.compat import bytes_, unicode_, as_unicode, as_bytes
class Font(_Font):
"""Font(filename, size) -> Font
Font(object, size) -> Font
create a new Font object from a file (freetype alternative)
This Font type differs from font.Font in that it can render glyphs
for Unicode code points in the supplementary planes (> 0xFFFF).
"""
__encode_file_path = staticmethod(encode_file_path)
__get_default_resolution = staticmethod(get_default_resolution)
__default_font = encode_file_path(get_default_font())
__unull = as_unicode(r"\x00")
__bnull = as_bytes("\x00")
def __init__(self, file, size=-1):
if size <= 1:
size = 1
if isinstance(file, unicode_):
try:
bfile = self.__encode_file_path(file, ValueError)
except ValueError:
bfile = ''
else:
bfile = file
if isinstance(bfile, bytes_) and bfile == self.__default_font:
file = None
if file is None:
resolution = int(self.__get_default_resolution() * 0.6875)
if resolution == 0:
resolution = 1
else:
resolution = 0
super(Font, self).__init__(file, size=size, resolution=resolution)
self.strength = 1.0 / 12.0
self.kerning = False
self.origin = True
self.pad = True
self.ucs4 = True
self.underline_adjustment = 1.0
def render(self, text, antialias, color, background=None):
"""render(text, antialias, color, background=None) -> Surface
draw text on a new Surface"""
if text is None:
text = ""
if (isinstance(text, unicode_) and self.__unull in text):
raise ValueError("A null character was found in the text")
if (isinstance(text, bytes_) and self.__bnull in text):
raise ValueError("A null character was found in the text")
save_antialiased = self.antialiased
self.antialiased = bool(antialias)
try:
s, r = super(Font, self).render(text, color, background)
return s
finally:
self.antialiased = save_antialiased
def set_bold(self, value):
"""set_bold(bool) -> None
enable fake rendering of bold text"""
self.wide = bool(value)
def get_bold(self):
"""get_bold() -> bool
check if text will be rendered bold"""
return self.wide
bold = property(get_bold, set_bold)
def set_italic(self, value):
"""set_italic(bool) -> None
enable fake rendering of italic text"""
self.oblique = bool(value)
def get_italic(self):
"""get_italic() -> bool
check if the text will be rendered italic"""
return self.oblique
italic = property(get_italic, set_italic)
def set_underline(self, value):
"""set_underline(bool) -> None
control if text is rendered with an underline"""
self.underline = bool(value)
def get_underline(self):
"""set_bold(bool) -> None
enable fake rendering of bold text"""
return self.underline
def metrics(self, text):
"""metrics(text) -> list
Gets the metrics for each character in the passed string."""
return self.get_metrics(text)
def get_ascent(self):
"""get_ascent() -> int
get the ascent of the font"""
return self.get_sized_ascender()
def get_descent(self):
"""get_descent() -> int
get the descent of the font"""
return self.get_sized_descender()
def get_height(self):
"""get_height() -> int
get the height of the font"""
return self.get_sized_ascender() - self.get_sized_descender() + 1
def get_linesize(self):
"""get_linesize() -> int
get the line space of the font text"""
return self.get_sized_height()
def size(self, text):
"""size(text) -> (width, height)
determine the amount of space needed to render text"""
return self.get_rect(text).size
FontType = Font
def get_init():
"""get_init() -> bool
true if the font module is initialized"""
return _get_init()
def SysFont(name, size, bold=0, italic=0, constructor=None):
"""pygame.ftfont.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font
Create a pygame Font from system font resources.
This will search the system fonts for the given font
name. You can also enable bold or italic styles, and
the appropriate system font will be selected if available.
This will always return a valid Font object, and will
    fall back on the builtin pygame font if the given font
is not found.
Name can also be an iterable of font names, a string of
comma-separated font names, or a bytes of comma-separated
font names, in which case the set of names will be searched
in order. Pygame uses a small set of common font aliases. If the
specific font you ask for is not available, a reasonable
alternative may be used.
If optional constructor is provided, it must be a function with
signature constructor(fontpath, size, bold, italic) which returns
a Font instance. If None, a pygame.ftfont.Font object is created.
"""
if constructor is None:
def constructor(fontpath, size, bold, italic):
font = Font(fontpath, size)
font.set_bold(bold)
font.set_italic(italic)
return font
return _SysFont(name, size, bold, italic, constructor)
del _Font, get_default_resolution, encode_file_path, as_unicode, as_bytes
```
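Because `pygame.ftfont.Font` mirrors the `pygame.font.Font` API, the module works as a drop-in replacement. A minimal usage sketch (font name and size are arbitrary):

```python
import pygame
import pygame.ftfont

pygame.init()
pygame.ftfont.init()
font = pygame.ftfont.SysFont("arial", 24, bold=True)
# render(text, antialias, color, background=None) -> Surface
surface = font.render("Hello", True, (255, 255, 255))
print(font.size("Hello"), surface.get_size())
```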
#### File: site-packages/pygame/macosx.py
```python
import platform
import os
import sys
from pygame.pkgdata import getResource
from pygame import sdlmain_osx
__all__ = ['Video_AutoInit']
def Video_AutoInit():
"""Called from the base.c just before display module is initialized."""
if 'Darwin' in platform.platform():
if not sdlmain_osx.RunningFromBundleWithNSApplication():
default_icon_data = None
try:
with getResource('pygame_icon.tiff') as file_resource:
default_icon_data = file_resource.read()
except (IOError, NotImplementedError):
pass
sdlmain_osx.InstallNSApplication(default_icon_data)
if (os.getcwd() == '/') and len(sys.argv) > 1:
os.chdir(os.path.dirname(sys.argv[0]))
return True
```
#### File: pygame/tests/cdrom_test.py
```python
import unittest
from pygame.tests.test_utils import question, prompt
import pygame
pygame.cdrom.init()
# The number of CD drives available for testing.
CD_DRIVE_COUNT = pygame.cdrom.get_count()
pygame.cdrom.quit()
class CDROMModuleTest(unittest.TestCase):
def setUp(self):
pygame.cdrom.init()
def tearDown(self):
pygame.cdrom.quit()
def todo_test_CD(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD:
# pygame.cdrom.CD(id): return CD
# class to manage a cdrom drive
#
# You can create a CD object for each cdrom on the system. Use
# pygame.cdrom.get_count() to determine how many drives actually
# exist. The id argument is an integer of the drive, starting at zero.
#
# The CD object is not initialized, you can only call CD.get_id() and
# CD.get_name() on an uninitialized drive.
#
# It is safe to create multiple CD objects for the same drive, they
# will all cooperate normally.
#
self.fail()
def test_get_count(self):
"""Ensure the correct number of CD drives can be detected."""
count = pygame.cdrom.get_count()
response = question(
"Is the correct number of CD drives on this " "system [{}]?".format(count)
)
self.assertTrue(response)
def test_get_init(self):
"""Ensure the initialization state can be retrieved."""
self.assertTrue(pygame.cdrom.get_init())
def test_init(self):
"""Ensure module still initialized after multiple init() calls."""
pygame.cdrom.init()
pygame.cdrom.init()
self.assertTrue(pygame.cdrom.get_init())
def test_quit(self):
"""Ensure module not initialized after quit() called."""
pygame.cdrom.quit()
self.assertFalse(pygame.cdrom.get_init())
def test_quit__multiple(self):
"""Ensure module still not initialized after multiple quit() calls."""
pygame.cdrom.quit()
pygame.cdrom.quit()
self.assertFalse(pygame.cdrom.get_init())
@unittest.skipIf(0 == CD_DRIVE_COUNT, "No CD drives detected")
class CDTypeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
pygame.cdrom.init()
cls._cd_id = 0 # Only testing drive 0 for now. Expand in the future.
cls._cd = pygame.cdrom.CD(cls._cd_id)
@classmethod
def tearDownClass(cls):
pygame.cdrom.quit()
def setUp(self):
self._cd.init()
def tearDown(self):
self._cd.quit()
def test_eject(self):
"""Ensure CD drive opens/ejects."""
self._cd.eject()
response = question("Did the CD eject?")
self.assertTrue(response)
prompt("Please close the CD drive")
def test_get_name(self):
"""Ensure correct name for CD drive."""
cd_name = self._cd.get_name()
response = question(
"Is the correct name for the CD drive [{}]?" "".format(cd_name)
)
self.assertTrue(response)
def todo_test_get_all(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_all:
        # CD.get_all(): return [(audio, start, end, length), ...]
# get all track information
#
# Return a list with information for every track on the cdrom. The
# information consists of a tuple with four values. The audio value is
# True if the track contains audio data. The start, end, and length
# values are floating point numbers in seconds. Start and end
# represent absolute times on the entire disc.
#
self.fail()
def todo_test_get_busy(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_busy:
# CD.get_busy(): return bool
# true if the drive is playing audio
#
        # Returns True if the drive is busy playing back audio.
self.fail()
def todo_test_get_current(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_current:
# CD.get_current(): return track, seconds
# the current audio playback position
#
# Returns both the current track and time of that track. This method
# works when the drive is either playing or paused.
#
# Note, track 0 is the first track on the CD. Track numbers start at zero.
self.fail()
def test_get_empty(self):
"""Ensure correct name for CD drive."""
prompt("Please ensure the CD drive is closed")
is_empty = self._cd.get_empty()
response = question("Is the CD drive empty?")
self.assertEqual(is_empty, response)
def test_get_id(self):
"""Ensure the drive id/index is correct."""
cd_id = self._cd.get_id()
self.assertEqual(self._cd_id, cd_id)
def test_get_init(self):
"""Ensure the initialization state can be retrieved."""
self.assertTrue(self._cd.get_init())
def todo_test_get_numtracks(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_numtracks:
# CD.get_numtracks(): return count
# the number of tracks on the cdrom
#
# Return the number of tracks on the cdrom in the drive. This will
        # return zero if the drive is empty or has no tracks.
#
self.fail()
def todo_test_get_paused(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_paused:
# CD.get_paused(): return bool
# true if the drive is paused
#
# Returns True if the drive is currently paused.
self.fail()
def todo_test_get_track_audio(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_track_audio:
# CD.get_track_audio(track): return bool
# true if the cdrom track has audio data
#
# Determine if a track on a cdrom contains audio data. You can also
# call CD.num_tracks() and CD.get_all() to determine more information
# about the cdrom.
#
# Note, track 0 is the first track on the CD. Track numbers start at zero.
self.fail()
def todo_test_get_track_length(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_track_length:
# CD.get_track_length(track): return seconds
# length of a cdrom track
#
# Return a floating point value in seconds of the length of the cdrom track.
# Note, track 0 is the first track on the CD. Track numbers start at zero.
self.fail()
def todo_test_get_track_start(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_track_start:
# CD.get_track_start(track): return seconds
# start time of a cdrom track
#
        # Return the absolute time in seconds at the start of the cdrom track.
# Note, track 0 is the first track on the CD. Track numbers start at zero.
self.fail()
def test_init(self):
"""Ensure CD drive still initialized after multiple init() calls."""
self._cd.init()
self._cd.init()
self.assertTrue(self._cd.get_init())
def todo_test_pause(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.pause:
# CD.pause(): return None
# temporarily stop audio playback
#
# Temporarily stop audio playback on the CD. The playback can be
# resumed at the same point with the CD.resume() method. If the CD is
# not playing this method does nothing.
#
# Note, track 0 is the first track on the CD. Track numbers start at zero.
self.fail()
def todo_test_play(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.play:
        # CD.play(track, start=None, end=None): return None
        # start playing audio from the cdrom
#
# Playback audio from an audio cdrom in the drive. Besides the track
# number argument, you can also pass a starting and ending time for
# playback. The start and end time are in seconds, and can limit the
# section of an audio track played.
#
# If you pass a start time but no end, the audio will play to the end
# of the track. If you pass a start time and 'None' for the end time,
# the audio will play to the end of the entire disc.
#
# See the CD.get_numtracks() and CD.get_track_audio() to find tracks to playback.
# Note, track 0 is the first track on the CD. Track numbers start at zero.
self.fail()
def test_quit(self):
"""Ensure CD drive not initialized after quit() called."""
self._cd.quit()
self.assertFalse(self._cd.get_init())
def test_quit__multiple(self):
"""Ensure CD drive still not initialized after multiple quit() calls.
"""
self._cd.quit()
self._cd.quit()
self.assertFalse(self._cd.get_init())
def todo_test_resume(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.resume:
# CD.resume(): return None
# unpause audio playback
#
# Unpause a paused CD. If the CD is not paused or already playing,
# this method does nothing.
#
self.fail()
def todo_test_stop(self):
# __doc__ (as of 2008-08-02) for pygame.cdrom.CD.stop:
# CD.stop(): return None
# stop audio playback
#
# Stops playback of audio from the cdrom. This will also lose the
# current playback position. This method does nothing if the drive
# isn't already playing audio.
#
self.fail()
################################################################################
if __name__ == "__main__":
unittest.main()
```
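These tests are interactive, and `pygame.cdrom` exists only in pygame 1.x (it was dropped along with SDL1 support). A minimal sketch of the enumeration pattern they exercise, under that assumption:

```python
import pygame

pygame.cdrom.init()
for drive_id in range(pygame.cdrom.get_count()):
    cd = pygame.cdrom.CD(drive_id)
    # get_id() and get_name() work even on an uninitialized drive.
    print(cd.get_id(), cd.get_name())
pygame.cdrom.quit()
```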
#### File: site-packages/pygetwindow/__init__.py
```python
# Useful info:
# https://stackoverflow.com/questions/373020/finding-the-current-active-window-in-mac-os-x-using-python
# https://stackoverflow.com/questions/7142342/get-window-position-size-with-python
# win32 api and ctypes on Windows
# cocoa api and pyobjc on Mac
# Xlib on linux
# Possible Future Features:
# get/click menu (win32: GetMenuItemCount, GetMenuItemInfo, GetMenuItemID, GetMenu, GetMenuItemRect)
__version__ = "0.0.9"
import sys, collections, pyrect
class PyGetWindowException(Exception):
"""
Base class for exceptions raised when PyGetWindow functions
encounter a problem. If PyGetWindow raises an exception that isn't
this class, that indicates a bug in the module.
"""
pass
def pointInRect(x, y, left, top, width, height):
"""Returns ``True`` if the ``(x, y)`` point is within the box described
by ``(left, top, width, height)``."""
return left < x < left + width and top < y < top + height
# NOTE: `Rect` is a named tuple for use in Python, while structs.RECT represents
# the win32 RECT struct. PyRect's Rect class is used for handling changing
# geometry of rectangular areas.
Rect = collections.namedtuple("Rect", "left top right bottom")
Point = collections.namedtuple("Point", "x y")
Size = collections.namedtuple("Size", "width height")
class BaseWindow:
def __init__(self):
pass
def _setupRectProperties(self):
def _onRead(attrName):
r = self._getWindowRect()
self._rect._left = r.left # Setting _left directly to skip the onRead.
self._rect._top = r.top # Setting _top directly to skip the onRead.
self._rect._width = r.right - r.left # Setting _width directly to skip the onRead.
self._rect._height = r.bottom - r.top # Setting _height directly to skip the onRead.
def _onChange(oldBox, newBox):
self.moveTo(newBox.left, newBox.top)
self.resizeTo(newBox.width, newBox.height)
r = self._getWindowRect()
self._rect = pyrect.Rect(r.left, r.top, r.right - r.left, r.bottom - r.top, onChange=_onChange, onRead=_onRead)
def _getWindowRect(self):
raise NotImplementedError
def __str__(self):
r = self._getWindowRect()
width = r.right - r.left
height = r.bottom - r.top
return '<%s left="%s", top="%s", width="%s", height="%s", title="%s">' % (
self.__class__.__qualname__,
r.left,
r.top,
width,
height,
self.title,
)
def close(self):
"""Closes this window. This may trigger "Are you sure you want to
quit?" dialogs or other actions that prevent the window from
actually closing. This is identical to clicking the X button on the
window."""
raise NotImplementedError
def minimize(self):
"""Minimizes this window."""
raise NotImplementedError
def maximize(self):
"""Maximizes this window."""
raise NotImplementedError
def restore(self):
"""If maximized or minimized, restores the window to it's normal size."""
raise NotImplementedError
def activate(self):
"""Activate this window and make it the foreground window."""
raise NotImplementedError
def resizeRel(self, widthOffset, heightOffset):
"""Resizes the window relative to its current size."""
raise NotImplementedError
def resizeTo(self, newWidth, newHeight):
"""Resizes the window to a new width and height."""
raise NotImplementedError
def moveRel(self, xOffset, yOffset):
"""Moves the window relative to its current position."""
raise NotImplementedError
def moveTo(self, newLeft, newTop):
"""Moves the window to new coordinates on the screen."""
raise NotImplementedError
@property
def isMinimized(self):
"""Returns True if the window is currently minimized."""
raise NotImplementedError
@property
def isMaximized(self):
"""Returns True if the window is currently maximized."""
raise NotImplementedError
@property
def isActive(self):
"""Returns True if the window is currently the active, foreground window."""
raise NotImplementedError
@property
def title(self):
"""Returns the window title as a string."""
raise NotImplementedError
@property
def visible(self):
raise NotImplementedError
# Wrappers for pyrect.Rect object's properties:
@property
def left(self):
return self._rect.left
@left.setter
def left(self, value):
self._rect.left # Run rect's onRead to update the Rect object.
self._rect.left = value
@property
def right(self):
return self._rect.right
@right.setter
def right(self, value):
self._rect.right # Run rect's onRead to update the Rect object.
self._rect.right = value
@property
def top(self):
return self._rect.top
@top.setter
def top(self, value):
self._rect.top # Run rect's onRead to update the Rect object.
self._rect.top = value
@property
def bottom(self):
return self._rect.bottom
@bottom.setter
def bottom(self, value):
self._rect.bottom # Run rect's onRead to update the Rect object.
self._rect.bottom = value
@property
def topleft(self):
return self._rect.topleft
@topleft.setter
def topleft(self, value):
self._rect.topleft # Run rect's onRead to update the Rect object.
self._rect.topleft = value
@property
def topright(self):
return self._rect.topright
@topright.setter
def topright(self, value):
self._rect.topright # Run rect's onRead to update the Rect object.
self._rect.topright = value
@property
def bottomleft(self):
return self._rect.bottomleft
@bottomleft.setter
def bottomleft(self, value):
self._rect.bottomleft # Run rect's onRead to update the Rect object.
self._rect.bottomleft = value
@property
def bottomright(self):
return self._rect.bottomright
@bottomright.setter
def bottomright(self, value):
self._rect.bottomright # Run rect's onRead to update the Rect object.
self._rect.bottomright = value
@property
def midleft(self):
return self._rect.midleft
@midleft.setter
def midleft(self, value):
self._rect.midleft # Run rect's onRead to update the Rect object.
self._rect.midleft = value
@property
def midright(self):
return self._rect.midright
@midright.setter
def midright(self, value):
self._rect.midright # Run rect's onRead to update the Rect object.
self._rect.midright = value
@property
def midtop(self):
return self._rect.midtop
@midtop.setter
def midtop(self, value):
self._rect.midtop # Run rect's onRead to update the Rect object.
self._rect.midtop = value
@property
def midbottom(self):
return self._rect.midbottom
@midbottom.setter
def midbottom(self, value):
self._rect.midbottom # Run rect's onRead to update the Rect object.
self._rect.midbottom = value
@property
def center(self):
return self._rect.center
@center.setter
def center(self, value):
self._rect.center # Run rect's onRead to update the Rect object.
self._rect.center = value
@property
def centerx(self):
return self._rect.centerx
@centerx.setter
def centerx(self, value):
self._rect.centerx # Run rect's onRead to update the Rect object.
self._rect.centerx = value
@property
def centery(self):
return self._rect.centery
@centery.setter
def centery(self, value):
self._rect.centery # Run rect's onRead to update the Rect object.
self._rect.centery = value
@property
def width(self):
return self._rect.width
@width.setter
def width(self, value):
self._rect.width # Run rect's onRead to update the Rect object.
self._rect.width = value
@property
def height(self):
return self._rect.height
@height.setter
def height(self, value):
self._rect.height # Run rect's onRead to update the Rect object.
self._rect.height = value
@property
def size(self):
return self._rect.size
@size.setter
def size(self, value):
self._rect.size # Run rect's onRead to update the Rect object.
self._rect.size = value
@property
def area(self):
return self._rect.area
@area.setter
def area(self, value):
self._rect.area # Run rect's onRead to update the Rect object.
self._rect.area = value
@property
def box(self):
return self._rect.box
@box.setter
def box(self, value):
self._rect.box # Run rect's onRead to update the Rect object.
self._rect.box = value
if sys.platform == "darwin":
# raise NotImplementedError('PyGetWindow currently does not support macOS. If you have Appkit/Cocoa knowledge, please contribute! https://github.com/asweigart/pygetwindow') # TODO - implement mac
from ._pygetwindow_macos import *
Window = MacOSWindow
elif sys.platform == "win32":
from ._pygetwindow_win import (
Win32Window,
getActiveWindow,
getActiveWindowTitle,
getWindowsAt,
getWindowsWithTitle,
getAllWindows,
getAllTitles,
)
Window = Win32Window
else:
raise NotImplementedError(
"PyGetWindow currently does not support Linux. If you have Xlib knowledge, please contribute! https://github.com/asweigart/pygetwindow"
)
```
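A short usage sketch of the cross-platform surface defined above (which backend is loaded depends on the platform; a window whose title contains "Notepad" is assumed to exist, any title works):

```python
import pygetwindow as gw

matches = gw.getWindowsWithTitle("Notepad")
if matches:
    win = matches[0]
    win.moveTo(100, 100)     # delegated to the platform backend
    win.resizeTo(800, 600)
    win.activate()
    print(win.left, win.top, win.width, win.height)
```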
#### File: site-packages/pymsgbox/_native_win.py
```python
# The documentation for the MessageBox winapi is at:
# https://docs.microsoft.com/en-us/windows/desktop/api/winuser/nf-winuser-messagebox
import sys, ctypes
import pymsgbox
MB_OK = 0x0
MB_OKCANCEL = 0x1
MB_ABORTRETRYIGNORE = 0x2
MB_YESNOCANCEL = 0x3
MB_YESNO = 0x4
MB_RETRYCANCEL = 0x5
MB_CANCELTRYCONTINUE = 0x6
NO_ICON = 0
STOP = MB_ICONHAND = MB_ICONSTOP = MB_ICONERROR = 0x10
QUESTION = MB_ICONQUESTION = 0x20
WARNING = MB_ICONEXCLAMATION = 0x30
INFO = MB_ICONASTERISK = MB_ICONINFORMATION = 0x40
MB_DEFAULTBUTTON1 = 0x0
MB_DEFAULTBUTTON2 = 0x100
MB_DEFAULTBUTTON3 = 0x200
MB_DEFAULTBUTTON4 = 0x300
MB_SETFOREGROUND = 0x10000
MB_TOPMOST = 0x40000
IDABORT = 0x3
IDCANCEL = 0x2
IDCONTINUE = 0x11
IDIGNORE = 0x5
IDNO = 0x7
IDOK = 0x1
IDRETRY = 0x4
IDTRYAGAIN = 0x10
IDYES = 0x6
runningOnPython2 = sys.version_info[0] == 2
if runningOnPython2:
messageBoxFunc = ctypes.windll.user32.MessageBoxA
else: # Python 3 functions.
messageBoxFunc = ctypes.windll.user32.MessageBoxW
def alert(
text="",
title="",
button=pymsgbox.OK_TEXT,
root=None,
timeout=None,
icon=NO_ICON,
_tkinter=False,
):
"""Displays a simple message box with text and a single OK button. Returns the text of the button clicked on."""
text = str(text)
if (_tkinter) or (timeout is not None) or (button != pymsgbox.OK_TEXT):
# Timeouts are not supported by Windows message boxes.
# Call the original tkinter alert function, not this native one:
return pymsgbox._alertTkinter(text, title, button, root, timeout)
messageBoxFunc(0, text, title, MB_OK | MB_SETFOREGROUND | MB_TOPMOST | icon)
return button
def confirm(
text="",
title="",
buttons=(pymsgbox.OK_TEXT, pymsgbox.CANCEL_TEXT),
root=None,
timeout=None,
icon=QUESTION,
_tkinter=False,
):
"""Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on."""
text = str(text)
buttonFlag = None
if len(buttons) == 1:
if buttons[0] == pymsgbox.OK_TEXT:
buttonFlag = MB_OK
elif len(buttons) == 2:
if buttons[0] == pymsgbox.OK_TEXT and buttons[1] == pymsgbox.CANCEL_TEXT:
buttonFlag = MB_OKCANCEL
elif buttons[0] == pymsgbox.YES_TEXT and buttons[1] == pymsgbox.NO_TEXT:
buttonFlag = MB_YESNO
elif buttons[0] == pymsgbox.RETRY_TEXT and buttons[1] == pymsgbox.CANCEL_TEXT:
buttonFlag = MB_RETRYCANCEL
elif len(buttons) == 3:
if (
buttons[0] == pymsgbox.ABORT_TEXT
and buttons[1] == pymsgbox.RETRY_TEXT
and buttons[2] == pymsgbox.IGNORE_TEXT
):
buttonFlag = MB_ABORTRETRYIGNORE
elif (
buttons[0] == pymsgbox.CANCEL_TEXT
and buttons[1] == pymsgbox.TRY_AGAIN_TEXT
and buttons[2] == pymsgbox.CONTINUE_TEXT
):
buttonFlag = MB_CANCELTRYCONTINUE
elif (
buttons[0] == pymsgbox.YES_TEXT
and buttons[1] == pymsgbox.NO_TEXT
and buttons[2] == pymsgbox.CANCEL_TEXT
):
buttonFlag = MB_YESNOCANCEL
if (_tkinter) or (timeout is not None) or (buttonFlag is None):
# Call the original tkinter confirm() function, not this native one:
return pymsgbox._confirmTkinter(text, title, buttons, root, timeout)
retVal = messageBoxFunc(
0, text, title, buttonFlag | MB_SETFOREGROUND | MB_TOPMOST | icon
)
if retVal == IDOK or len(buttons) == 1:
return pymsgbox.OK_TEXT
elif retVal == IDCANCEL:
return pymsgbox.CANCEL_TEXT
elif retVal == IDYES:
return pymsgbox.YES_TEXT
elif retVal == IDNO:
return pymsgbox.NO_TEXT
elif retVal == IDTRYAGAIN:
return pymsgbox.TRY_TEXT
elif retVal == IDRETRY:
return pymsgbox.RETRY_TEXT
elif retVal == IDIGNORE:
return pymsgbox.IGNORE_TEXT
elif retVal == IDCONTINUE:
return pymsgbox.CONTINUE_TEXT
elif retVal == IDABORT:
return pymsgbox.ABORT_TEXT
else:
assert False, "Unexpected return value from MessageBox: %s" % (retVal)
'''
def prompt(text='', title='' , default=''):
"""Displays a message box with text input, and OK & Cancel buttons. Returns the text entered, or None if Cancel was clicked."""
pass
def password(text='', title='', default='', mask='*'):
"""Displays a message box with text input, and OK & Cancel buttons. Typed characters appear as *. Returns the text entered, or None if Cancel was clicked."""
pass
'''
```
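A usage sketch; on Windows these calls route to the native `MessageBoxW` wrapper above, and elsewhere they fall back to the tkinter implementations:

```python
import pymsgbox

pymsgbox.alert("Something happened.", "Status")
answer = pymsgbox.confirm(
    "Proceed?", "Confirm", buttons=[pymsgbox.YES_TEXT, pymsgbox.NO_TEXT]
)
if answer == pymsgbox.YES_TEXT:
    print("continuing")
```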
#### File: Python_OCR_JE/Models/OCR.py
```python
from PIL import Image
from pytesseract import pytesseract
class OCR:
def __init__(self):
self.pyTesserAct = pytesseract
    # Open the image and run OCR on it
def OpenImage_And_OCR(self, Image_Name):
"""
:param Image_Name: need to ocr image
:return: OCR detect string
"""
OCRImage = Image.open(Image_Name)
OCRImage = OCRImage.convert('L')
return self.pyTesserAct.image_to_string(OCRImage)
    # English 'eng', Simplified Chinese 'chi_sim', Traditional Chinese 'chi_tra'; languages can be stacked with '+'
def OpenImage_And_OCR_Lang(self, Image_Name, Lang='eng'):
"""
:param Image_Name: need to ocr image
:param Lang: OCR detect language
:return: OCR detect string
"""
OCRImage = Image.open(Image_Name)
OCRImage = OCRImage.convert('L')
return self.pyTesserAct.image_to_string(OCRImage, lang=Lang)
```
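A usage sketch for the class above (assumes the Tesseract binary is installed and on PATH; `sample.png` is a hypothetical image path):

```python
ocr = OCR()
# Plain English OCR.
print(ocr.OpenImage_And_OCR("sample.png"))
# Stack languages with '+', e.g. English plus Traditional Chinese.
print(ocr.OpenImage_And_OCR_Lang("sample.png", Lang="eng+chi_tra"))
```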
#### File: site-packages/numpy/conftest.py
```python
import os
import tempfile
import hypothesis
import pytest
import numpy
from numpy.core._multiarray_tests import get_fpu_mode
_old_fpu_mode = None
_collect_results = {}
# Use a known and persistent tmpdir for hypothesis' caches, which
# can be automatically cleared by the OS or user.
hypothesis.configuration.set_hypothesis_home_dir(
os.path.join(tempfile.gettempdir(), ".hypothesis")
)
# We register two custom profiles for Numpy - for details see
# https://hypothesis.readthedocs.io/en/latest/settings.html
# The first is designed for our own CI runs; the latter also
# forces determinism and is designed for use via np.test()
hypothesis.settings.register_profile(
name="numpy-profile", deadline=None, print_blob=True,
)
hypothesis.settings.register_profile(
name="np.test() profile",
deadline=None, print_blob=True, database=None, derandomize=True,
suppress_health_check=hypothesis.HealthCheck.all(),
)
# Note that the default profile is chosen based on the presence
# of pytest.ini, but can be overriden by passing the
# --hypothesis-profile=NAME argument to pytest.
_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
hypothesis.settings.load_profile(
"numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
)
def pytest_configure(config):
config.addinivalue_line("markers",
"valgrind_error: Tests that are known to error under valgrind.")
config.addinivalue_line("markers",
"leaks_references: Tests that are known to leak references.")
config.addinivalue_line("markers",
"slow: Tests that are very slow.")
config.addinivalue_line("markers",
"slow_pypy: Tests that are very slow on pypy.")
def pytest_addoption(parser):
parser.addoption("--available-memory", action="store", default=None,
help=("Set amount of memory available for running the "
"test suite. This can result to tests requiring "
"especially large amounts of memory to be skipped. "
"Equivalent to setting environment variable "
"NPY_AVAILABLE_MEM. Default: determined"
"automatically."))
def pytest_sessionstart(session):
available_mem = session.config.getoption('available_memory')
if available_mem is not None:
os.environ['NPY_AVAILABLE_MEM'] = available_mem
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
"""
Check FPU precision mode was not changed during test collection.
The clumsy way we do it here is mainly necessary because numpy
still uses yield tests, which can execute code at test collection
time.
"""
global _old_fpu_mode
mode = get_fpu_mode()
if _old_fpu_mode is None:
_old_fpu_mode = mode
elif mode != _old_fpu_mode:
_collect_results[item] = (_old_fpu_mode, mode)
_old_fpu_mode = mode
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
"""
Check FPU precision mode was not changed during the test.
"""
old_mode = get_fpu_mode()
yield
new_mode = get_fpu_mode()
if old_mode != new_mode:
raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
" during the test".format(old_mode, new_mode))
collect_result = _collect_results.get(request.node)
if collect_result is not None:
old_mode, new_mode = collect_result
raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
" when collecting the test".format(old_mode,
new_mode))
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace['np'] = numpy
@pytest.fixture(autouse=True)
def env_setup(monkeypatch):
monkeypatch.setenv('PYTHONHASHSEED', '0')
```
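The same register/load mechanism works for ad-hoc local profiles; a generic hypothesis sketch (the profile name and settings here are arbitrary, not part of numpy's conftest):

```python
import hypothesis

hypothesis.settings.register_profile(
    "debug", max_examples=10, verbosity=hypothesis.Verbosity.verbose
)
hypothesis.settings.load_profile("debug")
```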
#### File: core/tests/test_function_base.py
```python
from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
ndarray, sqrt, nextafter, stack, errstate
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
)
class PhysicalQuantity(float):
def __new__(cls, value):
return float.__new__(cls, value)
def __add__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) + float(self))
__radd__ = __add__
def __sub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(self) - float(x))
def __rsub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) - float(self))
def __mul__(self, x):
return PhysicalQuantity(float(x) * float(self))
__rmul__ = __mul__
def __div__(self, x):
return PhysicalQuantity(float(self) / float(x))
def __rdiv__(self, x):
return PhysicalQuantity(float(x) / float(self))
class PhysicalQuantity2(ndarray):
__array_priority__ = 10
class TestLogspace:
def test_basic(self):
y = logspace(0, 6)
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
y = logspace(0, 6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
def test_start_stop_array(self):
start = array([0., 1.])
stop = array([6., 7.])
t1 = logspace(start, stop, 6)
t2 = stack([logspace(_start, _stop, 6)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = logspace(start, stop[0], 6)
t4 = stack([logspace(_start, stop[0], 6)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = logspace(start, stop, 6, axis=-1)
assert_equal(t5, t2.T)
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = logspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(logspace(a, b), logspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
ls = logspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0))
ls = logspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0, 1))
class TestGeomspace:
def test_basic(self):
y = geomspace(1, 1e6)
assert_(len(y) == 50)
y = geomspace(1, 1e6, num=100)
assert_(y[-1] == 10 ** 6)
y = geomspace(1, 1e6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = geomspace(1, 1e6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
y = geomspace(8, 2, num=3)
assert_allclose(y, [8, 4, 2])
assert_array_equal(y.imag, 0)
y = geomspace(-1, -100, num=3)
assert_array_equal(y, [-1, -10, -100])
assert_array_equal(y.imag, 0)
y = geomspace(-100, -1, num=3)
assert_array_equal(y, [-100, -10, -1])
assert_array_equal(y.imag, 0)
def test_boundaries_match_start_and_stop_exactly(self):
# make sure that the boundaries of the returned array exactly
# equal 'start' and 'stop' - this isn't obvious because
# np.exp(np.log(x)) isn't necessarily exactly equal to x
start = 0.3
stop = 20.3
y = geomspace(start, stop, num=1)
assert_equal(y[0], start)
y = geomspace(start, stop, num=1, endpoint=False)
assert_equal(y[0], start)
y = geomspace(start, stop, num=3)
assert_equal(y[0], start)
assert_equal(y[-1], stop)
y = geomspace(start, stop, num=3, endpoint=False)
assert_equal(y[0], start)
def test_nan_interior(self):
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:-1]).all())
assert_equal(y[3], 3.0)
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4, endpoint=False)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:]).all())
def test_complex(self):
# Purely imaginary
y = geomspace(1j, 16j, num=5)
assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
assert_array_equal(y.real, 0)
y = geomspace(-4j, -324j, num=5)
assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
assert_array_equal(y.real, 0)
y = geomspace(1+1j, 1000+1000j, num=4)
assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
y = geomspace(-1+1j, -1000+1000j, num=4)
assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
# Logarithmic spirals
y = geomspace(-1, 1, num=3, dtype=complex)
assert_allclose(y, [-1, 1j, +1])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(0+3j, 3+0j, 3)
assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
y = geomspace(-3+0j, 0-3j, 3)
assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(-2-3j, 5+7j, 7)
assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
2.08885354-4.34146838j, 4.58345529-3.16355218j,
6.41401745-0.55233457j, 6.75707386+3.11795092j,
5+7j])
# Type promotion should prevent the -5 from becoming a NaN
y = geomspace(3j, -5, 2)
assert_allclose(y, [3j, -5])
y = geomspace(-5, 3j, 2)
assert_allclose(y, [-5, 3j])
def test_dtype(self):
y = geomspace(1, 1e6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = geomspace(1, 1e6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = geomspace(1, 1e6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
# Native types
y = geomspace(1, 1e6, dtype=float)
assert_equal(y.dtype, dtype('float_'))
y = geomspace(1, 1e6, dtype=complex)
assert_equal(y.dtype, dtype('complex'))
def test_start_stop_array_scalar(self):
lim1 = array([120, 100], dtype="int8")
lim2 = array([-120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = geomspace(lim1[0], lim1[1], 5)
t2 = geomspace(lim2[0], lim2[1], 5)
t3 = geomspace(lim3[0], lim3[1], 5)
t4 = geomspace(120.0, 100.0, 5)
t5 = geomspace(-120.0, -100.0, 5)
t6 = geomspace(1200.0, 1000.0, 5)
# t3 uses float32, t6 uses float64
assert_allclose(t1, t4, rtol=1e-2)
assert_allclose(t2, t5, rtol=1e-2)
assert_allclose(t3, t6, rtol=1e-5)
def test_start_stop_array(self):
# Try to use all special cases.
start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
t1 = geomspace(start, stop, 5)
t2 = stack([geomspace(_start, _stop, 5)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = geomspace(start, stop[0], 5)
t4 = stack([geomspace(_start, stop[0], 5)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = geomspace(start, stop, 5, axis=-1)
assert_equal(t5, t2.T)
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
gs = geomspace(a, b)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0))
gs = geomspace(a, b, 1)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0, 1))
def test_bounds(self):
assert_raises(ValueError, geomspace, 0, 10)
assert_raises(ValueError, geomspace, 10, 0)
assert_raises(ValueError, geomspace, 0, 0)
class TestLinspace:
def test_basic(self):
y = linspace(0, 10)
assert_(len(y) == 50)
y = linspace(2, 10, num=100)
assert_(y[-1] == 10)
y = linspace(2, 10, endpoint=False)
assert_(y[-1] < 10)
assert_raises(ValueError, linspace, 0, 10, num=-1)
def test_corner(self):
y = list(linspace(0, 1, 1))
assert_(y == [0.0], y)
assert_raises(TypeError, linspace, 0, 1, num=2.5)
def test_type(self):
t1 = linspace(0, 1, 0).dtype
t2 = linspace(0, 1, 1).dtype
t3 = linspace(0, 1, 2).dtype
assert_equal(t1, t2)
assert_equal(t2, t3)
def test_dtype(self):
y = linspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = linspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = linspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_start_stop_array_scalar(self):
lim1 = array([-120, 100], dtype="int8")
lim2 = array([120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = linspace(lim1[0], lim1[1], 5)
t2 = linspace(lim2[0], lim2[1], 5)
t3 = linspace(lim3[0], lim3[1], 5)
t4 = linspace(-120.0, 100.0, 5)
t5 = linspace(120.0, -100.0, 5)
t6 = linspace(1200.0, 1000.0, 5)
assert_equal(t1, t4)
assert_equal(t2, t5)
assert_equal(t3, t6)
def test_start_stop_array(self):
start = array([-120, 120], dtype="int8")
stop = array([100, -100], dtype="int8")
t1 = linspace(start, stop, 5)
t2 = stack([linspace(_start, _stop, 5)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = linspace(start, stop[0], 5)
t4 = stack([linspace(_start, stop[0], 5)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = linspace(start, stop, 5, axis=-1)
assert_equal(t5, t2.T)
def test_complex(self):
lim1 = linspace(1 + 2j, 3 + 4j, 5)
t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
lim2 = linspace(1j, 10, 5)
t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
assert_equal(lim1, t1)
assert_equal(lim2, t2)
def test_physical_quantities(self):
a = PhysicalQuantity(0.0)
b = PhysicalQuantity(1.0)
assert_equal(linspace(a, b), linspace(0.0, 1.0))
def test_subclass(self):
a = array(0).view(PhysicalQuantity2)
b = array(1).view(PhysicalQuantity2)
ls = linspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, linspace(0.0, 1.0))
ls = linspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, linspace(0.0, 1.0, 1))
def test_array_interface(self):
# Regression test for https://github.com/numpy/numpy/pull/6659
# Ensure that start/stop can be objects that implement
# __array_interface__ and are convertible to numeric scalars
class Arrayish:
"""
A generic object that supports the __array_interface__ and hence
can in principle be converted to a numeric scalar, but is not
otherwise recognized as numeric, but also happens to support
multiplication by floats.
Data should be an object that implements the buffer interface,
and contains at least 4 bytes.
"""
def __init__(self, data):
self._data = data
@property
def __array_interface__(self):
return {'shape': (), 'typestr': '<i4', 'data': self._data,
'version': 3}
def __mul__(self, other):
# For the purposes of this test any multiplication is an
# identity operation :)
return self
one = Arrayish(array(1, dtype='<i4'))
five = Arrayish(array(5, dtype='<i4'))
assert_equal(linspace(one, five), linspace(1, 5))
def test_denormal_numbers(self):
# Regression test for gh-5437. Will probably fail when compiled
# with ICC, which flushes denormals to zero
for ftype in sctypes['float']:
stop = nextafter(ftype(0), ftype(1)) * 5 # A denormal number
assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype)))
def test_equivalent_to_arange(self):
for j in range(1000):
assert_equal(linspace(0, j, j+1, dtype=int),
arange(j+1, dtype=int))
def test_retstep(self):
for num in [0, 1, 2]:
for ept in [False, True]:
y = linspace(0, 1, num, endpoint=ept, retstep=True)
assert isinstance(y, tuple) and len(y) == 2
if num == 2:
y0_expect = [0.0, 1.0] if ept else [0.0, 0.5]
assert_array_equal(y[0], y0_expect)
assert_equal(y[1], y0_expect[1])
elif num == 1 and not ept:
assert_array_equal(y[0], [0.0])
assert_equal(y[1], 1.0)
else:
assert_array_equal(y[0], [0.0][:num])
assert isnan(y[1])
def test_object(self):
start = array(1, dtype='O')
stop = array(2, dtype='O')
y = linspace(start, stop, 3)
assert_array_equal(y, array([1., 1.5, 2.]))
def test_round_negative(self):
y = linspace(-1, 3, num=8, dtype=int)
t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int)
assert_array_equal(y, t)
```
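The array start/stop tests above rely on broadcasting; a quick illustration of what the `axis` argument controls:

```python
import numpy as np

start = np.array([0.0, 10.0])
stop = np.array([1.0, 20.0])
# Default axis=0: samples run along the first axis, shape (5, 2).
print(np.linspace(start, stop, num=5).shape)
# axis=-1: one row of samples per (start, stop) pair, shape (2, 5).
print(np.linspace(start, stop, num=5, axis=-1).shape)
```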
#### File: distutils/fcompiler/fujitsu.py
```python
from numpy.distutils.fcompiler import FCompiler
compilers = ['FujitsuFCompiler']
class FujitsuFCompiler(FCompiler):
compiler_type = 'fujitsu'
description = 'Fujitsu Fortran Compiler'
possible_executables = ['frt']
version_pattern = r'frt \(FRT\) (?P<version>[a-z\d.]+)'
# $ frt --version
# frt (FRT) x.x.x yyyymmdd
executables = {
'version_cmd' : ["<F77>", "--version"],
'compiler_f77' : ["frt", "-Fixed"],
'compiler_fix' : ["frt", "-Fixed"],
'compiler_f90' : ["frt"],
'linker_so' : ["frt", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-KPIC']
module_dir_switch = '-M'
module_include_switch = '-I'
def get_flags_opt(self):
return ['-O3']
def get_flags_debug(self):
return ['-g']
def runtime_library_dir_option(self, dir):
return f'-Wl,-rpath={dir}'
def get_libraries(self):
return ['fj90f', 'fj90i', 'fjsrcinfo']
if __name__ == '__main__':
from distutils import log
from numpy.distutils import customized_fcompiler
log.set_verbosity(2)
print(customized_fcompiler('fujitsu').get_version())
```
#### File: f2py/tests/test_module_doc.py
```python
import os
import sys
import pytest
import textwrap
from . import util
from numpy.testing import assert_equal, IS_PYPY
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
class TestModuleDocString(util.F2PyTest):
sources = [_path('src', 'module_data', 'module_data_docstring.f90')]
@pytest.mark.skipif(sys.platform=='win32',
reason='Fails with MinGW64 Gfortran (Issue #9673)')
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_module_docstring(self):
assert_equal(self.module.mod.__doc__,
textwrap.dedent('''\
i : 'i'-scalar
x : 'i'-array(4)
a : 'f'-array(2,3)
b : 'f'-array(-1,-1), not allocated\x00
foo()\n
Wrapper for ``foo``.\n\n''')
)
```
#### File: numpy/testing/print_coercion_tables.py
```python
import numpy as np
from collections import namedtuple
# Generic object that can be added, but doesn't do anything else
class GenericObject:
def __init__(self, v):
self.v = v
def __add__(self, other):
return self
def __radd__(self, other):
return self
dtype = np.dtype('O')
def print_cancast_table(ntypes):
print('X', end=' ')
for char in ntypes:
print(char, end=' ')
print()
for row in ntypes:
print(row, end=' ')
for col in ntypes:
if np.can_cast(row, col, "equiv"):
cast = "#"
elif np.can_cast(row, col, "safe"):
cast = "="
elif np.can_cast(row, col, "same_kind"):
cast = "~"
elif np.can_cast(row, col, "unsafe"):
cast = "."
else:
cast = " "
print(cast, end=' ')
print()
def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
print('+', end=' ')
for char in ntypes:
print(char, end=' ')
print()
for row in ntypes:
if row == 'O':
rowtype = GenericObject
else:
rowtype = np.obj2sctype(row)
print(row, end=' ')
for col in ntypes:
if col == 'O':
coltype = GenericObject
else:
coltype = np.obj2sctype(col)
try:
if firstarray:
rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
else:
rowvalue = rowtype(inputfirstvalue)
colvalue = coltype(inputsecondvalue)
if use_promote_types:
char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
else:
value = np.add(rowvalue, colvalue)
if isinstance(value, np.ndarray):
char = value.dtype.char
else:
char = np.dtype(type(value)).char
except ValueError:
char = '!'
except OverflowError:
char = '@'
except TypeError:
char = '#'
print(char, end=' ')
print()
def print_new_cast_table(*, can_cast=True, legacy=False, flags=False):
"""Prints new casts, the values given are default "can-cast" values, not
actual ones.
"""
from numpy.core._multiarray_tests import get_all_cast_information
cast_table = {
0 : "#", # No cast (classify as equivalent here)
1 : "#", # equivalent casting
2 : "=", # safe casting
3 : "~", # same-kind casting
4 : ".", # unsafe casting
}
flags_table = {
0 : "▗", 7: "█",
1: "▚", 2: "▐", 4: "▄",
3: "▜", 5: "▙",
6: "▟",
}
cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"])
no_cast_info = cast_info(" ", " ", " ")
casts = get_all_cast_information()
table = {}
dtypes = set()
for cast in casts:
dtypes.add(cast["from"])
dtypes.add(cast["to"])
if cast["from"] not in table:
table[cast["from"]] = {}
to_dict = table[cast["from"]]
can_cast = cast_table[cast["casting"]]
legacy = "L" if cast["legacy"] else "."
flags = 0
if cast["requires_pyapi"]:
flags |= 1
if cast["supports_unaligned"]:
flags |= 2
if cast["no_floatingpoint_errors"]:
flags |= 4
flags = flags_table[flags]
to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags)
# The np.dtype(x.type) is a bit strange, because dtype classes do
# not expose much yet.
types = np.typecodes["All"]
def sorter(x):
        # This is a bit of a weird hack, to get a table as close as possible to
# the one printing all typecodes (but expecting user-dtypes).
dtype = np.dtype(x.type)
try:
indx = types.index(dtype.char)
except ValueError:
indx = np.inf
return (indx, dtype.char)
dtypes = sorted(dtypes, key=sorter)
def print_table(field="can_cast"):
print('X', end=' ')
for dt in dtypes:
print(np.dtype(dt.type).char, end=' ')
print()
for from_dt in dtypes:
print(np.dtype(from_dt.type).char, end=' ')
row = table.get(from_dt, {})
for to_dt in dtypes:
print(getattr(row.get(to_dt, no_cast_info), field), end=' ')
print()
if can_cast:
# Print the actual table:
print()
print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe")
print()
print_table("can_cast")
if legacy:
print()
print("L denotes a legacy cast . a non-legacy one.")
print()
print_table("legacy")
if flags:
print()
print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, "
f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors")
print()
print_table("flags")
if __name__ == '__main__':
print("can cast")
print_cancast_table(np.typecodes['All'])
print()
print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
print()
print("scalar + scalar")
print_coercion_table(np.typecodes['All'], 0, 0, False)
print()
print("scalar + neg scalar")
print_coercion_table(np.typecodes['All'], 0, -1, False)
print()
print("array + scalar")
print_coercion_table(np.typecodes['All'], 0, 0, True)
print()
print("array + neg scalar")
print_coercion_table(np.typecodes['All'], 0, -1, True)
print()
print("promote_types")
print_coercion_table(np.typecodes['All'], 0, 0, False, True)
print("New casting type promotion:")
print_new_cast_table(can_cast=True, legacy=True, flags=True)
```
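The symbols in these tables correspond to the `casting` levels accepted by `np.can_cast`; a direct check of a few cells:

```python
import numpy as np

print(np.promote_types('int32', 'float32'))                  # float64
print(np.can_cast('int64', 'float64', casting='safe'))       # True
print(np.can_cast('float64', 'int64', casting='same_kind'))  # False
print(np.can_cast('float64', 'int64', casting='unsafe'))     # True
```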
#### File: data/fail/ufunc_config.py
```python
import numpy as np
def func1(a: str, b: int, c: float) -> None: ...
def func2(a: str, *, b: int) -> None: ...
class Write1:
def write1(self, a: str) -> None: ...
class Write2:
def write(self, a: str, b: str) -> None: ...
class Write3:
def write(self, *, a: str) -> None: ...
np.seterrcall(func1) # E: Argument 1 to "seterrcall" has incompatible type
np.seterrcall(func2) # E: Argument 1 to "seterrcall" has incompatible type
np.seterrcall(Write1()) # E: Argument 1 to "seterrcall" has incompatible type
np.seterrcall(Write2()) # E: Argument 1 to "seterrcall" has incompatible type
np.seterrcall(Write3()) # E: Argument 1 to "seterrcall" has incompatible type
```
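For contrast with the failing signatures above, the callbacks `np.seterrcall` does accept are a two-argument callable or an object with a one-argument `write` method; a sketch:

```python
import numpy as np

def handler(err, flag):
    print("caught:", err, flag)

class Log:
    def write(self, msg):
        print("LOG:", msg, end="")

np.seterrcall(handler)
old = np.seterr(all="call")
np.float64(1.0) / np.float64(0.0)  # invokes handler("divide", ...)

np.seterrcall(Log())               # objects with write(msg) also work
np.seterr(all="log")
np.seterr(**old)                   # restore previous error handling
```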
#### File: data/reveal/nbit_base_example.py
```python
from typing import TypeVar, Union
import numpy as np
import numpy.typing as npt
T = TypeVar("T", bound=npt.NBitBase)
def add(a: np.floating[T], b: np.integer[T]) -> np.floating[T]:
return a + b
i8: np.int64
i4: np.int32
f8: np.float64
f4: np.float32
reveal_type(add(f8, i8)) # E: numpy.floating[numpy.typing._64Bit]
reveal_type(add(f4, i8)) # E: numpy.floating[numpy.typing._64Bit]
reveal_type(add(f8, i4)) # E: numpy.floating[numpy.typing._64Bit]
reveal_type(add(f4, i4)) # E: numpy.floating[numpy.typing._32Bit]
```
#### File: site-packages/PIL/ImageTk.py
```python
import tkinter
from io import BytesIO
from . import Image
# --------------------------------------------------------------------
# Check for Tkinter interface hooks
_pilbitmap_ok = None
def _pilbitmap_check():
global _pilbitmap_ok
if _pilbitmap_ok is None:
try:
im = Image.new("1", (1, 1))
tkinter.BitmapImage(data=f"PIL:{im.im.id}")
_pilbitmap_ok = 1
except tkinter.TclError:
_pilbitmap_ok = 0
return _pilbitmap_ok
def _get_image_from_kw(kw):
source = None
if "file" in kw:
source = kw.pop("file")
elif "data" in kw:
source = BytesIO(kw.pop("data"))
if source:
return Image.open(source)
# --------------------------------------------------------------------
# PhotoImage
class PhotoImage:
"""
A Tkinter-compatible photo image. This can be used
everywhere Tkinter expects an image object. If the image is an RGBA
image, pixels having alpha 0 are treated as transparent.
The constructor takes either a PIL image, or a mode and a size.
Alternatively, you can use the ``file`` or ``data`` options to initialize
the photo image object.
:param image: Either a PIL image, or a mode string. If a mode string is
used, a size must also be given.
:param size: If the first argument is a mode string, this defines the size
of the image.
:keyword file: A filename to load the image from (using
``Image.open(file)``).
:keyword data: An 8-bit string containing image data (as loaded from an
image file).
"""
def __init__(self, image=None, size=None, **kw):
# Tk compatibility: file or data
if image is None:
image = _get_image_from_kw(kw)
if hasattr(image, "mode") and hasattr(image, "size"):
# got an image instead of a mode
mode = image.mode
if mode == "P":
# palette mapped data
image.load()
try:
mode = image.palette.mode
except AttributeError:
mode = "RGB" # default
size = image.size
kw["width"], kw["height"] = size
else:
mode = image
image = None
if mode not in ["1", "L", "RGB", "RGBA"]:
mode = Image.getmodebase(mode)
self.__mode = mode
self.__size = size
self.__photo = tkinter.PhotoImage(**kw)
self.tk = self.__photo.tk
if image:
self.paste(image)
def __del__(self):
name = self.__photo.name
self.__photo.name = None
try:
self.__photo.tk.call("image", "delete", name)
except Exception:
pass # ignore internal errors
def __str__(self):
"""
Get the Tkinter photo image identifier. This method is automatically
called by Tkinter whenever a PhotoImage object is passed to a Tkinter
method.
:return: A Tkinter photo image identifier (a string).
"""
return str(self.__photo)
def width(self):
"""
Get the width of the image.
:return: The width, in pixels.
"""
return self.__size[0]
def height(self):
"""
Get the height of the image.
:return: The height, in pixels.
"""
return self.__size[1]
def paste(self, im, box=None):
"""
Paste a PIL image into the photo image. Note that this can
be very slow if the photo image is displayed.
:param im: A PIL image. The size must match the target region. If the
mode does not match, the image is converted to the mode of
the bitmap image.
:param box: A 4-tuple defining the left, upper, right, and lower pixel
coordinate. See :ref:`coordinate-system`. If None is given
instead of a tuple, all of the image is assumed.
"""
# convert to blittable
im.load()
image = im.im
if image.isblock() and im.mode == self.__mode:
block = image
else:
block = image.new_block(self.__mode, im.size)
image.convert2(block, image) # convert directly between buffers
tk = self.__photo.tk
try:
tk.call("PyImagingPhoto", self.__photo, block.id)
except tkinter.TclError:
# activate Tkinter hook
try:
from . import _imagingtk
try:
if hasattr(tk, "interp"):
# Required for PyPy, which always has CFFI installed
from cffi import FFI
ffi = FFI()
# PyPy is using an FFI CDATA element
# (Pdb) self.tk.interp
# <cdata 'Tcl_Interp *' 0x3061b50>
_imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1)
else:
_imagingtk.tkinit(tk.interpaddr(), 1)
except AttributeError:
_imagingtk.tkinit(id(tk), 0)
tk.call("PyImagingPhoto", self.__photo, block.id)
except (ImportError, AttributeError, tkinter.TclError):
raise # configuration problem; cannot attach to Tkinter
# --------------------------------------------------------------------
# BitmapImage
class BitmapImage:
"""
A Tkinter-compatible bitmap image. This can be used everywhere Tkinter
expects an image object.
The given image must have mode "1". Pixels having value 0 are treated as
transparent. Options, if any, are passed on to Tkinter. The most commonly
used option is ``foreground``, which is used to specify the color for the
non-transparent parts. See the Tkinter documentation for information on
how to specify colours.
:param image: A PIL image.
"""
def __init__(self, image=None, **kw):
# Tk compatibility: file or data
if image is None:
image = _get_image_from_kw(kw)
self.__mode = image.mode
self.__size = image.size
if _pilbitmap_check():
# fast way (requires the pilbitmap booster patch)
image.load()
kw["data"] = f"PIL:{image.im.id}"
self.__im = image # must keep a reference
else:
# slow but safe way
kw["data"] = image.tobitmap()
self.__photo = tkinter.BitmapImage(**kw)
def __del__(self):
name = self.__photo.name
self.__photo.name = None
try:
self.__photo.tk.call("image", "delete", name)
except Exception:
pass # ignore internal errors
def width(self):
"""
Get the width of the image.
:return: The width, in pixels.
"""
return self.__size[0]
def height(self):
"""
Get the height of the image.
:return: The height, in pixels.
"""
return self.__size[1]
def __str__(self):
"""
Get the Tkinter bitmap image identifier. This method is automatically
called by Tkinter whenever a BitmapImage object is passed to a Tkinter
method.
:return: A Tkinter bitmap image identifier (a string).
"""
return str(self.__photo)
def getimage(photo):
"""Copies the contents of a PhotoImage to a PIL image memory."""
im = Image.new("RGBA", (photo.width(), photo.height()))
block = im.im
photo.tk.call("PyImagingPhotoGet", photo, block.id)
return im
def _show(image, title):
"""Helper for the Image.show method."""
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
self.image = BitmapImage(im, foreground="white", master=master)
else:
self.image = PhotoImage(im, master=master)
super().__init__(master, image=self.image, bg="black", bd=0)
if not tkinter._default_root:
raise OSError("tkinter not initialized")
top = tkinter.Toplevel()
if title:
top.title(title)
UI(top, image).pack()
```
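A minimal usage sketch for the module above; it assumes Pillow is installed and a Tk display is available, and the image contents are placeholders:
```python
import tkinter
from PIL import Image, ImageTk

root = tkinter.Tk()
im = Image.new("RGB", (120, 80), color="navy")  # placeholder image
photo = ImageTk.PhotoImage(im)                  # keep a reference alive
label = tkinter.Label(root, image=photo)
label.pack()
root.mainloop()
```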
#### File: _internal/cli/main_parser.py
```python
import os
import sys
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip._internal.commands import commands_dict, get_similar_commands
from pip._internal.exceptions import CommandError
from pip._internal.utils.misc import get_pip_version, get_prog
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Tuple
__all__ = ["create_main_parser", "parse_command"]
def create_main_parser():
# type: () -> ConfigOptionParser
"""Creates and returns the main parser for pip's CLI
"""
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
parser.version = get_pip_version()
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
# so the help formatter knows
parser.main = True # type: ignore
# create command listing for description
description = [''] + [
'{name:27} {command_info.summary}'.format(**locals())
for name, command_info in commands_dict.items()
]
parser.description = '\n'.join(description)
return parser
def parse_command(args):
# type: (List[str]) -> Tuple[str, List[str]]
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
    #  general_options: ['--timeout=5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = [f'unknown command "{cmd_name}"']
if guess:
msg.append(f'maybe you meant "{guess}"')
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
```
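A rough illustration of what `parse_command` returns for a typical argv; pip's `_internal` package is not a stable API, so this is for exposition only:
```python
from pip._internal.cli.main_parser import parse_command

# General options are split off; the subcommand name is removed from the rest.
cmd_name, cmd_args = parse_command(
    ["--timeout=5", "install", "--user", "INITools"])
print(cmd_name)   # 'install'
print(cmd_args)   # ['--timeout=5', '--user', 'INITools']
```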
#### File: Python_PornHubCrawler_JE/Models/HubVideo.py
```python
from bs4 import BeautifulSoup
import requests
class HubVideo():
def __init__(self):
self.Prefix='https://www.pornhub.com'
self.Target_url = [
'https://cn.pornhub.com/', #Main
'https://www.pornhub.com/video?o=ht', #GlobalHot
'https://www.pornhub.com/video?o=mv', #MostViewed
'https://www.pornhub.com/video?o=tr', #TopRated
'https://www.pornhub.com/video?p=homemade&o=tr', #Homemade
'https://www.pornhub.com/playlists',#Playlist
'https://www.pornhub.com/channels' #Channel
]
# ----------------------------------------------------------------------------------------------
    # Fetch videos from the main page
def Get_Main_Video(self):
rs = requests.session()
res = rs.get(self.Target_url[0])
soup = BeautifulSoup(res.text, 'html.parser')
Main = soup.select('ul.videos-morepad a.linkVideoThumb')
Total=''
        print('------------------------International Hot------------------------------------')
for index, data in enumerate(Main):
print(data['title'])
Total+=data['title']+'\n'
print(self.Prefix + data['href'])
Total += self.Prefix + data['href']+'\n'
        print('------------------------International Hot------------------------------------')
return Total
# ----------------------------------------------------------------------------------------------
    # Fetch the hot playlists
def Get_Hot_Playlist(self):
Play_List_Title = []
Play_List_Href = []
rs = requests.session()
res = rs.get( self.Target_url[5])
soup = BeautifulSoup(res.text, 'html.parser')
Total=''
for index, data in enumerate(soup.select('ul.videos.user-playlist.playlist-listing li.full-width a.title')):
Play_List_Title.append(data['title'])
for index, data in enumerate(
soup.select('ul.videos.user-playlist.playlist-listing li.full-width a.playAllLink')):
Play_List_Href.append(data['href'])
for index in range(len(Play_List_Href)):
Total+=(Play_List_Title[index])+'\n'
Total+=(self.Prefix+Play_List_Href[index])+'\n'
return Total
# ----------------------------------------------------------------------------------------------
    # Fetch channels
def Get_Channel(self):
rs = requests.session()
res = rs.get('https://www.pornhub.com/channels')
soup = BeautifulSoup(res.text, 'html.parser')
Total=''
for index, data in enumerate(soup.select('div.container .clearfix.listChannelsWrapper ul.channelGridWrapper a.usernameLink')):
Total+=(data.string)+'\n'
Total+=('https://www.pornhub.com' + data['href'])+'\n'
return Total
# ----------------------------------------------------------------------------------------------
    # Generic video fetcher for the target URL list
def Get_Video(self,List_Index):
rs = requests.session()
res = rs.get(self.Target_url[List_Index])
soup = BeautifulSoup(res.text, 'html.parser')
Search = soup.select('ul#videoCategory a.linkVideoThumb')
Total = ''
for index, data in enumerate(Search):
print(data['title'])
Total += data['title'] + '\n'
print(self.Prefix + data['href'])
Total += self.Prefix + data['href'] + '\n'
return Total
# ----------------------------------------------------------------------------------------------
'''
    Global hot
'''
def Get_GlobalHot(self):
return self.Get_Video(1)
# ----------------------------------------------------------------------------------------------
'''
    Most viewed worldwide
'''
def Get_MostViewed(self):
return self.Get_Video(2)
# ----------------------------------------------------------------------------------------------
'''
    Top rated worldwide
'''
def Get_TopRated(self):
return self.Get_Video(3)
# ----------------------------------------------------------------------------------------------
'''
    Best homemade of the week worldwide
'''
def Get_Homemake(self):
return self.Get_Video(4)
# ----------------------------------------------------------------------------------------------
```
#### File: Python_Socket/Module/MainThread_Socket_Client.py
```python
import socket
class MainThread_Socket_Client:
def __init__(self,Port):
self.Socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.Host = socket.gethostname()
self.Port = Port
def Connect(self):
self.Socket.connect((self.Host,self.Port))
print(self.Socket.recv(1024))
self.Socket.close()
```
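A minimal companion server sketch for the client above (not part of the original repo): it binds to the same `gethostname()`/port scheme, sends one greeting for the client's `recv(1024)`, and closes:
```python
import socket

def serve_once(port: int) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()        # blocks until the client connects
    conn.send(b"hello from server")     # what the client's recv(1024) reads
    conn.close()
    server.close()

# e.g. serve_once(8888) in one process, then
# MainThread_Socket_Client(8888).Connect() in another.
```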
#### File: Python_Socket/Module/SocketServer_Server.py
```python
import socketserver
class TCP_SocketServer_Server(socketserver.StreamRequestHandler):
def handle(self) -> None:
Data = self.rfile.readline().strip()
print("{} wrote".format(self.client_address[0]))
print(Data)
self.wfile.write(Data.upper())
class UDP_SocketServer_Server(socketserver.BaseRequestHandler):
def handle(self) -> None:
Data = self.request[0].strip()
Socket = self.request[1]
print("{} wrote:".format(self.client_address[0]))
print(Data)
Socket.sendto(Data.upper(), self.client_address)
```
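A hedged sketch of wiring the two handlers above into running servers with the standard-library server classes; the ports are arbitrary placeholders and the handler classes are assumed to be in scope:
```python
import socketserver

if __name__ == "__main__":
    # Serve one TCP request, then one UDP datagram, and return.
    with socketserver.TCPServer(("localhost", 9998), TCP_SocketServer_Server) as tcp:
        tcp.handle_request()
    with socketserver.UDPServer(("localhost", 9999), UDP_SocketServer_Server) as udp:
        udp.handle_request()
```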
#### File: Python_SoundPlayer_JE/SoundPlayer_Core/SoundPlayerCore.py
```python
import datetime
from Models.TextToSpeech import TextToSpeech
from Models.Play_Sound import Play_Sound
class VoiceIOCore():
def __init__(self):
try:
self.TextToSpeech=TextToSpeech()
self.Play_Sound=Play_Sound()
except Exception as Errr:
print(Errr)
        print(datetime.datetime.now(),self.__class__,'Ready',sep=' ')
```
#### File: Python_Translate_JE/Models/GoogleTransl.py
```python
from googletrans import Translator
class GoogleTransl():
def __init__(self):
self.TextTranslate=Translator()
    # Translate to Traditional Chinese
def Translate(self,Text):
return self.TextTranslate.translate(Text,dest='zh-tw').text
```
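A minimal usage sketch; the `googletrans` package calls an external web API, so this needs network access and a compatible library version:
```python
translator = GoogleTransl()
print(translator.Translate("Hello, world"))  # prints the Traditional Chinese rendering
```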
#### File: SREFinalProjectWebBackend/APIs/RestfulAPI.py
```python
import os
from flask import Flask, session
from flask_cors import cross_origin
from APIs.APIBlueprints.Login import Login
from APIs.APIBlueprints.Register import Register
app = Flask(__name__)
app.register_blueprint(Login.Login)
app.register_blueprint(Register.Register)
app.secret_key = os.urandom(16)
'''
All records: GET + name
A specific record: GET + name + id
Create one record: POST + name
Update a specific record: PUT + name + id
Delete a specific record: DELETE + name + id
'''
# Catch-all route for unmatched paths
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def CatchAll(path):
    return f'Path: {path} does not exist'
@app.route(r'/', methods=['GET', 'POST'])
@cross_origin()
def MainPage():
    if session.get("LoginState") is None:
return "Not Login"
return "Main"
if __name__ == "__main__":
app.run(debug=True)
```
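A hedged sketch of a blueprint that follows the verb conventions listed in the docstring above; `User` is a hypothetical resource and the dict-backed storage is purely for exposition:
```python
from flask import Blueprint, jsonify, request

User = Blueprint("User", __name__)   # hypothetical resource
_users = {}                          # in-memory store, for exposition only

@User.route("/user", methods=["GET"])
def list_users():
    return jsonify(_users)           # all records

@User.route("/user/<int:user_id>", methods=["GET"])
def get_user(user_id):
    return jsonify(_users.get(user_id, {}))  # one specific record

@User.route("/user", methods=["POST"])
def create_user():
    user_id = len(_users) + 1
    _users[user_id] = request.get_json()
    return jsonify({"id": user_id}), 201

@User.route("/user/<int:user_id>", methods=["PUT"])
def update_user(user_id):
    _users[user_id] = request.get_json()
    return jsonify({"id": user_id})

@User.route("/user/<int:user_id>", methods=["DELETE"])
def delete_user(user_id):
    _users.pop(user_id, None)
    return "", 204
```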
#### File: SREFinalProjectWebBackend/Core/SQLite_Core.py
```python
import datetime
from Module.Sqlite_Control import Sqlite_Control
class SQLite_Core():
def __init__(self, DB_Name='test.db', Table_Name='Test'):
try:
self.Sqlite_Control = Sqlite_Control(DB_Name, Table_Name)
except Exception as Errr:
print(datetime.datetime.now(), 'I JE-Database Error', sep=' ')
raise Errr
self.Table_Name = Table_Name
self.Values_Count = 1
self.SQLite_Cursor = self.Sqlite_Control.cursor
self.SQLite_Connect = self.Sqlite_Control.connect
print(datetime.datetime.now(), self.__class__, 'Ready', sep=' ')
# ----------------------------------------------------------------------------------------------
def Set_Table_Name(self, Table_Name):
self.Table_Name = Table_Name
def Set_Value_Count(self, Value_Count):
        self.Values_Count = Value_Count
self.Sqlite_Control.Value_Count = Value_Count
# ----------------------------------------------------------------------------------------------
def ValueError_Log(self, Print):
print(datetime.datetime.now(), 'I JE-Database Error', sep=' ')
raise ValueError(Print)
# ----------------------------------------------------------------------------------------------
    # Create a table
def Create_Table(self, SQL_Command):
self.Sqlite_Control.Create_Table(SQL_Command)
# ----------------------------------------------------------------------------------------------
    # Insert statement: INSERT INTO table(column, column) VALUES (value, value)
def Insert_Into(self, *args, Field=None):
if Field == None:
if self.Values_Count == 1:
SQL_Command = '''INSERT INTO ''' + self.Table_Name + ''' VALUES (?)'''
else:
SQL_Command = '''INSERT INTO ''' + self.Table_Name + ''' VALUES (''' + '?,' * (
self.Values_Count - 1) + '?' + ''')'''
else:
if self.Values_Count == 1:
SQL_Command = '''INSERT INTO ''' + self.Table_Name + '''(''' + Field + ''') VALUES (?)'''
else:
SQL_Command = '''INSERT INTO ''' + self.Table_Name + '''(''' + Field + ''') VALUES (''' + '?,' * (
self.Values_Count - 1) + '?' + ''')'''
self.Sqlite_Control.Insert_Into(SQL_Command, args)
# ----------------------------------------------------------------------------------------------
    # INSERT OR IGNORE: skip the insert if the row already exists
def Insert_Into_Ignore(self, *args, Field=None):
if Field == None:
if self.Values_Count == 1:
SQL_Command = '''INSERT OR IGNORE INTO ''' + self.Table_Name + ''' VALUES (?)'''
else:
SQL_Command = '''INSERT OR IGNORE INTO ''' + self.Table_Name + ''' VALUES (''' + '?,' * (
self.Values_Count - 1) + '?' + ''')'''
else:
if self.Values_Count == 1:
SQL_Command = '''INSERT OR IGNORE INTO ''' + self.Table_Name + '''(''' + Field + ''') VALUES (?)'''
else:
SQL_Command = '''INSERT OR IGNORE INTO ''' + self.Table_Name + '''(''' + Field + ''') VALUES (''' + '?,' * (
self.Values_Count - 1) + '?' + ''')'''
self.Sqlite_Control.Insert_Into_Ignore(SQL_Command, args)
# ----------------------------------------------------------------------------------------------
    # REPLACE INTO: replace the row if it already exists
def Insert_Into_Replace(self, *args, Field=None):
if Field == None:
if self.Values_Count == 1:
SQL_Command = '''REPLACE INTO ''' + self.Table_Name + ''' VALUES (?)'''
else:
SQL_Command = '''REPLACE INTO ''' + self.Table_Name + ''' VALUES (''' + '?,' * (
self.Values_Count - 1) + '?' + ''')'''
else:
if self.Values_Count == 1:
                SQL_Command = '''REPLACE INTO ''' + self.Table_Name + '''(''' + Field + ''') VALUES (?)'''
else:
SQL_Command = '''REPLACE INTO ''' + self.Table_Name + '''(''' + Field + ''') VALUES (''' + '?,' * (
self.Values_Count - 1) + '?' + ''')'''
self.Sqlite_Control.Insert_Into_Replace(SQL_Command, args)
# ----------------------------------------------------------------------------------------------
    # Update statement
def UPDATE(self, *args, Field, Where_What=None):
SQL_Command = '''UPDATE ''' + self.Table_Name + ''' SET ''' + Field + '''=? WHERE ''' + Where_What + '''=?'''
self.Sqlite_Control.UPDATE(SQL_Command, args)
# ----------------------------------------------------------------------------------------------
    # Delete statement: DELETE FROM table WHERE condition; without WHERE all rows are deleted but the table structure remains, as if the table had just been created
# SQL_Command= """DELETE FROM student WHERE id = 1;"""
def DELETE(self, Field, *args):
SQL_Command = '''DELETE FROM ''' + self.Table_Name + ''' WHERE ''' + Field + ''' =? '''
self.Sqlite_Control.DELETE(SQL_Command, args)
# ----------------------------------------------------------------------------------------------
    # Select statement: SELECT columns (or * for all columns) FROM table
# SQL_Command="""SELECT id,name from student;"""
def Select_From(self, *args):
if self.Values_Count == 1:
SQL_Command = '''SELECT ? FROM ''' + self.Table_Name
return self.Sqlite_Control.Select_From(SQL_Command, args)
else:
SQL_Command = '''SELECT ''' + '?,' * (self.Values_Count - 1) + '?' + ''' FROM ''' + self.Table_Name
return self.Sqlite_Control.Select_From(SQL_Command, args)
# ----------------------------------------------------------------------------------------------
    # Find the distinct values in the table
def Select_Distinct(self, *args):
if self.Values_Count == 1:
SQL_Command = '''SELECT DISTINCT ? FROM ''' + self.Table_Name
return self.Sqlite_Control.Select_Distinct(SQL_Command, args)
else:
SQL_Command = '''SELECT DISTINCT''' + '?,' * (self.Values_Count - 1) + '?' + ''' FROM ''' + self.Table_Name
return self.Sqlite_Control.Select_Distinct(SQL_Command, args)
# ----------------------------------------------------------------------------------------------
    # SELECT * FROM table WHERE condition; without a condition all rows are returned
# SQL_Command= """SELECT * FROM student WHERE name="小明";"""
def Select_Where(self, Field, *args):
if self.Values_Count == 1:
SQL_Command = '''SELECT ? FROM ''' + self.Table_Name + ''' WHERE ''' + Field + '''=?'''
return self.Sqlite_Control.Select_Where(Field, SQL_Command, args)
else:
SQL_Command = '''SELECT ''' + '?,' * (
self.Values_Count - 1) + '?' + ''' FROM ''' + self.Table_Name + ''' WHERE ''' + Field + '''=?'''
return self.Sqlite_Control.Select_Where(Field, SQL_Command, args)
# ----------------------------------------------------------------------------------------------
    # Roll back to the last commit
def Rollback(self):
self.Sqlite_Control.Rollback()
# ----------------------------------------------------------------------------------------------
    # Drop the table
# SQL_Command="""DROP TABLE student;"""
def Drop(self):
        SQL_Command = '''DROP TABLE ''' + self.Table_Name  # Drop the table (harsher than DELETE: the table structure is removed as well)
self.Sqlite_Control.Drop(SQL_Command, self.Table_Name)
# ----------------------------------------------------------------------------------------------
# 關閉
def Close(self):
self.Sqlite_Control.Close()
```
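A hedged usage sketch of `SQLite_Core`; the `Sqlite_Control` module is not shown here, so this only illustrates the intended calling pattern (table and values are placeholders):
```python
db = SQLite_Core(DB_Name="test.db", Table_Name="Student")
db.Create_Table("CREATE TABLE IF NOT EXISTS Student (id INTEGER, name TEXT)")
db.Set_Value_Count(2)                 # two placeholders per row
db.Insert_Into(1, "Alice")            # INSERT INTO Student VALUES (?,?)
db.UPDATE("Bob", 1, Field="name", Where_What="id")
db.Close()
```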
#### File: SREFinalProjectWebBackend/Module/Gmail_API.py
```python
import base64
import logging
# Import the email modules we'll need
import mimetypes
import os
import os.path
import pickle
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import errors
from googleapiclient.discovery import build
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
class Gmail_API():
def Get_Service(self):
"""Gets an authorized Gmail API service instance.
Returns:
            An authorized Gmail API service instance.
"""
# If modifying these scopes, delete the file token.pickle.
SCOPES = [
'https://www.googleapis.com/auth/gmail.readonly',
'https://www.googleapis.com/auth/gmail.send',
]
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
r'../client_secret.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('gmail', 'v1', credentials=creds)
return service
def Want_Send_Message(self, service, sender, message):
"""Send an email message.
Args:
service: Authorized Gmail API service instance.
            sender: User's email address. The special value "me"
can be used to indicate the authenticated user.
message: Message to be sent.
Returns:
Sent Message.
"""
try:
sent_message = (service.users().messages().send(userId=sender, body=message)
.execute())
logging.info('Message Id: %s', sent_message['id'])
return sent_message
except errors.HttpError as error:
logging.error('An HTTP error occurred: %s', error)
def Create_Message(self, sender, to, subject, message_text, Use_Html=False):
"""Create a message for an email.
Args:
sender: Email address of the sender.
to: Email address of the receiver.
subject: The subject of the email message.
message_text: The text of the email message.
Returns:
An object containing a base64url encoded email object.
"""
if Use_Html:
message = MIMEText(message_text, 'html')
else:
message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
s = message.as_string()
b = base64.urlsafe_b64encode(s.encode('utf-8'))
return {'raw': b.decode('utf-8')}
def Create_Message_With_Attachment(self, sender, to, subject, message_text, file, Use_Html=False):
"""Create a message for an email.
Args:
sender: Email address of the sender.
to: Email address of the receiver.
subject: The subject of the email message.
message_text: The text of the email message.
file: The path to the file to be attached.
Returns:
An object containing a base64url encoded email object.
"""
message = MIMEMultipart()
message['to'] = to
message['from'] = sender
message['subject'] = subject
if Use_Html:
msg = MIMEText(message_text, 'html')
else:
msg = MIMEText(message_text)
message.attach(msg)
content_type, encoding = mimetypes.guess_type(file)
if content_type is None or encoding is not None:
content_type = 'application/octet-stream'
main_type, sub_type = content_type.split('/', 1)
if main_type == 'text':
fp = open(file, 'rb')
msg = MIMEText(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'image':
fp = open(file, 'rb')
msg = MIMEImage(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'audio':
fp = open(file, 'rb')
msg = MIMEAudio(fp.read(), _subtype=sub_type)
fp.close()
else:
fp = open(file, 'rb')
msg = MIMEBase(main_type, sub_type)
msg.set_payload(fp.read())
fp.close()
filename = os.path.basename(file)
msg.add_header('Content-Disposition', 'attachment', filename=filename)
message.attach(msg)
return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()}
def Send_Mail_Basic(self, From="Mail_Address", To="Mail_Address", Subject="Test subject", Body="Test body",
UseHTML=False):
logging.basicConfig(
format="[%(levelname)s] %(message)s",
level=logging.INFO
)
try:
service = self.Get_Service()
message = self.Create_Message(From, To, Subject, Body, Use_Html=UseHTML)
self.Want_Send_Message(service, From, message)
except Exception as e:
logging.error(e)
raise
def Send_Mail_Attach(self, From="Mail_Address", To="Mail_Address", Subject="Test subject", Body="Test body",
Attach_File='File_Path', UseHTML=False):
logging.basicConfig(
format="[%(levelname)s] %(message)s",
level=logging.INFO
)
try:
service = self.Get_Service()
# param From,To,Subject,Body,Attach_File
message = self.Create_Message_With_Attachment(From, To, Subject, Body, Attach_File, Use_Html=UseHTML)
# Service Sender,Message
self.Want_Send_Message(service, From, message)
except Exception as e:
logging.error(e)
raise
```
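A hedged usage sketch of `Gmail_API`; it requires a valid `client_secret.json`, opens a browser for OAuth on first run, and the addresses below are placeholders:
```python
mailer = Gmail_API()
mailer.Send_Mail_Basic(
    From="me",                     # "me" denotes the authenticated user
    To="recipient@example.com",    # placeholder address
    Subject="Test subject",
    Body="<b>Hello</b>",
    UseHTML=True,
)
```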
#### File: je_old_repo/TaiwanMapMarkerDockerServer_JE/websocket_server.py
```python
from je_websocket import websocket_server
from taiwan_map_marker.resource import create_database
from taiwan_map_marker.resource import marker_sql
create_database()
async def pre_process(websocket, message):
process_string = message.split(" ")
if process_string[0] == "select":
result = marker_sql.select_form()
for i in range(len(result)):
await websocket.send(str(result[i]))
await websocket.send("data done")
elif process_string[0] == "insert":
marker_sql.insert_into(None,
process_string[1],
process_string[2],
process_string[3],
process_string[4])
elif process_string[0] == "exit":
await websocket.close()
elif process_string[0] == "ping":
await websocket.send("pong")
server = websocket_server("websocket_server", 30005, pre_process, pre_process=True)
```
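A hedged client sketch for the space-separated text protocol above, written with the third-party `websockets` package since `je_websocket`'s client API is not shown; the host and port are assumptions:
```python
import asyncio
import websockets

async def query_markers():
    async with websockets.connect("ws://localhost:30005") as ws:
        await ws.send("select")                 # ask for all markers
        while True:
            reply = await ws.recv()
            if reply == "data done":            # server's end-of-data marker
                break
            print(reply)
        await ws.send("exit")                   # ask the server to close

asyncio.run(query_markers())
```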
#### File: je_old_repo/TaiwanWeatherCrawlerDockerServer_JE/websocket_server.py
```python
import sys
from je_websocket import websocket_server
from taiwan_weather_crawler.resource import create_database
from taiwan_weather_crawler.resource import get_crawler_F_C0032_005_data
from taiwan_weather_crawler.resource import get_crawler_W_C0033_001_data
from taiwan_weather_crawler.resource import scheduler
from taiwan_weather_crawler.resource import sql_hazard_weather
from taiwan_weather_crawler.resource import sql_one_weak_weather
job = scheduler.add_job(get_crawler_F_C0032_005_data, "interval", minutes=60)
job1 = scheduler.add_job(get_crawler_W_C0033_001_data, "interval", minutes=60)
create_database()
get_crawler_F_C0032_005_data()
get_crawler_W_C0033_001_data()
scheduler.start()
async def pre_process(websocket, message):
process_string = message.split(" ")
print("processed message", process_string)
if process_string[0] == "select":
if process_string[1] == "one_week_weather":
result = sql_one_weak_weather.select_where('LocationName', process_string[2])
for i in range(len(result)):
await websocket.send(str(result[i]))
elif process_string[1] == "hazard_weather":
result = sql_hazard_weather.select_where('LocationName', process_string[2])
for i in range(len(result)):
await websocket.send(str(result[i]))
elif process_string[0] == "selectAll":
if process_string[1] == "one_week_weather":
result = sql_one_weak_weather.select_form()
for i in range(len(result)):
await websocket.send(str(result[i]))
await websocket.send("data done")
elif process_string[1] == "hazard_weather":
result = sql_hazard_weather.select_form()
for i in range(len(result)):
await websocket.send(str(result[i]))
await websocket.send("data done")
elif process_string[0] == "exit":
print("Connection is closed", websocket, sep="\t")
await websocket.close()
elif process_string[0] == "ping":
await websocket.send("pong")
else:
print("Unknown command", process_string, file=sys.stderr)
server = websocket_server("websocket_server", 30003, pre_process, pre_process=True)
```
#### File: rst/directives/misc.py
```python
__docformat__ = 'reStructuredText'
import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils.utils.error_reporting import locale_encoding
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
class Include(Directive):
"""
Include content read from a separate source file.
Content may be parsed by the parser, or included as a literal
block. The encoding of the included file can be specified. Only
a part of the given file argument may be included by specifying
start and end line or text to match before and/or after the text
to be used.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'literal': directives.flag,
'code': directives.unchanged,
'encoding': directives.encoding,
'parser': directives.parser_name,
'tab-width': int,
'start-line': int,
'end-line': int,
'start-after': directives.unchanged_required,
'end-before': directives.unchanged_required,
# ignored except for 'literal' or 'code':
'number-lines': directives.unchanged, # integer or None
'class': directives.class_option,
'name': directives.unchanged}
standard_include_path = os.path.join(os.path.dirname(states.__file__),
'include')
def run(self):
"""Include a file as part of the content of this reST file."""
if not self.state.document.settings.file_insertion_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = os.path.dirname(os.path.abspath(source))
path = directives.path(self.arguments[0])
if path.startswith('<') and path.endswith('>'):
path = os.path.join(self.standard_include_path, path[1:-1])
path = os.path.normpath(os.path.join(source_dir, path))
path = utils.relative_path(None, path)
path = nodes.reprunicode(path)
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler=self.state.document.settings.input_encoding_error_handler
tab_width = self.options.get(
'tab-width', self.state.document.settings.tab_width)
try:
self.state.document.settings.record_dependencies.add(path)
include_file = io.FileInput(source_path=path,
encoding=encoding,
error_handler=e_handler)
except UnicodeEncodeError as error:
raise self.severe(u'Problems with "%s" directive path:\n'
'Cannot encode input file path "%s" '
'(wrong locale?).' %
(self.name, SafeString(path)))
except IOError as error:
raise self.severe(u'Problems with "%s" directive path:\n%s.' %
(self.name, ErrorString(error)))
# Get to-be-included content
startline = self.options.get('start-line', None)
endline = self.options.get('end-line', None)
try:
if startline or (endline is not None):
lines = include_file.readlines()
rawtext = ''.join(lines[startline:endline])
else:
rawtext = include_file.read()
except UnicodeError as error:
raise self.severe(u'Problem with "%s" directive:\n%s' %
(self.name, ErrorString(error)))
# start-after/end-before: no restrictions on newlines in match-text,
# and no restrictions on matching inside lines vs. line boundaries
after_text = self.options.get('start-after', None)
if after_text:
# skip content in rawtext before *and incl.* a matching text
after_index = rawtext.find(after_text)
if after_index < 0:
raise self.severe('Problem with "start-after" option of "%s" '
'directive:\nText not found.' % self.name)
rawtext = rawtext[after_index + len(after_text):]
before_text = self.options.get('end-before', None)
if before_text:
# skip content in rawtext after *and incl.* a matching text
before_index = rawtext.find(before_text)
if before_index < 0:
raise self.severe('Problem with "end-before" option of "%s" '
'directive:\nText not found.' % self.name)
rawtext = rawtext[:before_index]
include_lines = statemachine.string2lines(rawtext, tab_width,
convert_whitespace=True)
for i, line in enumerate(include_lines):
if len(line) > self.state.document.settings.line_length_limit:
raise self.warning('"%s": line %d exceeds the'
' line-length-limit.' % (path, i+1))
if 'literal' in self.options:
# Don't convert tabs to spaces, if `tab_width` is negative.
if tab_width >= 0:
text = rawtext.expandtabs(tab_width)
else:
text = rawtext
literal_block = nodes.literal_block(rawtext, source=path,
classes=self.options.get('class', []))
literal_block.line = 1
self.add_name(literal_block)
if 'number-lines' in self.options:
try:
startline = int(self.options['number-lines'] or 1)
except ValueError:
raise self.error(':number-lines: with non-integer '
'start value')
endline = startline + len(include_lines)
if text.endswith('\n'):
text = text[:-1]
tokens = NumberLines([([], text)], startline, endline)
for classes, value in tokens:
if classes:
literal_block += nodes.inline(value, value,
classes=classes)
else:
literal_block += nodes.Text(value)
else:
literal_block += nodes.Text(text)
return [literal_block]
if 'code' in self.options:
self.options['source'] = path
# Don't convert tabs to spaces, if `tab_width` is negative:
if tab_width < 0:
include_lines = rawtext.splitlines()
codeblock = CodeBlock(self.name,
[self.options.pop('code')], # arguments
self.options,
include_lines, # content
self.lineno,
self.content_offset,
self.block_text,
self.state,
self.state_machine)
return codeblock.run()
if 'parser' in self.options:
parser = self.options['parser']()
# parse into a new (dummy) document
document = utils.new_document(path, self.state.document.settings)
parser.parse('\n'.join(include_lines), document)
return document.children
# include as rST source
#
# Prevent circular inclusion:
source = utils.relative_path(None, source)
clip_options = (startline, endline, before_text, after_text)
include_log = self.state.document.include_log
if not include_log: # new document:
# log entries: (<source>, <clip-options>, <insertion end index>)
include_log = [(source, (None,None,None,None), sys.maxsize/2)]
# cleanup: we may have passed the last inclusion(s):
include_log = [entry for entry in include_log
if entry[2] >= self.lineno]
if (path, clip_options) in [(pth, opt)
for (pth, opt, e) in include_log]:
raise self.warning('circular inclusion in "%s" directive: %s'
% (self.name, ' < '.join([path] + [pth for (pth, opt, e)
in include_log[::-1]])))
# include as input
self.state_machine.insert_input(include_lines, path)
# update include-log
include_log.append((path, clip_options, self.lineno))
self.state.document.include_log = [(pth, opt, e+len(include_lines)+2)
for (pth, opt, e) in include_log]
return []
class Raw(Directive):
"""
    Pass through content unchanged.
    Content is included in the output based on the type argument.
Content may be included inline (content section of directive) or
imported from a file or url.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding}
has_content = True
def run(self):
if (not self.state.document.settings.raw_enabled
or (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options))):
raise self.warning('"%s" directive disabled.' % self.name)
attributes = {'format': ' '.join(self.arguments[0].lower().split())}
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler=self.state.document.settings.input_encoding_error_handler
if self.content:
if 'file' in self.options or 'url' in self.options:
raise self.error(
'"%s" directive may not both specify an external file '
'and have content.' % self.name)
text = '\n'.join(self.content)
elif 'file' in self.options:
if 'url' in self.options:
raise self.error(
'The "file" and "url" options may not be simultaneously '
'specified for the "%s" directive.' % self.name)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
path = os.path.normpath(os.path.join(source_dir,
self.options['file']))
path = utils.relative_path(None, path)
try:
raw_file = io.FileInput(source_path=path,
encoding=encoding,
error_handler=e_handler)
# TODO: currently, raw input files are recorded as
# dependencies even if not used for the chosen output format.
self.state.document.settings.record_dependencies.add(path)
except IOError as error:
raise self.severe(u'Problems with "%s" directive path:\n%s.'
% (self.name, ErrorString(error)))
try:
text = raw_file.read()
except UnicodeError as error:
raise self.severe(u'Problem with "%s" directive:\n%s'
% (self.name, ErrorString(error)))
attributes['source'] = path
elif 'url' in self.options:
source = self.options['url']
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
if sys.version_info >= (3, 0):
from urllib.request import urlopen
from urllib.error import URLError
else:
from urllib2 import urlopen, URLError
try:
raw_text = urlopen(source).read()
except (URLError, IOError, OSError) as error:
raise self.severe(u'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], ErrorString(error)))
raw_file = io.StringInput(source=raw_text, source_path=source,
encoding=encoding,
error_handler=e_handler)
try:
text = raw_file.read()
except UnicodeError as error:
raise self.severe(u'Problem with "%s" directive:\n%s'
% (self.name, ErrorString(error)))
attributes['source'] = source
else:
# This will always fail because there is no content.
self.assert_has_content()
raw_node = nodes.raw('', text, **attributes)
(raw_node.source,
raw_node.line) = self.state_machine.get_source_and_line(self.lineno)
return [raw_node]
class Replace(Directive):
has_content = True
def run(self):
if not isinstance(self.state, states.SubstitutionDef):
raise self.error(
'Invalid context: the "%s" directive can only be used within '
'a substitution definition.' % self.name)
self.assert_has_content()
text = '\n'.join(self.content)
element = nodes.Element(text)
self.state.nested_parse(self.content, self.content_offset,
element)
# element might contain [paragraph] + system_message(s)
node = None
messages = []
for elem in element:
if not node and isinstance(elem, nodes.paragraph):
node = elem
elif isinstance(elem, nodes.system_message):
elem['backrefs'] = []
messages.append(elem)
else:
return [
self.state_machine.reporter.error(
'Error in "%s" directive: may contain a single paragraph '
'only.' % (self.name), line=self.lineno) ]
if node:
return messages + node.children
return messages
class Unicode(Directive):
r"""
Convert Unicode character codes (numbers) to characters. Codes may be
decimal numbers, hexadecimal numbers (prefixed by ``0x``, ``x``, ``\x``,
``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style numeric character
entities (e.g. ``☮``). Text following ".." is a comment and is
ignored. Spaces are ignored, and any other text remains as-is.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'trim': directives.flag,
'ltrim': directives.flag,
'rtrim': directives.flag}
comment_pattern = re.compile(r'( |\n|^)\.\. ')
def run(self):
if not isinstance(self.state, states.SubstitutionDef):
raise self.error(
'Invalid context: the "%s" directive can only be used within '
'a substitution definition.' % self.name)
substitution_definition = self.state_machine.node
if 'trim' in self.options:
substitution_definition.attributes['ltrim'] = 1
substitution_definition.attributes['rtrim'] = 1
if 'ltrim' in self.options:
substitution_definition.attributes['ltrim'] = 1
if 'rtrim' in self.options:
substitution_definition.attributes['rtrim'] = 1
codes = self.comment_pattern.split(self.arguments[0])[0].split()
element = nodes.Element()
for code in codes:
try:
decoded = directives.unicode_code(code)
except ValueError as error:
raise self.error(u'Invalid character code: %s\n%s'
% (code, ErrorString(error)))
element += nodes.Text(decoded)
return element.children
class Class(Directive):
"""
Set a "class" attribute on the directive content or the next element.
When applied to the next element, a "pending" element is inserted, and a
transform does the work later.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
has_content = True
def run(self):
try:
class_value = directives.class_option(self.arguments[0])
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node_list = []
if self.content:
container = nodes.Element()
self.state.nested_parse(self.content, self.content_offset,
container)
for node in container:
node['classes'].extend(class_value)
node_list.extend(container.children)
else:
pending = nodes.pending(
misc.ClassAttribute,
{'class': class_value, 'directive': self.name},
self.block_text)
self.state_machine.document.note_pending(pending)
node_list.append(pending)
return node_list
class Role(Directive):
has_content = True
argument_pattern = re.compile(r'(%s)\s*(\(\s*(%s)\s*\)\s*)?$'
% ((states.Inliner.simplename,) * 2))
def run(self):
"""Dynamically create and register a custom interpreted text role."""
if self.content_offset > self.lineno or not self.content:
raise self.error('"%s" directive requires arguments on the first '
'line.' % self.name)
args = self.content[0]
match = self.argument_pattern.match(args)
if not match:
raise self.error('"%s" directive arguments not valid role names: '
'"%s".' % (self.name, args))
new_role_name = match.group(1)
base_role_name = match.group(3)
messages = []
if base_role_name:
base_role, messages = roles.role(
base_role_name, self.state_machine.language, self.lineno,
self.state.reporter)
if base_role is None:
error = self.state.reporter.error(
'Unknown interpreted text role "%s".' % base_role_name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return messages + [error]
else:
base_role = roles.generic_custom_role
assert not hasattr(base_role, 'arguments'), (
'Supplemental directive arguments for "%s" directive not '
'supported (specified by "%r" role).' % (self.name, base_role))
try:
converted_role = convert_directive_function(base_role)
(arguments, options, content, content_offset) = (
self.state.parse_directive_block(
self.content[1:], self.content_offset, converted_role,
option_presets={}))
except states.MarkupError as detail:
error = self.state_machine.reporter.error(
'Error in "%s" directive:\n%s.' % (self.name, detail),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return messages + [error]
if 'class' not in options:
try:
options['class'] = directives.class_option(new_role_name)
except ValueError as detail:
error = self.state_machine.reporter.error(
u'Invalid argument for "%s" directive:\n%s.'
% (self.name, SafeString(detail)), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return messages + [error]
role = roles.CustomRole(new_role_name, base_role, options, content)
roles.register_local_role(new_role_name, role)
return messages
class DefaultRole(Directive):
"""Set the default interpreted text role."""
optional_arguments = 1
final_argument_whitespace = False
def run(self):
if not self.arguments:
if '' in roles._roles:
# restore the "default" default role
del roles._roles['']
return []
role_name = self.arguments[0]
role, messages = roles.role(role_name, self.state_machine.language,
self.lineno, self.state.reporter)
if role is None:
error = self.state.reporter.error(
'Unknown interpreted text role "%s".' % role_name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return messages + [error]
roles._roles[''] = role
return messages
class Title(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
def run(self):
self.state_machine.document['title'] = self.arguments[0]
return []
class Date(Directive):
has_content = True
def run(self):
if not isinstance(self.state, states.SubstitutionDef):
raise self.error(
'Invalid context: the "%s" directive can only be used within '
'a substitution definition.' % self.name)
format_str = '\n'.join(self.content) or '%Y-%m-%d'
        if sys.version_info < (3, 0):
try:
format_str = format_str.encode(locale_encoding or 'utf-8')
except UnicodeEncodeError:
raise self.warning(u'Cannot encode date format string '
u'with locale encoding "%s".' % locale_encoding)
# @@@
# Use timestamp from the `SOURCE_DATE_EPOCH`_ environment variable?
# Pro: Docutils-generated documentation
# can easily be part of `reproducible software builds`__
#
# __ https://reproducible-builds.org/
#
# Con: Changes the specs, hard to predict behaviour,
#
# See also the discussion about \date \time \year in TeX
# http://tug.org/pipermail/tex-k/2016-May/002704.html
# source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
# if (source_date_epoch):
# text = time.strftime(format_str,
# time.gmtime(int(source_date_epoch)))
# else:
text = time.strftime(format_str)
        if sys.version_info < (3, 0):
# `text` is a byte string that may contain non-ASCII characters:
try:
text = text.decode(locale_encoding or 'utf-8')
except UnicodeDecodeError:
text = text.decode(locale_encoding or 'utf-8', 'replace')
raise self.warning(u'Error decoding "%s"'
u'with locale encoding "%s".' % (text, locale_encoding))
return [nodes.Text(text)]
class TestDirective(Directive):
"""This directive is useful only for testing purposes."""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'option': directives.unchanged_required}
has_content = True
def run(self):
if self.content:
text = '\n'.join(self.content)
info = self.state_machine.reporter.info(
'Directive processed. Type="%s", arguments=%r, options=%r, '
'content:' % (self.name, self.arguments, self.options),
nodes.literal_block(text, text), line=self.lineno)
else:
info = self.state_machine.reporter.info(
'Directive processed. Type="%s", arguments=%r, options=%r, '
'content: None' % (self.name, self.arguments, self.options),
line=self.lineno)
return [info]
# Old-style, functional definition:
#
# def directive_test_function(name, arguments, options, content, lineno,
# content_offset, block_text, state, state_machine):
# """This directive is useful only for testing purposes."""
# if content:
# text = '\n'.join(content)
# info = state_machine.reporter.info(
# 'Directive processed. Type="%s", arguments=%r, options=%r, '
# 'content:' % (name, arguments, options),
# nodes.literal_block(text, text), line=lineno)
# else:
# info = state_machine.reporter.info(
# 'Directive processed. Type="%s", arguments=%r, options=%r, '
# 'content: None' % (name, arguments, options), line=lineno)
# return [info]
#
# directive_test_function.arguments = (0, 1, 1)
# directive_test_function.options = {'option': directives.unchanged_required}
# directive_test_function.content = 1
```
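A small sketch of the `Include` directive above in action: docutils splices `other.txt` into the document at parse time (file names are placeholders):
```python
from docutils.core import publish_string

with open("other.txt", "w") as f:
    f.write("Included paragraph.\n")

source = """\
Before the include.

.. include:: other.txt

After the include.
"""
# The included paragraph appears between the other two in the parsed tree.
print(publish_string(source, writer_name="pseudoxml").decode())
```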
#### File: _internal/cli/progress_bars.py
```python
import itertools
import sys
from signal import SIGINT, default_int_handler, signal
from typing import Any, Dict, List
from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar
from pip._vendor.progress.spinner import Spinner
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.logging import get_indentation
from pip._internal.utils.misc import format_size
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
def _select_progress_class(preferred, fallback):
# type: (Bar, Bar) -> Bar
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", ""),
getattr(preferred, "fill", ""),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
"".join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any
class InterruptibleMixin:
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
"""
Save the original SIGINT handler for later.
"""
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
# type: () -> None
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super().finish() # type: ignore
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame): # type: ignore
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class SilentBar(Bar):
def update(self):
# type: () -> None
pass
class BlueEmojiBar(IncrementalBar):
suffix = "%(percent)d%%"
bar_prefix = " "
bar_suffix = " "
phases = ("\U0001F539", "\U0001F537", "\U0001F535")
class DownloadProgressMixin:
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
self.message = (" " * (get_indentation() + 2)) + self.message # type: str
@property
def downloaded(self):
# type: () -> str
return format_size(self.index) # type: ignore
@property
def download_speed(self):
# type: () -> str
# Avoid zero division errors...
if self.avg == 0.0: # type: ignore
return "..."
return format_size(1 / self.avg) + "/s" # type: ignore
@property
def pretty_eta(self):
# type: () -> str
if self.eta: # type: ignore
return f"eta {self.eta_td}" # type: ignore
return ""
def iter(self, it): # type: ignore
for x in it:
yield x
# B305 is incorrectly raised here
# https://github.com/PyCQA/flake8-bugbear/issues/59
self.next(len(x)) # noqa: B305
self.finish()
class WindowsMixin:
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor: # type: ignore
self.hide_cursor = False
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file) # type: ignore
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar):
pass
class DownloadSilentBar(BaseDownloadProgressBar, SilentBar):
pass
class DownloadBar(BaseDownloadProgressBar, Bar):
pass
class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar):
pass
class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar):
pass
class DownloadProgressSpinner(
WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner
):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
# type: () -> str
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
# type: () -> None
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = "".join(
[
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
]
)
self.writeln(line)
BAR_TYPES = {
"off": (DownloadSilentBar, DownloadSilentBar),
"on": (DefaultDownloadProgressBar, DownloadProgressSpinner),
"ascii": (DownloadBar, DownloadProgressSpinner),
"pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner),
"emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner),
}
def DownloadProgressProvider(progress_bar, max=None): # type: ignore
if max is None or max == 0:
return BAR_TYPES[progress_bar][1]().iter
else:
return BAR_TYPES[progress_bar][0](max=max).iter
```
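A hedged sketch of driving one of the bars above over an iterable of byte chunks, mirroring how `DownloadProgressProvider` is consumed elsewhere in pip; the chunk data is synthetic:
```python
chunks = [b"x" * 1024 for _ in range(100)]          # synthetic download
progress = DownloadProgressProvider("on", max=len(chunks) * 1024)
for chunk in progress(chunks):
    pass   # consume each chunk; the bar advances by len(chunk)
```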
#### File: _internal/network/utils.py
```python
from typing import Dict, Iterator
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._internal.exceptions import NetworkConnectionError
# The following comments and HTTP headers were originally added by
# <NAME> in git commit 22c562429a61bb77172039e480873fb239dd8c03.
#
# We use Accept-Encoding: identity here because requests defaults to
# accepting compressed responses. This breaks in a variety of ways
# depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible file
# and will leave the file alone and with an empty Content-Encoding
# - Some servers will notice that the file is already compressed and
# will leave the file alone, adding a Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take a file
# that's already been compressed and compress it again, and set
# the Content-Encoding: gzip header
# By setting this to request only the identity encoding we're hoping
# to eliminate the third case. Hopefully there does not exist a server
# which when given a file will notice it is already compressed and that
# you're not asking for a compressed file and will then decompress it
# before sending because if that's the case I don't think it'll ever be
# possible to make this work.
HEADERS = {'Accept-Encoding': 'identity'} # type: Dict[str, str]
def raise_for_status(resp):
# type: (Response) -> None
http_error_msg = ''
if isinstance(resp.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings.
try:
reason = resp.reason.decode('utf-8')
except UnicodeDecodeError:
reason = resp.reason.decode('iso-8859-1')
else:
reason = resp.reason
if 400 <= resp.status_code < 500:
http_error_msg = (
f'{resp.status_code} Client Error: {reason} for url: {resp.url}')
elif 500 <= resp.status_code < 600:
http_error_msg = (
f'{resp.status_code} Server Error: {reason} for url: {resp.url}')
if http_error_msg:
raise NetworkConnectionError(http_error_msg, response=resp)
def response_chunks(response, chunk_size=CONTENT_CHUNK_SIZE):
# type: (Response, int) -> Iterator[bytes]
"""Given a requests Response, provide the data chunks.
"""
try:
# Special case for urllib3.
for chunk in response.raw.stream(
chunk_size,
# We use decode_content=False here because we don't
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False,
):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = response.raw.read(chunk_size)
if not chunk:
break
yield chunk
```
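Taken together, the two helpers above support a streaming download loop. A hedged sketch, assuming the `requests` package is available and using a placeholder URL and filename:
```python
# Sketch only: stream a file to disk while checking the status code first.
import requests

resp = requests.get("https://example.com/file.bin",  # placeholder URL
                    headers=HEADERS, stream=True)
raise_for_status(resp)  # raises NetworkConnectionError on 4xx/5xx
with open("file.bin", "wb") as fh:  # placeholder filename
    for chunk in response_chunks(resp):
        fh.write(chunk)
```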
#### File: _internal/utils/models.py
```python
import operator
from typing import Any, Callable, Type
class KeyBasedCompareMixin:
"""Provides comparison capabilities that is based on a key"""
__slots__ = ["_compare_key", "_defining_class"]
def __init__(self, key, defining_class):
# type: (Any, Type[KeyBasedCompareMixin]) -> None
self._compare_key = key
self._defining_class = defining_class
def __hash__(self):
# type: () -> int
return hash(self._compare_key)
def __lt__(self, other):
# type: (Any) -> bool
return self._compare(other, operator.__lt__)
def __le__(self, other):
# type: (Any) -> bool
return self._compare(other, operator.__le__)
def __gt__(self, other):
# type: (Any) -> bool
return self._compare(other, operator.__gt__)
def __ge__(self, other):
# type: (Any) -> bool
return self._compare(other, operator.__ge__)
def __eq__(self, other):
# type: (Any) -> bool
return self._compare(other, operator.__eq__)
def _compare(self, other, method):
# type: (Any, Callable[[Any, Any], bool]) -> bool
if not isinstance(other, self._defining_class):
return NotImplemented
return method(self._compare_key, other._compare_key)
```
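A short sketch of how the mixin is meant to be used: subclass it, pass a comparison key and the defining class to `__init__`, and instances become hashable and totally ordered by that key. `PlainVersion` below is a hypothetical name for illustration:
```python
class PlainVersion(KeyBasedCompareMixin):  # hypothetical example class
    def __init__(self, *parts):
        self.parts = parts
        super().__init__(key=parts, defining_class=PlainVersion)

    def __repr__(self):
        return "PlainVersion%r" % (self.parts,)

versions = [PlainVersion(1, 10), PlainVersion(1, 2), PlainVersion(2, 0)]
# Sorts by the key tuple: (1, 2) < (1, 10) < (2, 0)
print(sorted(versions))
```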
#### File: _vendor/distlib/metadata.py
```python
from __future__ import unicode_literals
import codecs
from email import message_from_file
import json
import logging
import re
from . import DistlibException, __version__
from .compat import StringIO, string_types, text_type
from .markers import interpret
from .util import extract_by_key, get_extras
from .version import get_scheme, PEP440_VERSION_RE
logger = logging.getLogger(__name__)
class MetadataMissingError(DistlibException):
"""A required metadata is missing"""
class MetadataConflictError(DistlibException):
"""Attempt to read or write metadata fields that are conflictual."""
class MetadataUnrecognizedVersionError(DistlibException):
"""Unknown metadata version number."""
class MetadataInvalidError(DistlibException):
"""A metadata value is invalid"""
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# preferred version. Hopefully this will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
_LINE_PREFIX_1_2 = re.compile('\n \\|')
_LINE_PREFIX_PRE_1_2 = re.compile('\n ')
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License')
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License', 'Classifier', 'Download-URL', 'Obsoletes',
'Provides', 'Requires')
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
'Download-URL')
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External')
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
'Obsoletes-Dist', 'Requires-External', 'Maintainer',
'Maintainer-email', 'Project-URL')
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External', 'Private-Version',
'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
'Provides-Extra')
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
'Setup-Requires-Dist', 'Extension')
# See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in
# the metadata. Include them in the tuple literal below to allow them
# (for now).
_566_FIELDS = _426_FIELDS + ('Description-Content-Type',
'Requires', 'Provides')
_566_MARKERS = ('Description-Content-Type',)
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)
_ALL_FIELDS.update(_566_FIELDS)
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
if version == '1.0':
return _241_FIELDS
elif version == '1.1':
return _314_FIELDS
elif version == '1.2':
return _345_FIELDS
elif version in ('1.3', '2.1'):
return _345_FIELDS + _566_FIELDS
elif version == '2.0':
return _426_FIELDS
raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
"""Detect the best version depending on the fields used."""
def _has_marker(keys, markers):
for marker in markers:
if marker in keys:
return True
return False
keys = []
for key, value in fields.items():
if value in ([], 'UNKNOWN', None):
continue
keys.append(key)
possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1']
# first let's try to see if a field is not part of one of the versions
for key in keys:
if key not in _241_FIELDS and '1.0' in possible_versions:
possible_versions.remove('1.0')
logger.debug('Removed 1.0 due to %s', key)
if key not in _314_FIELDS and '1.1' in possible_versions:
possible_versions.remove('1.1')
logger.debug('Removed 1.1 due to %s', key)
if key not in _345_FIELDS and '1.2' in possible_versions:
possible_versions.remove('1.2')
logger.debug('Removed 1.2 due to %s', key)
if key not in _566_FIELDS and '1.3' in possible_versions:
possible_versions.remove('1.3')
logger.debug('Removed 1.3 due to %s', key)
if key not in _566_FIELDS and '2.1' in possible_versions:
if key != 'Description': # In 2.1, description allowed after headers
possible_versions.remove('2.1')
logger.debug('Removed 2.1 due to %s', key)
if key not in _426_FIELDS and '2.0' in possible_versions:
possible_versions.remove('2.0')
logger.debug('Removed 2.0 due to %s', key)
# possible_versions contains qualified versions
if len(possible_versions) == 1:
return possible_versions[0] # found !
elif len(possible_versions) == 0:
logger.debug('Out of options - unknown metadata set: %s', fields)
raise MetadataConflictError('Unknown metadata set')
# let's see if one unique marker is found
is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS)
is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1:
raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields')
# we have the choice, 1.0, or 1.2, or 2.0
# - 1.0 has a broken Summary field but works with all tools
# - 1.1 is to avoid
# - 1.2 fixes Summary but has little adoption
# - 2.0 adds more features and is very new
if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0:
# we couldn't find any specific marker
if PKG_INFO_PREFERRED_VERSION in possible_versions:
return PKG_INFO_PREFERRED_VERSION
if is_1_1:
return '1.1'
if is_1_2:
return '1.2'
if is_2_1:
return '2.1'
return '2.0'
# This follows the rules about transforming keys as described in
# https://www.python.org/dev/peps/pep-0566/#id17
_ATTR2FIELD = {
name.lower().replace("-", "_"): name for name in _ALL_FIELDS
}
_FIELD2ATTR = {field: attr for attr, field in _ATTR2FIELD.items()}
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python',)
_VERSION_FIELDS = ('Version',)
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
'Requires', 'Provides', 'Obsoletes-Dist',
'Provides-Dist', 'Requires-Dist', 'Requires-External',
'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
'Provides-Extra', 'Extension')
_LISTTUPLEFIELDS = ('Project-URL',)
_ELEMENTSFIELD = ('Keywords',)
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
_MISSING = object()
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
def _get_name_and_version(name, version, for_filename=False):
"""Return the distribution name with version.
If for_filename is true, return a filename-escaped form."""
if for_filename:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version)
class LegacyMetadata(object):
"""The legacy metadata of a release.
Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj*, a file-like object with metadata as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document the mapping API and UNKNOWN default key
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependencies = None
self.scheme = scheme
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, fileobj, name, value):
fileobj.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or
self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _remove_line_prefix(self, value):
if self.metadata_version in ('1.0', '1.1'):
return _LINE_PREFIX_PRE_1_2.sub('\n', value)
else:
return _LINE_PREFIX_1_2.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
#
# Public API
#
# dependencies = property(_get_dependencies, _set_dependencies)
def get_fullname(self, filesafe=False):
"""Return the distribution name with version.
If filesafe is true, return a filename-escaped form."""
return _get_name_and_version(self['Name'], self['Version'], filesafe)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
# When reading, get all the fields we can
for field in _ALL_FIELDS:
if field not in msg:
continue
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
# PEP 566 specifies that the body be used for the description, if
# available
body = msg.get_payload()
self["Description"] = body if body else self["Description"]
# logger.debug('Attempting to set metadata for %s', self)
# self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
if self.metadata_version in ('1.0', '1.1'):
values = values.replace('\n', '\n ')
else:
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
they are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
"'%s': '%s' is not valid (field '%s')",
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning("'%s': '%s' is not a valid version (field '%s')",
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning("'%s': '%s' is not a valid version (field '%s')",
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
value = self._fields[name]
if isinstance(value, string_types):
return value.split(',')
return self._fields[name]
def check(self, strict=False):
"""Check if the metadata is compliant. If strict is True then raise if
no Name or Version are provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS,
scheme.is_valid_constraint_list),
(_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append("Wrong value for '%s': %s" % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
This is as per https://www.python.org/dev/peps/pep-0566/#id17.
"""
self.set_metadata_version()
fields = _version2fieldlist(self['Metadata-Version'])
data = {}
for field_name in fields:
if not skip_missing or field_name in self._fields:
key = _FIELD2ATTR[field_name]
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
return data
def add_requirements(self, requirements):
if self['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in self:
del self[field]
self['Requires-Dist'] += requirements
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name,
self.version)
METADATA_FILENAME = 'pydist.json'
WHEEL_METADATA_FILENAME = 'metadata.json'
LEGACY_METADATA_FILENAME = 'METADATA'
class Metadata(object):
"""
The metadata of a release. This implementation uses 2.0 (JSON)
metadata where possible. If not possible, it wraps a LegacyMetadata
instance which handles the key-value metadata format.
"""
METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$')
NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
VERSION_MATCHER = PEP440_VERSION_RE
SUMMARY_MATCHER = re.compile('.{1,2047}')
METADATA_VERSION = '2.0'
GENERATOR = 'distlib (%s)' % __version__
MANDATORY_KEYS = {
'name': (),
'version': (),
'summary': ('legacy',),
}
INDEX_KEYS = ('name version license summary description author '
'author_email keywords platform home_page classifiers '
'download_url')
DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
'dev_requires provides meta_requires obsoleted_by '
'supports_environments')
SYNTAX_VALIDATORS = {
'metadata_version': (METADATA_VERSION_MATCHER, ()),
'name': (NAME_MATCHER, ('legacy',)),
'version': (VERSION_MATCHER, ('legacy',)),
'summary': (SUMMARY_MATCHER, ('legacy',)),
}
__slots__ = ('_legacy', '_data', 'scheme')
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._legacy = None
self._data = None
self.scheme = scheme
#import pdb; pdb.set_trace()
if mapping is not None:
try:
self._validate_mapping(mapping, scheme)
self._data = mapping
except MetadataUnrecognizedVersionError:
self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
self.validate()
else:
data = None
if path:
with open(path, 'rb') as f:
data = f.read()
elif fileobj:
data = fileobj.read()
if data is None:
# Initialised with no args - to be added
self._data = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
else:
if not isinstance(data, text_type):
data = data.decode('utf-8')
try:
self._data = json.loads(data)
self._validate_mapping(self._data, scheme)
except ValueError:
# Note: MetadataUnrecognizedVersionError does not
# inherit from ValueError (it's a DistlibException,
# which should not inherit from ValueError).
# The ValueError comes from the json.load - if that
# succeeds and we get a validation error, we want
# that to propagate
self._legacy = LegacyMetadata(fileobj=StringIO(data),
scheme=scheme)
self.validate()
common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
none_list = (None, list)
none_dict = (None, dict)
mapped_keys = {
'run_requires': ('Requires-Dist', list),
'build_requires': ('Setup-Requires-Dist', list),
'dev_requires': none_list,
'test_requires': none_list,
'meta_requires': none_list,
'extras': ('Provides-Extra', list),
'modules': none_list,
'namespaces': none_list,
'exports': none_dict,
'commands': none_dict,
'classifiers': ('Classifier', list),
'source_url': ('Download-URL', None),
'metadata_version': ('Metadata-Version', None),
}
del none_list, none_dict
def __getattribute__(self, key):
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, maker = mapped[key]
if self._legacy:
if lk is None:
result = None if maker is None else maker()
else:
result = self._legacy.get(lk)
else:
value = None if maker is None else maker()
if key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
result = self._data.get(key, value)
else:
# special cases for PEP 459
sentinel = object()
result = sentinel
d = self._data.get('extensions')
if d:
if key == 'commands':
result = d.get('python.commands', value)
elif key == 'classifiers':
d = d.get('python.details')
if d:
result = d.get(key, value)
else:
d = d.get('python.exports')
if not d:
d = self._data.get('python.exports')
if d:
result = d.get(key, value)
if result is sentinel:
result = value
elif key not in common:
result = object.__getattribute__(self, key)
elif self._legacy:
result = self._legacy.get(key)
else:
result = self._data.get(key)
return result
def _validate_value(self, key, value, scheme=None):
if key in self.SYNTAX_VALIDATORS:
pattern, exclusions = self.SYNTAX_VALIDATORS[key]
if (scheme or self.scheme) not in exclusions:
m = pattern.match(value)
if not m:
raise MetadataInvalidError("'%s' is an invalid value for "
"the '%s' property" % (value,
key))
def __setattr__(self, key, value):
self._validate_value(key, value)
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, _ = mapped[key]
if self._legacy:
if lk is None:
raise NotImplementedError
self._legacy[lk] = value
elif key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
self._data[key] = value
else:
# special cases for PEP 459
d = self._data.setdefault('extensions', {})
if key == 'commands':
d['python.commands'] = value
elif key == 'classifiers':
d = d.setdefault('python.details', {})
d[key] = value
else:
d = d.setdefault('python.exports', {})
d[key] = value
elif key not in common:
object.__setattr__(self, key, value)
else:
if key == 'keywords':
if isinstance(value, string_types):
value = value.strip()
if value:
value = value.split()
else:
value = []
if self._legacy:
self._legacy[key] = value
else:
self._data[key] = value
@property
def name_and_version(self):
return _get_name_and_version(self.name, self.version, True)
@property
def provides(self):
if self._legacy:
result = self._legacy['Provides-Dist']
else:
result = self._data.setdefault('provides', [])
s = '%s (%s)' % (self.name, self.version)
if s not in result:
result.append(s)
return result
@provides.setter
def provides(self, value):
if self._legacy:
self._legacy['Provides-Dist'] = value
else:
self._data['provides'] = value
def get_requirements(self, reqts, extras=None, env=None):
"""
Base method to get dependencies, given a set of extras
to satisfy and an optional environment context.
:param reqts: A list of sometimes-wanted dependencies,
perhaps dependent on extras and environment.
:param extras: A list of optional components being requested.
:param env: An optional environment for marker evaluation.
"""
if self._legacy:
result = reqts
else:
result = []
extras = get_extras(extras or [], self.extras)
for d in reqts:
if 'extra' not in d and 'environment' not in d:
# unconditional
include = True
else:
if 'extra' not in d:
# Not extra-dependent - only environment-dependent
include = True
else:
include = d.get('extra') in extras
if include:
# Not excluded because of extras, check environment
marker = d.get('environment')
if marker:
include = interpret(marker, env)
if include:
result.extend(d['requires'])
for key in ('build', 'dev', 'test'):
e = ':%s:' % key
if e in extras:
extras.remove(e)
# A recursive call, but it should terminate since 'test'
# has been removed from the extras
reqts = self._data.get('%s_requires' % key, [])
result.extend(self.get_requirements(reqts, extras=extras,
env=env))
return result
@property
def dictionary(self):
if self._legacy:
return self._from_legacy()
return self._data
@property
def dependencies(self):
if self._legacy:
raise NotImplementedError
else:
return extract_by_key(self._data, self.DEPENDENCY_KEYS)
@dependencies.setter
def dependencies(self, value):
if self._legacy:
raise NotImplementedError
else:
self._data.update(value)
def _validate_mapping(self, mapping, scheme):
if mapping.get('metadata_version') != self.METADATA_VERSION:
raise MetadataUnrecognizedVersionError()
missing = []
for key, exclusions in self.MANDATORY_KEYS.items():
if key not in mapping:
if scheme not in exclusions:
missing.append(key)
if missing:
msg = 'Missing metadata items: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for k, v in mapping.items():
self._validate_value(k, v, scheme)
def validate(self):
if self._legacy:
missing, warnings = self._legacy.check(True)
if missing or warnings:
logger.warning('Metadata: missing: %s, warnings: %s',
missing, warnings)
else:
self._validate_mapping(self._data, self.scheme)
def todict(self):
if self._legacy:
return self._legacy.todict(True)
else:
result = extract_by_key(self._data, self.INDEX_KEYS)
return result
def _from_legacy(self):
assert self._legacy and not self._data
result = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
lmd = self._legacy.todict(True) # skip missing ones
for k in ('name', 'version', 'license', 'summary', 'description',
'classifier'):
if k in lmd:
if k == 'classifier':
nk = 'classifiers'
else:
nk = k
result[nk] = lmd[k]
kw = lmd.get('Keywords', [])
if kw == ['']:
kw = []
result['keywords'] = kw
keys = (('requires_dist', 'run_requires'),
('setup_requires_dist', 'build_requires'))
for ok, nk in keys:
if ok in lmd and lmd[ok]:
result[nk] = [{'requires': lmd[ok]}]
result['provides'] = self.provides
author = {}
maintainer = {}
return result
LEGACY_MAPPING = {
'name': 'Name',
'version': 'Version',
('extensions', 'python.details', 'license'): 'License',
'summary': 'Summary',
'description': 'Description',
('extensions', 'python.project', 'project_urls', 'Home'): 'Home-page',
('extensions', 'python.project', 'contacts', 0, 'name'): 'Author',
('extensions', 'python.project', 'contacts', 0, 'email'): 'Author-email',
'source_url': 'Download-URL',
('extensions', 'python.details', 'classifiers'): 'Classifier',
}
def _to_legacy(self):
def process_entries(entries):
reqts = set()
for e in entries:
extra = e.get('extra')
env = e.get('environment')
rlist = e['requires']
for r in rlist:
if not env and not extra:
reqts.add(r)
else:
marker = ''
if extra:
marker = 'extra == "%s"' % extra
if env:
if marker:
marker = '(%s) and %s' % (env, marker)
else:
marker = env
reqts.add(';'.join((r, marker)))
return reqts
assert self._data and not self._legacy
result = LegacyMetadata()
nmd = self._data
# import pdb; pdb.set_trace()
for nk, ok in self.LEGACY_MAPPING.items():
if not isinstance(nk, tuple):
if nk in nmd:
result[ok] = nmd[nk]
else:
d = nmd
found = True
for k in nk:
try:
d = d[k]
except (KeyError, IndexError):
found = False
break
if found:
result[ok] = d
r1 = process_entries(self.run_requires + self.meta_requires)
r2 = process_entries(self.build_requires + self.dev_requires)
if self.extras:
result['Provides-Extra'] = sorted(self.extras)
result['Requires-Dist'] = sorted(r1)
result['Setup-Requires-Dist'] = sorted(r2)
# TODO: any other fields wanted
return result
def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
if [path, fileobj].count(None) != 1:
raise ValueError('Exactly one of path and fileobj is needed')
self.validate()
if legacy:
if self._legacy:
legacy_md = self._legacy
else:
legacy_md = self._to_legacy()
if path:
legacy_md.write(path, skip_unknown=skip_unknown)
else:
legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
else:
if self._legacy:
d = self._from_legacy()
else:
d = self._data
if fileobj:
json.dump(d, fileobj, ensure_ascii=True, indent=2,
sort_keys=True)
else:
with codecs.open(path, 'w', 'utf-8') as f:
json.dump(d, f, ensure_ascii=True, indent=2,
sort_keys=True)
def add_requirements(self, requirements):
if self._legacy:
self._legacy.add_requirements(requirements)
else:
run_requires = self._data.setdefault('run_requires', [])
always = None
for entry in run_requires:
if 'environment' not in entry and 'extra' not in entry:
always = entry
break
if always is None:
always = { 'requires': requirements }
run_requires.insert(0, always)
else:
rset = set(always['requires']) | set(requirements)
always['requires'] = sorted(rset)
def __repr__(self):
name = self.name or '(no name)'
version = self.version or 'no version'
return '<%s %s %s (%s)>' % (self.__class__.__name__,
self.metadata_version, name, version)
```
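A small sketch exercising the classes above: build a `LegacyMetadata` from a mapping (keys use the underscore attribute style) and let `_best_version` infer the metadata version. Field values are made up for illustration:
```python
md = LegacyMetadata(mapping={'name': 'demo', 'version': '0.1',
                             'summary': 'A demo distribution'})
print(md['Metadata-Version'])  # '1.1', the preferred legacy version
print(md.get_fullname())       # 'demo-0.1'
print(sorted(md.todict()))     # underscore-style keys, per PEP 566
```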
#### File: html5lib/treeadapters/genshi.py
```python
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def to_genshi(walker):
"""Convert a tree to a genshi tree
:arg walker: the treewalker to use to walk the tree to convert it
:returns: generator of genshi nodes
"""
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1)
```
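A hedged usage sketch for `to_genshi`, assuming the standalone `html5lib` and `genshi` packages are installed: parse a document, walk it, and re-emit it as a genshi event stream:
```python
import html5lib  # sketch only; requires html5lib and genshi

tree = html5lib.parse("<p>Hello</p>")
walker = html5lib.getTreeWalker("etree")
for kind, data, pos in to_genshi(walker(tree)):
    print(kind, data)  # e.g. START, (QName(...), Attrs(...)), TEXT, ...
```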
#### File: _vendor/pkg_resources/py31compat.py
```python
import os
import errno
import sys
from pip._vendor import six
def _makedirs_31(path, exist_ok=False):
try:
os.makedirs(path)
except OSError as exc:
if not exist_ok or exc.errno != errno.EEXIST:
raise
# rely on compatibility behavior until mode considerations
# and exists_ok considerations are disentangled.
# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663
needs_makedirs = (
six.PY2 or
(3, 4) <= sys.version_info < (3, 4, 1)
)
makedirs = _makedirs_31 if needs_makedirs else os.makedirs
```
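The shim exports a single `makedirs` that tolerates an existing directory on interpreters whose `os.makedirs` lacked a usable `exist_ok`. A quick sketch:
```python
import os.path
import tempfile

base = tempfile.mkdtemp()
target = os.path.join(base, "a", "b")
makedirs(target, exist_ok=True)
makedirs(target, exist_ok=True)  # second call is a no-op, not an error
```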
#### File: _vendor/resolvelib/providers.py
```python
class AbstractProvider(object):
"""Delegate class to provide requirement interface for the resolver."""
def identify(self, requirement_or_candidate):
"""Given a requirement, return an identifier for it.
This is used to identify a requirement, e.g. whether two requirements
should have their specifier parts merged.
"""
raise NotImplementedError
def get_preference(self, identifier, resolutions, candidates, information):
"""Produce a sort key for given requirement based on preference.
The preference is defined as "I think this requirement should be
resolved first". The lower the return value is, the more preferred
this group of arguments is.
:param identifier: An identifier as returned by ``identify()``. This
identifies the dependency, matches of which should be returned.
:param resolutions: Mapping of candidates currently pinned by the
resolver. Each key is an identifier, and the value a candidate.
The candidate may conflict with requirements from ``information``.
:param candidates: Mapping of each dependency's possible candidates.
Each value is an iterator of candidates.
:param information: Mapping of requirement information of each package.
Each value is an iterator of *requirement information*.
A *requirement information* instance is a named tuple with two members:
* ``requirement`` specifies a requirement contributing to the current
list of candidates.
* ``parent`` specifies the candidate that provides (depends on) the
requirement, or ``None`` to indicate a root requirement.
The preference could depend on various issues, including (not
necessarily in this order):
* Is this package pinned in the current resolution result?
* How relaxed is the requirement? Stricter ones should probably be
worked on first? (I don't know, actually.)
* How many possibilities are there to satisfy this requirement? Those
with few left should likely be worked on first, I guess?
* Are there any known conflicts for this requirement? We should
probably work on those with the most known conflicts.
A sortable value should be returned (this will be used as the ``key``
parameter of the built-in sorting function). The smaller the value is,
the more preferred this requirement is (i.e. the sorting function
is called with ``reverse=False``).
"""
raise NotImplementedError
def find_matches(self, identifier, requirements, incompatibilities):
"""Find all possible candidates that satisfy given constraints.
:param identifier: An identifier as returned by ``identify()``. This
identifies the dependency, matches of which should be returned.
:param requirements: A mapping of requirements that all returned
candidates must satisfy. Each key is an identifier, and the value
an iterator of requirements for that dependency.
:param incompatibilities: A mapping of known incompatibilities of
each dependency. Each key is an identifier, and the value an
iterator of incompatibilities known to the resolver. All
incompatibilities *must* be excluded from the return value.
This should try to get candidates based on the requirements' types.
For VCS, local, and archive requirements, the one-and-only match is
returned, and for a "named" requirement, the index(es) should be
consulted to find concrete candidates for this requirement.
The return value should produce candidates ordered by preference; the
most preferred candidate should come first. The return type may be one
of the following:
* A callable that returns an iterator that yields candidates.
* A collection of candidates.
* An iterable of candidates. This will be consumed immediately into a
list of candidates.
"""
raise NotImplementedError
def is_satisfied_by(self, requirement, candidate):
"""Whether the given requirement can be satisfied by a candidate.
The candidate is guaranteed to have been generated from the
requirement.
A boolean should be returned to indicate whether ``candidate`` is a
viable solution to the requirement.
"""
raise NotImplementedError
def get_dependencies(self, candidate):
"""Get dependencies of a candidate.
This should return a collection of requirements that `candidate`
specifies as its dependencies.
"""
raise NotImplementedError
class AbstractResolver(object):
"""The thing that performs the actual resolution work."""
base_exception = Exception
def __init__(self, provider, reporter):
self.provider = provider
self.reporter = reporter
def resolve(self, requirements, **kwargs):
"""Take a collection of constraints, spit out the resolution result.
This returns a representation of the final resolution state, with one
guaranteed attribute ``mapping`` that contains resolved candidates as
values. The keys are their respective identifiers.
:param requirements: A collection of constraints.
:param kwargs: Additional keyword arguments that subclasses may accept.
:raises: ``self.base_exception`` or its subclass.
"""
raise NotImplementedError
```
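To make the interface concrete, here is a toy provider over a hard-coded index, where a requirement is a `(name, allowed_versions)` pair and a candidate is a `(name, version)` pair. All names and data below are illustrative; this is not pip's real provider:
```python
INDEX = {"spam": [1, 2], "eggs": [1]}                        # toy package index
DEPS = {("spam", 1): [], ("spam", 2): [("eggs", {1})], ("eggs", 1): []}

class ToyProvider(AbstractProvider):
    def identify(self, requirement_or_candidate):
        return requirement_or_candidate[0]

    def get_preference(self, identifier, resolutions, candidates, information):
        return identifier  # arbitrary but stable preference

    def find_matches(self, identifier, requirements, incompatibilities):
        allowed = set(INDEX[identifier])
        for _, versions in requirements[identifier]:
            allowed &= set(versions)
        banned = {c[1] for c in incompatibilities[identifier]}
        # Most preferred (highest version) first, as the docstring asks.
        return [(identifier, v) for v in sorted(allowed - banned, reverse=True)]

    def is_satisfied_by(self, requirement, candidate):
        return candidate[1] in requirement[1]

    def get_dependencies(self, candidate):
        return DEPS[candidate]
```
With a resolvelib release matching these signatures, `Resolver(ToyProvider(), BaseReporter()).resolve([("spam", {1, 2})])` should pin `spam` 2 and `eggs` 1.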
#### File: pkginfo/tests/test_index.py
```python
import unittest
class IndexTests(unittest.TestCase):
def _getTargetClass(self):
from pkginfo.index import Index
return Index
def _makeOne(self):
return self._getTargetClass()()
def test_empty(self):
index = self._makeOne()
self.assertEqual(len(index), 0)
self.assertEqual(len(index.keys()), 0)
self.assertEqual(len(index.values()), 0)
self.assertEqual(len(index.items()), 0)
def _makeDummy(self):
from pkginfo.distribution import Distribution
class DummyDistribution(Distribution):
name = 'dummy'
version = '1.0'
return DummyDistribution()
def test___getitem___miss(self):
index = self._makeOne()
self.assertRaises(KeyError, index.__getitem__, 'nonesuch')
def test___setitem___value_not_dist(self):
class NotDistribution:
name = 'dummy'
version = '1.0'
dummy = NotDistribution()
index = self._makeOne()
self.assertRaises(ValueError, index.__setitem__, 'dummy-1.0', dummy)
def test___setitem___bad_key(self):
index = self._makeOne()
dummy = self._makeDummy()
self.assertRaises(ValueError, index.__setitem__, 'nonesuch', dummy)
def test___setitem___valid_key(self):
index = self._makeOne()
dummy = self._makeDummy()
index['dummy-1.0'] = dummy
self.assertTrue(index['dummy-1.0'] is dummy)
self.assertEqual(len(index), 1)
self.assertEqual(len(index.keys()), 1)
self.assertEqual(list(index.keys())[0], 'dummy-1.0')
self.assertEqual(len(index.values()), 1)
self.assertEqual(list(index.values())[0], dummy)
self.assertEqual(len(index.items()), 1)
self.assertEqual(list(index.items())[0], ('dummy-1.0', dummy))
def test_add_not_dist(self):
index = self._makeOne()
class NotDistribution:
name = 'dummy'
version = '1.0'
dummy = NotDistribution()
self.assertRaises(ValueError, index.add, dummy)
def test_add_valid_dist(self):
index = self._makeOne()
dummy = self._makeDummy()
index.add(dummy)
self.assertTrue(index['dummy-1.0'] is dummy)
self.assertEqual(len(index), 1)
self.assertEqual(len(index.keys()), 1)
self.assertEqual(list(index.keys())[0], 'dummy-1.0')
self.assertEqual(len(index.values()), 1)
self.assertEqual(list(index.values())[0], dummy)
self.assertEqual(len(index.items()), 1)
self.assertEqual(list(index.items())[0], ('dummy-1.0', dummy))
```
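Mirroring the happy path in the tests above, a minimal usage sketch for the index (assumes the `pkginfo` package is installed):
```python
from pkginfo.distribution import Distribution
from pkginfo.index import Index

class DemoDistribution(Distribution):  # illustrative subclass
    name = 'demo'
    version = '1.0'

index = Index()
index.add(DemoDistribution())
print(index['demo-1.0'].version)  # '1.0'
```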
#### File: pygments/lexers/int_fiction.py
```python
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Generic
__all__ = ['Inform6Lexer', 'Inform6TemplateLexer', 'Inform7Lexer',
'Tads3Lexer']
class Inform6Lexer(RegexLexer):
"""
For `Inform 6 <http://inform-fiction.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Inform 6'
aliases = ['inform6', 'i6']
filenames = ['*.inf']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
_name = r'[a-zA-Z_]\w*'
# Inform 7 maps these four character classes to their ASCII
# equivalents. To support Inform 6 inclusions within Inform 7,
# Inform6Lexer maps them too.
_dash = '\\-\u2010-\u2014'
_dquote = '"\u201c\u201d'
_squote = "'\u2018\u2019"
_newline = '\\n\u0085\u2028\u2029'
tokens = {
'root': [
(r'\A(!%%[^%s]*[%s])+' % (_newline, _newline), Comment.Preproc,
'directive'),
default('directive')
],
'_whitespace': [
(r'\s+', Text),
(r'![^%s]*' % _newline, Comment.Single)
],
'default': [
include('_whitespace'),
(r'\[', Punctuation, 'many-values'), # Array initialization
(r':|(?=;)', Punctuation, '#pop'),
(r'<', Punctuation), # Second angle bracket in an action statement
default(('expression', '_expression'))
],
# Expressions
'_expression': [
include('_whitespace'),
(r'(?=sp\b)', Text, '#pop'),
(r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text,
('#pop', 'value')),
(r'\+\+|[%s]{1,2}(?!>)|~~?' % _dash, Operator),
(r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop')
],
'expression': [
include('_whitespace'),
(r'\(', Punctuation, ('expression', '_expression')),
(r'\)', Punctuation, '#pop'),
(r'\[', Punctuation, ('#pop', 'statements', 'locals')),
(r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation),
(r'\+\+|[%s]{2}(?!>)' % _dash, Operator),
(r',', Punctuation, '_expression'),
(r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash,
Operator, '_expression'),
(r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word,
'_expression'),
(r'sp\b', Name),
(r'\?~?', Name.Label, 'label?'),
(r'[@{]', Error),
default('#pop')
],
'_assembly-expression': [
(r'\(', Punctuation, ('#push', '_expression')),
(r'[\[\]]', Punctuation),
(r'[%s]>' % _dash, Punctuation, '_expression'),
(r'sp\b', Keyword.Pseudo),
(r';', Punctuation, '#pop:3'),
include('expression')
],
'_for-expression': [
(r'\)', Punctuation, '#pop:2'),
(r':', Punctuation, '#pop'),
include('expression')
],
'_keyword-expression': [
(r'(from|near|to)\b', Keyword, '_expression'),
include('expression')
],
'_list-expression': [
(r',', Punctuation, '#pop'),
include('expression')
],
'_object-expression': [
(r'has\b', Keyword.Declaration, '#pop'),
include('_list-expression')
],
# Values
'value': [
include('_whitespace'),
# Strings
(r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'),
(r'([%s])(@\{[0-9a-fA-F]*\})([%s])' % (_squote, _squote),
bygroups(String.Char, String.Escape, String.Char), '#pop'),
(r'([%s])(@.{2})([%s])' % (_squote, _squote),
bygroups(String.Char, String.Escape, String.Char), '#pop'),
(r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')),
(r'[%s]' % _dquote, String.Double, ('#pop', 'string')),
# Numbers
(r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' % (_dash, _dash),
Number.Float, '#pop'),
(r'\$[0-9a-fA-F]+', Number.Hex, '#pop'),
(r'\$\$[01]+', Number.Bin, '#pop'),
(r'[0-9]+', Number.Integer, '#pop'),
# Values prefixed by hashes
(r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'),
(r'(#g\$)(%s)' % _name,
bygroups(Operator, Name.Variable.Global), '#pop'),
(r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')),
(r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'),
(r'#', Name.Builtin, ('#pop', 'system-constant')),
# System functions
(words((
'child', 'children', 'elder', 'eldest', 'glk', 'indirect', 'metaclass',
'parent', 'random', 'sibling', 'younger', 'youngest'), suffix=r'\b'),
Name.Builtin, '#pop'),
# Metaclasses
(r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'),
# Veneer routines
(words((
'Box__Routine', 'CA__Pr', 'CDefArt', 'CInDefArt', 'Cl__Ms',
'Copy__Primitive', 'CP__Tab', 'DA__Pr', 'DB__Pr', 'DefArt', 'Dynam__String',
'EnglishNumber', 'Glk__Wrap', 'IA__Pr', 'IB__Pr', 'InDefArt', 'Main__',
'Meta__class', 'OB__Move', 'OB__Remove', 'OC__Cl', 'OP__Pr', 'Print__Addr',
'Print__PName', 'PrintShortName', 'RA__Pr', 'RA__Sc', 'RL__Pr', 'R_Process',
'RT__ChG', 'RT__ChGt', 'RT__ChLDB', 'RT__ChLDW', 'RT__ChPR', 'RT__ChPrintA',
'RT__ChPrintC', 'RT__ChPrintO', 'RT__ChPrintS', 'RT__ChPS', 'RT__ChR',
'RT__ChSTB', 'RT__ChSTW', 'RT__ChT', 'RT__Err', 'RT__TrPS', 'RV__Pr',
'Symb__Tab', 'Unsigned__Compare', 'WV__Pr', 'Z__Region'),
prefix='(?i)', suffix=r'\b'),
Name.Builtin, '#pop'),
# Other built-in symbols
(words((
'call', 'copy', 'create', 'DEBUG', 'destroy', 'DICT_CHAR_SIZE',
'DICT_ENTRY_BYTES', 'DICT_IS_UNICODE', 'DICT_WORD_SIZE', 'false',
'FLOAT_INFINITY', 'FLOAT_NAN', 'FLOAT_NINFINITY', 'GOBJFIELD_CHAIN',
'GOBJFIELD_CHILD', 'GOBJFIELD_NAME', 'GOBJFIELD_PARENT',
'GOBJFIELD_PROPTAB', 'GOBJFIELD_SIBLING', 'GOBJ_EXT_START',
'GOBJ_TOTAL_LENGTH', 'Grammar__Version', 'INDIV_PROP_START', 'INFIX',
'infix__watching', 'MODULE_MODE', 'name', 'nothing', 'NUM_ATTR_BYTES', 'print',
'print_to_array', 'recreate', 'remaining', 'self', 'sender', 'STRICT_MODE',
'sw__var', 'sys__glob0', 'sys__glob1', 'sys__glob2', 'sys_statusline_flag',
'TARGET_GLULX', 'TARGET_ZCODE', 'temp__global2', 'temp__global3',
'temp__global4', 'temp_global', 'true', 'USE_MODULES', 'WORDSIZE'),
prefix='(?i)', suffix=r'\b'),
Name.Builtin, '#pop'),
# Other values
(_name, Name, '#pop')
],
# Strings
'dictionary-word': [
(r'[~^]+', String.Escape),
(r'[^~^\\@({%s]+' % _squote, String.Single),
(r'[({]', String.Single),
(r'@\{[0-9a-fA-F]*\}', String.Escape),
(r'@.{2}', String.Escape),
(r'[%s]' % _squote, String.Single, '#pop')
],
'string': [
(r'[~^]+', String.Escape),
(r'[^~^\\@({%s]+' % _dquote, String.Double),
(r'[({]', String.Double),
(r'\\', String.Escape),
(r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' %
(_newline, _newline), String.Escape),
(r'@(\\\s*[%s]\s*)*\{((\\\s*[%s]\s*)*[0-9a-fA-F])*'
r'(\\\s*[%s]\s*)*\}' % (_newline, _newline, _newline),
String.Escape),
(r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' % (_newline, _newline),
String.Escape),
(r'[%s]' % _dquote, String.Double, '#pop')
],
'plain-string': [
(r'[^~^\\({\[\]%s]+' % _dquote, String.Double),
(r'[~^({\[\]]', String.Double),
(r'\\', String.Escape),
(r'[%s]' % _dquote, String.Double, '#pop')
],
# Names
'_constant': [
include('_whitespace'),
(_name, Name.Constant, '#pop'),
include('value')
],
'_global': [
include('_whitespace'),
(_name, Name.Variable.Global, '#pop'),
include('value')
],
'label?': [
include('_whitespace'),
(_name, Name.Label, '#pop'),
default('#pop')
],
'variable?': [
include('_whitespace'),
(_name, Name.Variable, '#pop'),
default('#pop')
],
# Values after hashes
'obsolete-dictionary-word': [
(r'\S\w*', String.Other, '#pop')
],
'system-constant': [
include('_whitespace'),
(_name, Name.Builtin, '#pop')
],
# Directives
'directive': [
include('_whitespace'),
(r'#', Punctuation),
(r';', Punctuation, '#pop'),
(r'\[', Punctuation,
('default', 'statements', 'locals', 'routine-name?')),
(words((
'abbreviate', 'endif', 'dictionary', 'ifdef', 'iffalse', 'ifndef', 'ifnot',
'iftrue', 'ifv3', 'ifv5', 'release', 'serial', 'switches', 'system_file',
'version'), prefix='(?i)', suffix=r'\b'),
Keyword, 'default'),
(r'(?i)(array|global)\b', Keyword,
('default', 'directive-keyword?', '_global')),
(r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')),
(r'(?i)class\b', Keyword,
('object-body', 'duplicates', 'class-name')),
(r'(?i)(constant|default)\b', Keyword,
('default', 'expression', '_constant')),
(r'(?i)(end\b)(.*)', bygroups(Keyword, Text)),
(r'(?i)(extend|verb)\b', Keyword, 'grammar'),
(r'(?i)fake_action\b', Keyword, ('default', '_constant')),
(r'(?i)import\b', Keyword, 'manifest'),
(r'(?i)(include|link|origsource)\b', Keyword,
('default', 'before-plain-string?')),
(r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')),
(r'(?i)message\b', Keyword, ('default', 'diagnostic')),
(r'(?i)(nearby|object)\b', Keyword,
('object-body', '_object-head')),
(r'(?i)property\b', Keyword,
('default', 'alias?', '_constant', 'property-keyword*')),
(r'(?i)replace\b', Keyword,
('default', 'routine-name?', 'routine-name?')),
(r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')),
(r'(?i)stub\b', Keyword, ('default', 'routine-name?')),
(r'(?i)trace\b', Keyword,
('default', 'trace-keyword?', 'trace-keyword?')),
(r'(?i)zcharacter\b', Keyword,
('default', 'directive-keyword?', 'directive-keyword?')),
(_name, Name.Class, ('object-body', '_object-head'))
],
# [, Replace, Stub
'routine-name?': [
include('_whitespace'),
(_name, Name.Function, '#pop'),
default('#pop')
],
'locals': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r'\*', Punctuation),
(r'"', String.Double, 'plain-string'),
(_name, Name.Variable)
],
# Array
'many-values': [
include('_whitespace'),
(r';', Punctuation),
(r'\]', Punctuation, '#pop'),
(r':', Error),
default(('expression', '_expression'))
],
# Attribute, Property
'alias?': [
include('_whitespace'),
(r'alias\b', Keyword, ('#pop', '_constant')),
default('#pop')
],
# Class, Object, Nearby
'class-name': [
include('_whitespace'),
(r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
(_name, Name.Class, '#pop')
],
'duplicates': [
include('_whitespace'),
(r'\(', Punctuation, ('#pop', 'expression', '_expression')),
default('#pop')
],
'_object-head': [
(r'[%s]>' % _dash, Punctuation),
(r'(class|has|private|with)\b', Keyword.Declaration, '#pop'),
include('_global')
],
'object-body': [
include('_whitespace'),
(r';', Punctuation, '#pop:2'),
(r',', Punctuation),
(r'class\b', Keyword.Declaration, 'class-segment'),
(r'(has|private|with)\b', Keyword.Declaration),
(r':', Error),
default(('_object-expression', '_expression'))
],
'class-segment': [
include('_whitespace'),
(r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
(_name, Name.Class),
default('value')
],
# Extend, Verb
'grammar': [
include('_whitespace'),
(r'=', Punctuation, ('#pop', 'default')),
(r'\*', Punctuation, ('#pop', 'grammar-line')),
default('_directive-keyword')
],
'grammar-line': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r'[/*]', Punctuation),
(r'[%s]>' % _dash, Punctuation, 'value'),
(r'(noun|scope)\b', Keyword, '=routine'),
default('_directive-keyword')
],
'=routine': [
include('_whitespace'),
(r'=', Punctuation, 'routine-name?'),
default('#pop')
],
# Import
'manifest': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r',', Punctuation),
(r'(?i)global\b', Keyword, '_global'),
default('_global')
],
# Include, Link, Message
'diagnostic': [
include('_whitespace'),
(r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')),
default(('#pop', 'before-plain-string?', 'directive-keyword?'))
],
'before-plain-string?': [
include('_whitespace'),
(r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string')),
default('#pop')
],
'message-string': [
(r'[~^]+', String.Escape),
include('plain-string')
],
# Keywords used in directives
'_directive-keyword!': [
include('_whitespace'),
(words((
'additive', 'alias', 'buffer', 'class', 'creature', 'data', 'error', 'fatalerror',
'first', 'has', 'held', 'initial', 'initstr', 'last', 'long', 'meta', 'multi',
'multiexcept', 'multiheld', 'multiinside', 'noun', 'number', 'only', 'private',
'replace', 'reverse', 'scope', 'score', 'special', 'string', 'table', 'terminating',
'time', 'topic', 'warning', 'with'), suffix=r'\b'),
Keyword, '#pop'),
(r'static\b', Keyword),
(r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop')
],
'_directive-keyword': [
include('_directive-keyword!'),
include('value')
],
'directive-keyword?': [
include('_directive-keyword!'),
default('#pop')
],
'property-keyword*': [
include('_whitespace'),
(r'(additive|long)\b', Keyword),
default('#pop')
],
'trace-keyword?': [
include('_whitespace'),
(words((
'assembly', 'dictionary', 'expressions', 'lines', 'linker',
'objects', 'off', 'on', 'symbols', 'tokens', 'verbs'), suffix=r'\b'),
Keyword, '#pop'),
default('#pop')
],
# Statements
'statements': [
include('_whitespace'),
(r'\]', Punctuation, '#pop'),
(r'[;{}]', Punctuation),
(words((
'box', 'break', 'continue', 'default', 'give', 'inversion',
'new_line', 'quit', 'read', 'remove', 'return', 'rfalse', 'rtrue',
'spaces', 'string', 'until'), suffix=r'\b'),
Keyword, 'default'),
(r'(do|else)\b', Keyword),
(r'(font|style)\b', Keyword,
('default', 'miscellaneous-keyword?')),
(r'for\b', Keyword, ('for', '(?')),
(r'(if|switch|while)', Keyword,
('expression', '_expression', '(?')),
(r'(jump|save|restore)\b', Keyword, ('default', 'label?')),
(r'objectloop\b', Keyword,
('_keyword-expression', 'variable?', '(?')),
(r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'),
(r'\.', Name.Label, 'label?'),
(r'@', Keyword, 'opcode'),
(r'#(?![agrnw]\$|#)', Punctuation, 'directive'),
(r'<', Punctuation, 'default'),
(r'move\b', Keyword,
('default', '_keyword-expression', '_expression')),
default(('default', '_keyword-expression', '_expression'))
],
'miscellaneous-keyword?': [
include('_whitespace'),
(r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b',
Keyword, '#pop'),
(r'(a|A|an|address|char|name|number|object|property|string|the|'
r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo,
'#pop'),
(r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function,
'#pop'),
default('#pop')
],
'(?': [
include('_whitespace'),
(r'\(', Punctuation, '#pop'),
default('#pop')
],
'for': [
include('_whitespace'),
(r';', Punctuation, ('_for-expression', '_expression')),
default(('_for-expression', '_expression'))
],
'print-list': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r':', Error),
default(('_list-expression', '_expression', '_list-expression', 'form'))
],
'form': [
include('_whitespace'),
(r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')),
default('#pop')
],
# Assembly
'opcode': [
include('_whitespace'),
(r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')),
(_name, Keyword, 'operands')
],
'operands': [
(r':', Error),
default(('_assembly-expression', '_expression'))
]
}
def get_tokens_unprocessed(self, text):
# 'in' is either a keyword or an operator.
# If the token two tokens after 'in' is ')', 'in' is a keyword:
# objectloop(a in b)
# Otherwise, it is an operator:
# objectloop(a in b && true)
objectloop_queue = []
objectloop_token_count = -1
previous_token = None
for index, token, value in RegexLexer.get_tokens_unprocessed(self,
text):
if previous_token is Name.Variable and value == 'in':
objectloop_queue = [[index, token, value]]
objectloop_token_count = 2
elif objectloop_token_count > 0:
if token not in Comment and token not in Text:
objectloop_token_count -= 1
objectloop_queue.append((index, token, value))
else:
if objectloop_token_count == 0:
if objectloop_queue[-1][2] == ')':
objectloop_queue[0][1] = Keyword
while objectloop_queue:
yield objectloop_queue.pop(0)
objectloop_token_count = -1
yield index, token, value
if token not in Comment and token not in Text:
previous_token = token
while objectloop_queue:
yield objectloop_queue.pop(0)
def analyse_text(text):
"""We try to find a keyword which seem relatively common, unfortunately
there is a decent overlap with Smalltalk keywords otherwise here.."""
result = 0
if re.search(r'\borigsource\b', text, re.IGNORECASE):
result += 0.05
return result
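# Usage sketch for the lexer above, written as comments so the listing stays
# valid Python (assumes the standalone ``pygments`` package is installed):
#
#   from pygments import highlight
#   from pygments.formatters import NullFormatter
#   print(highlight('Constant Story "Demo";', Inform6Lexer(), NullFormatter()))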
class Inform7Lexer(RegexLexer):
"""
For `Inform 7 <http://inform7.com/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Inform 7'
aliases = ['inform7', 'i7']
filenames = ['*.ni', '*.i7x']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
_dash = Inform6Lexer._dash
_dquote = Inform6Lexer._dquote
_newline = Inform6Lexer._newline
_start = r'\A|(?<=[%s])' % _newline
# There are three variants of Inform 7, differing in how to
# interpret at signs and braces in I6T. In top-level inclusions, at
# signs in the first column are inweb syntax. In phrase definitions
# and use options, tokens in braces are treated as I7. Use options
# also interpret "{N}".
tokens = {}
token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']
for level in token_variants:
tokens[level] = {
'+i6-root': list(Inform6Lexer.tokens['root']),
'+i6t-root': [ # For Inform6TemplateLexer
(r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc,
('directive', '+p'))
],
'root': [
(r'(\|?\s)+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'[%s]' % _dquote, Generic.Heading,
('+main', '+titling', '+titling-string')),
default(('+main', '+heading?'))
],
'+titling-string': [
(r'[^%s]+' % _dquote, Generic.Heading),
(r'[%s]' % _dquote, Generic.Heading, '#pop')
],
'+titling': [
(r'\[', Comment.Multiline, '+comment'),
(r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading),
(r'[%s]' % _dquote, Generic.Heading, '+titling-string'),
(r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote),
Text, ('#pop', '+heading?')),
(r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'),
(r'[|%s]' % _newline, Generic.Heading)
],
'+main': [
(r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text),
(r'[%s]' % _dquote, String.Double, '+text'),
(r':', Text, '+phrase-definition'),
(r'(?i)\bas\b', Text, '+use-option'),
(r'\[', Comment.Multiline, '+comment'),
(r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
bygroups(Punctuation,
using(this, state=('+i6-root', 'directive'),
i6t='+i6t-not-inline'), Punctuation)),
(r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' %
(_start, _dquote, _newline), Text, '+heading?'),
(r'(?i)[a(|%s]' % _newline, Text)
],
'+phrase-definition': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
bygroups(Punctuation,
using(this, state=('+i6-root', 'directive',
'default', 'statements'),
i6t='+i6t-inline'), Punctuation), '#pop'),
default('#pop')
],
'+use-option': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
bygroups(Punctuation,
using(this, state=('+i6-root', 'directive'),
i6t='+i6t-use-option'), Punctuation), '#pop'),
default('#pop')
],
'+comment': [
(r'[^\[\]]+', Comment.Multiline),
(r'\[', Comment.Multiline, '#push'),
(r'\]', Comment.Multiline, '#pop')
],
'+text': [
(r'[^\[%s]+' % _dquote, String.Double),
(r'\[.*?\]', String.Interpol),
(r'[%s]' % _dquote, String.Double, '#pop')
],
'+heading?': [
(r'(\|?\s)+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'),
(r'[%s]{1,3}' % _dash, Text),
(r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline,
Generic.Heading, '#pop'),
default('#pop')
],
'+documentation-heading': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'(?i)documentation\s+', Text, '+documentation-heading2'),
default('#pop')
],
'+documentation-heading2': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'[%s]{4}\s' % _dash, Text, '+documentation'),
default('#pop:2')
],
'+documentation': [
(r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' %
(_start, _newline), Generic.Heading),
(r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline),
Generic.Subheading),
(r'((%s)\t.*?[%s])+' % (_start, _newline),
using(this, state='+main')),
(r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text),
(r'\[', Comment.Multiline, '+comment'),
],
'+i6t-not-inline': [
(r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
Comment.Preproc),
(r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline),
Comment.Preproc),
(r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
Generic.Heading, '+p')
],
'+i6t-use-option': [
include('+i6t-not-inline'),
(r'(\{)(N)(\})', bygroups(Punctuation, Text, Punctuation))
],
'+i6t-inline': [
(r'(\{)(\S[^}]*)?(\})',
bygroups(Punctuation, using(this, state='+main'),
Punctuation))
],
'+i6t': [
(r'(\{[%s])(![^}]*)(\}?)' % _dash,
bygroups(Punctuation, Comment.Single, Punctuation)),
(r'(\{[%s])(lines)(:)([^}]*)(\}?)' % _dash,
bygroups(Punctuation, Keyword, Punctuation, Text,
Punctuation), '+lines'),
(r'(\{[%s])([^:}]*)(:?)([^}]*)(\}?)' % _dash,
bygroups(Punctuation, Keyword, Punctuation, Text,
Punctuation)),
(r'(\(\+)(.*?)(\+\)|\Z)',
bygroups(Punctuation, using(this, state='+main'),
Punctuation))
],
'+p': [
(r'[^@]+', Comment.Preproc),
(r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
Comment.Preproc, '#pop'),
(r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc),
(r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
Generic.Heading),
(r'@', Comment.Preproc)
],
'+lines': [
(r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
Comment.Preproc),
(r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline),
Comment.Preproc),
(r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
Generic.Heading, '+p'),
(r'(%s)@\w*[ %s]' % (_start, _newline), Keyword),
(r'![^%s]*' % _newline, Comment.Single),
(r'(\{)([%s]endlines)(\})' % _dash,
bygroups(Punctuation, Keyword, Punctuation), '#pop'),
(r'[^@!{]+?([%s]|\Z)|.' % _newline, Text)
]
}
# Inform 7 can include snippets of Inform 6 template language,
# so all of Inform6Lexer's states are copied here, with
# modifications to account for template syntax. Inform7Lexer's
# own states begin with '+' to avoid name conflicts. Some of
# Inform6Lexer's states begin with '_': these are not modified.
# They deal with template syntax either by including modified
# states, or by matching r'' then pushing to modified states.
for token in Inform6Lexer.tokens:
if token == 'root':
continue
tokens[level][token] = list(Inform6Lexer.tokens[token])
if not token.startswith('_'):
tokens[level][token][:0] = [include('+i6t'), include(level)]
def __init__(self, **options):
level = options.get('i6t', '+i6t-not-inline')
if level not in self._all_tokens:
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
class Inform6TemplateLexer(Inform7Lexer):
"""
For `Inform 6 template
<http://inform7.com/sources/src/i6template/Woven/index.html>`_ code.
.. versionadded:: 2.0
"""
name = 'Inform 6 template'
aliases = ['i6t']
filenames = ['*.i6t']
def get_tokens_unprocessed(self, text, stack=('+i6t-root',)):
return Inform7Lexer.get_tokens_unprocessed(self, text, stack)
class Tads3Lexer(RegexLexer):
"""
For `TADS 3 <http://www.tads.org/>`_ source code.
"""
name = 'TADS 3'
aliases = ['tads3']
filenames = ['*.t']
flags = re.DOTALL | re.MULTILINE
_comment_single = r'(?://(?:[^\\\n]|\\+[\w\W])*$)'
_comment_multiline = r'(?:/\*(?:[^*]|\*(?!/))*\*/)'
_escape = (r'(?:\\(?:[\n\\<>"\'^v bnrt]|u[\da-fA-F]{,4}|x[\da-fA-F]{,2}|'
r'[0-3]?[0-7]{1,2}))')
_name = r'(?:[_a-zA-Z]\w*)'
_no_quote = r'(?=\s|\\?>)'
_operator = (r'(?:&&|\|\||\+\+|--|\?\?|::|[.,@\[\]~]|'
r'(?:[=+\-*/%!&|^]|<<?|>>?>?)=?)')
_ws = r'(?:\\|\s|%s|%s)' % (_comment_single, _comment_multiline)
_ws_pp = r'(?:\\\n|[^\S\n]|%s|%s)' % (_comment_single, _comment_multiline)
def _make_string_state(triple, double, verbatim=None, _escape=_escape):
if verbatim:
verbatim = ''.join(['(?:%s|%s)' % (re.escape(c.lower()),
re.escape(c.upper()))
for c in verbatim])
char = r'"' if double else r"'"
token = String.Double if double else String.Single
escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
prefix = '%s%s' % ('t' if triple else '', 'd' if double else 's')
tag_state_name = '%sqt' % prefix
state = []
if triple:
state += [
(r'%s{3,}' % char, token, '#pop'),
(r'\\%s+' % char, String.Escape),
(char, token)
]
else:
state.append((char, token, '#pop'))
state += [
include('s/verbatim'),
(r'[^\\<&{}%s]+' % char, token)
]
if verbatim:
# This regex can't use `(?i)` because escape sequences are
# case-sensitive. `<\XMP>` works; `<\xmp>` doesn't.
state.append((r'\\?<(/|\\\\|(?!%s)\\)%s(?=[\s=>])' %
(_escape, verbatim),
Name.Tag, ('#pop', '%sqs' % prefix, tag_state_name)))
else:
state += [
(r'\\?<!([^><\\%s]|<(?!<)|\\%s%s|%s|\\.)*>?' %
(char, char, escaped_quotes, _escape), Comment.Multiline),
(r'(?i)\\?<listing(?=[\s=>]|\\>)', Name.Tag,
('#pop', '%sqs/listing' % prefix, tag_state_name)),
(r'(?i)\\?<xmp(?=[\s=>]|\\>)', Name.Tag,
('#pop', '%sqs/xmp' % prefix, tag_state_name)),
(r'\\?<([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)*' %
(char, char, escaped_quotes, _escape), Name.Tag,
tag_state_name),
include('s/entity')
]
state += [
include('s/escape'),
(r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
(char, char, escaped_quotes, _escape), String.Interpol),
(r'[\\&{}<]', token)
]
return state
def _make_tag_state(triple, double, _escape=_escape):
char = r'"' if double else r"'"
quantifier = r'{3,}' if triple else r''
state_name = '%s%sqt' % ('t' if triple else '', 'd' if double else 's')
token = String.Double if double else String.Single
escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
return [
(r'%s%s' % (char, quantifier), token, '#pop:2'),
(r'(\s|\\\n)+', Text),
(r'(=)(\\?")', bygroups(Punctuation, String.Double),
'dqs/%s' % state_name),
(r"(=)(\\?')", bygroups(Punctuation, String.Single),
'sqs/%s' % state_name),
(r'=', Punctuation, 'uqs/%s' % state_name),
(r'\\?>', Name.Tag, '#pop'),
(r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
(char, char, escaped_quotes, _escape), String.Interpol),
(r'([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)+' %
(char, char, escaped_quotes, _escape), Name.Attribute),
include('s/escape'),
include('s/verbatim'),
include('s/entity'),
(r'[\\{}&]', Name.Attribute)
]
def _make_attribute_value_state(terminator, host_triple, host_double,
_escape=_escape):
token = (String.Double if terminator == r'"' else
String.Single if terminator == r"'" else String.Other)
host_char = r'"' if host_double else r"'"
host_quantifier = r'{3,}' if host_triple else r''
host_token = String.Double if host_double else String.Single
escaped_quotes = (r'+|%s(?!%s{2})' % (host_char, host_char)
if host_triple else r'')
return [
(r'%s%s' % (host_char, host_quantifier), host_token, '#pop:3'),
(r'%s%s' % (r'' if token is String.Other else r'\\?', terminator),
token, '#pop'),
include('s/verbatim'),
include('s/entity'),
(r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
(host_char, host_char, escaped_quotes, _escape), String.Interpol),
(r'([^\s"\'<%s{}\\&])+' % (r'>' if token is String.Other else r''),
token),
include('s/escape'),
(r'["\'\s&{<}\\]', token)
]
tokens = {
'root': [
('\ufeff', Text),
(r'\{', Punctuation, 'object-body'),
(r';+', Punctuation),
(r'(?=(argcount|break|case|catch|continue|default|definingobj|'
r'delegated|do|else|for|foreach|finally|goto|if|inherited|'
r'invokee|local|nil|new|operator|replaced|return|self|switch|'
r'targetobj|targetprop|throw|true|try|while)\b)', Text, 'block'),
(r'(%s)(%s*)(\()' % (_name, _ws),
bygroups(Name.Function, using(this, state='whitespace'),
Punctuation),
('block?/root', 'more/parameters', 'main/parameters')),
include('whitespace'),
(r'\++', Punctuation),
(r'[^\s!"%-(*->@-_a-z{-~]+', Error), # Averts an infinite loop
(r'(?!\Z)', Text, 'main/root')
],
'main/root': [
include('main/basic'),
default(('#pop', 'object-body/no-braces', 'classes', 'class'))
],
'object-body/no-braces': [
(r';', Punctuation, '#pop'),
(r'\{', Punctuation, ('#pop', 'object-body')),
include('object-body')
],
'object-body': [
(r';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
(r':', Punctuation, ('classes', 'class')),
(r'(%s?)(%s*)(\()' % (_name, _ws),
bygroups(Name.Function, using(this, state='whitespace'),
Punctuation),
('block?', 'more/parameters', 'main/parameters')),
(r'(%s)(%s*)(\{)' % (_name, _ws),
bygroups(Name.Function, using(this, state='whitespace'),
Punctuation), 'block'),
(r'(%s)(%s*)(:)' % (_name, _ws),
bygroups(Name.Variable, using(this, state='whitespace'),
Punctuation),
('object-body/no-braces', 'classes', 'class')),
include('whitespace'),
(r'->|%s' % _operator, Punctuation, 'main'),
default('main/object-body')
],
'main/object-body': [
include('main/basic'),
(r'(%s)(%s*)(=?)' % (_name, _ws),
bygroups(Name.Variable, using(this, state='whitespace'),
Punctuation), ('#pop', 'more', 'main')),
default('#pop:2')
],
'block?/root': [
(r'\{', Punctuation, ('#pop', 'block')),
include('whitespace'),
(r'(?=[\[\'"<(:])', Text, # It might be a VerbRule macro.
('#pop', 'object-body/no-braces', 'grammar', 'grammar-rules')),
# It might be a macro like DefineAction.
default(('#pop', 'object-body/no-braces'))
],
'block?': [
(r'\{', Punctuation, ('#pop', 'block')),
include('whitespace'),
default('#pop')
],
'block/basic': [
(r'[;:]+', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
(r'default\b', Keyword.Reserved),
(r'(%s)(%s*)(:)' % (_name, _ws),
bygroups(Name.Label, using(this, state='whitespace'),
Punctuation)),
include('whitespace')
],
'block': [
include('block/basic'),
(r'(?!\Z)', Text, ('more', 'main'))
],
'block/embed': [
(r'>>', String.Interpol, '#pop'),
include('block/basic'),
(r'(?!\Z)', Text, ('more/embed', 'main'))
],
'main/basic': [
include('whitespace'),
(r'\(', Punctuation, ('#pop', 'more', 'main')),
(r'\[', Punctuation, ('#pop', 'more/list', 'main')),
(r'\{', Punctuation, ('#pop', 'more/inner', 'main/inner',
'more/parameters', 'main/parameters')),
(r'\*|\.{3}', Punctuation, '#pop'),
(r'(?i)0x[\da-f]+', Number.Hex, '#pop'),
(r'(\d+\.(?!\.)\d*|\.\d+)([eE][-+]?\d+)?|\d+[eE][-+]?\d+',
Number.Float, '#pop'),
(r'0[0-7]+', Number.Oct, '#pop'),
(r'\d+', Number.Integer, '#pop'),
(r'"""', String.Double, ('#pop', 'tdqs')),
(r"'''", String.Single, ('#pop', 'tsqs')),
(r'"', String.Double, ('#pop', 'dqs')),
(r"'", String.Single, ('#pop', 'sqs')),
(r'R"""', String.Regex, ('#pop', 'tdqr')),
(r"R'''", String.Regex, ('#pop', 'tsqr')),
(r'R"', String.Regex, ('#pop', 'dqr')),
(r"R'", String.Regex, ('#pop', 'sqr')),
# Two-token keywords
(r'(extern)(%s+)(object\b)' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Keyword.Reserved)),
(r'(function|method)(%s*)(\()' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Punctuation),
('#pop', 'block?', 'more/parameters', 'main/parameters')),
(r'(modify)(%s+)(grammar\b)' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Keyword.Reserved),
('#pop', 'object-body/no-braces', ':', 'grammar')),
(r'(new)(%s+(?=(?:function|method)\b))' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'))),
(r'(object)(%s+)(template\b)' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Keyword.Reserved), ('#pop', 'template')),
(r'(string)(%s+)(template\b)' % _ws,
bygroups(Keyword, using(this, state='whitespace'),
Keyword.Reserved), ('#pop', 'function-name')),
# Keywords
(r'(argcount|definingobj|invokee|replaced|targetobj|targetprop)\b',
Name.Builtin, '#pop'),
(r'(break|continue|goto)\b', Keyword.Reserved, ('#pop', 'label')),
(r'(case|extern|if|intrinsic|return|static|while)\b',
Keyword.Reserved),
(r'catch\b', Keyword.Reserved, ('#pop', 'catch')),
(r'class\b', Keyword.Reserved,
('#pop', 'object-body/no-braces', 'class')),
(r'(default|do|else|finally|try)\b', Keyword.Reserved, '#pop'),
(r'(dictionary|property)\b', Keyword.Reserved,
('#pop', 'constants')),
(r'enum\b', Keyword.Reserved, ('#pop', 'enum')),
(r'export\b', Keyword.Reserved, ('#pop', 'main')),
(r'(for|foreach)\b', Keyword.Reserved,
('#pop', 'more/inner', 'main/inner')),
(r'(function|method)\b', Keyword.Reserved,
('#pop', 'block?', 'function-name')),
(r'grammar\b', Keyword.Reserved,
('#pop', 'object-body/no-braces', 'grammar')),
(r'inherited\b', Keyword.Reserved, ('#pop', 'inherited')),
(r'local\b', Keyword.Reserved,
('#pop', 'more/local', 'main/local')),
(r'(modify|replace|switch|throw|transient)\b', Keyword.Reserved,
'#pop'),
(r'new\b', Keyword.Reserved, ('#pop', 'class')),
(r'(nil|true)\b', Keyword.Constant, '#pop'),
(r'object\b', Keyword.Reserved, ('#pop', 'object-body/no-braces')),
(r'operator\b', Keyword.Reserved, ('#pop', 'operator')),
(r'propertyset\b', Keyword.Reserved,
('#pop', 'propertyset', 'main')),
(r'self\b', Name.Builtin.Pseudo, '#pop'),
(r'template\b', Keyword.Reserved, ('#pop', 'template')),
# Operators
(r'(__objref|defined)(%s*)(\()' % _ws,
bygroups(Operator.Word, using(this, state='whitespace'),
Operator), ('#pop', 'more/__objref', 'main')),
(r'delegated\b', Operator.Word),
# Compiler-defined macros and built-in properties
(r'(__DATE__|__DEBUG|__LINE__|__FILE__|'
r'__TADS_MACRO_FORMAT_VERSION|__TADS_SYS_\w*|__TADS_SYSTEM_NAME|'
r'__TADS_VERSION_MAJOR|__TADS_VERSION_MINOR|__TADS3|__TIME__|'
r'construct|finalize|grammarInfo|grammarTag|lexicalParent|'
r'miscVocab|sourceTextGroup|sourceTextGroupName|'
r'sourceTextGroupOrder|sourceTextOrder)\b', Name.Builtin, '#pop')
],
'main': [
include('main/basic'),
(_name, Name, '#pop'),
default('#pop')
],
'more/basic': [
(r'\(', Punctuation, ('more/list', 'main')),
(r'\[', Punctuation, ('more', 'main')),
(r'\.{3}', Punctuation),
(r'->|\.\.', Punctuation, 'main'),
(r'(?=;)|[:)\]]', Punctuation, '#pop'),
include('whitespace'),
(_operator, Operator, 'main'),
(r'\?', Operator, ('main', 'more/conditional', 'main')),
(r'(is|not)(%s+)(in\b)' % _ws,
bygroups(Operator.Word, using(this, state='whitespace'),
Operator.Word)),
(r'[^\s!"%-_a-z{-~]+', Error) # Averts an infinite loop
],
'more': [
include('more/basic'),
default('#pop')
],
# Then expression (conditional operator)
'more/conditional': [
(r':(?!:)', Operator, '#pop'),
include('more')
],
# Embedded expressions
'more/embed': [
(r'>>', String.Interpol, '#pop:2'),
include('more')
],
# For/foreach loop initializer or short-form anonymous function
'main/inner': [
(r'\(', Punctuation, ('#pop', 'more/inner', 'main/inner')),
(r'local\b', Keyword.Reserved, ('#pop', 'main/local')),
include('main')
],
'more/inner': [
(r'\}', Punctuation, '#pop'),
(r',', Punctuation, 'main/inner'),
(r'(in|step)\b', Keyword, 'main/inner'),
include('more')
],
# Local
'main/local': [
(_name, Name.Variable, '#pop'),
include('whitespace')
],
'more/local': [
(r',', Punctuation, 'main/local'),
include('more')
],
# List
'more/list': [
(r'[,:]', Punctuation, 'main'),
include('more')
],
# Parameter list
'main/parameters': [
(r'(%s)(%s*)(?=:)' % (_name, _ws),
bygroups(Name.Variable, using(this, state='whitespace')), '#pop'),
(r'(%s)(%s+)(%s)' % (_name, _ws, _name),
bygroups(Name.Class, using(this, state='whitespace'),
Name.Variable), '#pop'),
(r'\[+', Punctuation),
include('main/basic'),
(_name, Name.Variable, '#pop'),
default('#pop')
],
'more/parameters': [
(r'(:)(%s*(?=[?=,:)]))' % _ws,
bygroups(Punctuation, using(this, state='whitespace'))),
(r'[?\]]+', Punctuation),
(r'[:)]', Punctuation, ('#pop', 'multimethod?')),
(r',', Punctuation, 'main/parameters'),
(r'=', Punctuation, ('more/parameter', 'main')),
include('more')
],
'more/parameter': [
(r'(?=[,)])', Text, '#pop'),
include('more')
],
'multimethod?': [
(r'multimethod\b', Keyword, '#pop'),
include('whitespace'),
default('#pop')
],
# Statements and expressions
'more/__objref': [
(r',', Punctuation, 'mode'),
(r'\)', Operator, '#pop'),
include('more')
],
'mode': [
(r'(error|warn)\b', Keyword, '#pop'),
include('whitespace')
],
'catch': [
(r'\(+', Punctuation),
(_name, Name.Exception, ('#pop', 'variables')),
include('whitespace')
],
'enum': [
include('whitespace'),
(r'token\b', Keyword, ('#pop', 'constants')),
default(('#pop', 'constants'))
],
'grammar': [
(r'\)+', Punctuation),
(r'\(', Punctuation, 'grammar-tag'),
(r':', Punctuation, 'grammar-rules'),
(_name, Name.Class),
include('whitespace')
],
'grammar-tag': [
include('whitespace'),
(r'"""([^\\"<]|""?(?!")|\\"+|\\.|<(?!<))+("{3,}|<<)|'
r'R"""([^\\"]|""?(?!")|\\"+|\\.)+"{3,}|'
r"'''([^\\'<]|''?(?!')|\\'+|\\.|<(?!<))+('{3,}|<<)|"
r"R'''([^\\']|''?(?!')|\\'+|\\.)+'{3,}|"
r'"([^\\"<]|\\.|<(?!<))+("|<<)|R"([^\\"]|\\.)+"|'
r"'([^\\'<]|\\.|<(?!<))+('|<<)|R'([^\\']|\\.)+'|"
r"([^)\s\\/]|/(?![/*]))+|\)", String.Other, '#pop')
],
'grammar-rules': [
include('string'),
include('whitespace'),
(r'(\[)(%s*)(badness)' % _ws,
bygroups(Punctuation, using(this, state='whitespace'), Keyword),
'main'),
(r'->|%s|[()]' % _operator, Punctuation),
(_name, Name.Constant),
default('#pop:2')
],
':': [
(r':', Punctuation, '#pop')
],
'function-name': [
(r'(<<([^>]|>>>|>(?!>))*>>)+', String.Interpol),
(r'(?=%s?%s*[({])' % (_name, _ws), Text, '#pop'),
(_name, Name.Function, '#pop'),
include('whitespace')
],
'inherited': [
(r'<', Punctuation, ('#pop', 'classes', 'class')),
include('whitespace'),
(_name, Name.Class, '#pop'),
default('#pop')
],
'operator': [
(r'negate\b', Operator.Word, '#pop'),
include('whitespace'),
(_operator, Operator),
default('#pop')
],
'propertyset': [
(r'\(', Punctuation, ('more/parameters', 'main/parameters')),
(r'\{', Punctuation, ('#pop', 'object-body')),
include('whitespace')
],
'template': [
(r'(?=;)', Text, '#pop'),
include('string'),
(r'inherited\b', Keyword.Reserved),
include('whitespace'),
(r'->|\?|%s' % _operator, Punctuation),
(_name, Name.Variable)
],
# Identifiers
'class': [
(r'\*|\.{3}', Punctuation, '#pop'),
(r'object\b', Keyword.Reserved, '#pop'),
(r'transient\b', Keyword.Reserved),
(_name, Name.Class, '#pop'),
include('whitespace'),
default('#pop')
],
'classes': [
(r'[:,]', Punctuation, 'class'),
include('whitespace'),
(r'>', Punctuation, '#pop'),
default('#pop')
],
'constants': [
(r',+', Punctuation),
(r';', Punctuation, '#pop'),
(r'property\b', Keyword.Reserved),
(_name, Name.Constant),
include('whitespace')
],
'label': [
(_name, Name.Label, '#pop'),
include('whitespace'),
default('#pop')
],
'variables': [
(r',+', Punctuation),
(r'\)', Punctuation, '#pop'),
include('whitespace'),
(_name, Name.Variable)
],
# Whitespace and comments
'whitespace': [
(r'^%s*#(%s|[^\n]|(?<=\\)\n)*\n?' % (_ws_pp, _comment_multiline),
Comment.Preproc),
(_comment_single, Comment.Single),
(_comment_multiline, Comment.Multiline),
(r'\\+\n+%s*#?|\n+|([^\S\n]|\\)+' % _ws_pp, Text)
],
# Strings
'string': [
(r'"""', String.Double, 'tdqs'),
(r"'''", String.Single, 'tsqs'),
(r'"', String.Double, 'dqs'),
(r"'", String.Single, 'sqs')
],
's/escape': [
(r'\{\{|\}\}|%s' % _escape, String.Escape)
],
's/verbatim': [
(r'<<\s*(as\s+decreasingly\s+likely\s+outcomes|cycling|else|end|'
r'first\s+time|one\s+of|only|or|otherwise|'
r'(sticky|(then\s+)?(purely\s+)?at)\s+random|stopping|'
r'(then\s+)?(half\s+)?shuffled|\|\|)\s*>>', String.Interpol),
(r'<<(%%(_(%s|\\?.)|[\-+ ,#]|\[\d*\]?)*\d*\.?\d*(%s|\\?.)|'
r'\s*((else|otherwise)\s+)?(if|unless)\b)?' % (_escape, _escape),
String.Interpol, ('block/embed', 'more/embed', 'main'))
],
's/entity': [
(r'(?i)&(#(x[\da-f]+|\d+)|[a-z][\da-z]*);?', Name.Entity)
],
'tdqs': _make_string_state(True, True),
'tsqs': _make_string_state(True, False),
'dqs': _make_string_state(False, True),
'sqs': _make_string_state(False, False),
'tdqs/listing': _make_string_state(True, True, 'listing'),
'tsqs/listing': _make_string_state(True, False, 'listing'),
'dqs/listing': _make_string_state(False, True, 'listing'),
'sqs/listing': _make_string_state(False, False, 'listing'),
'tdqs/xmp': _make_string_state(True, True, 'xmp'),
'tsqs/xmp': _make_string_state(True, False, 'xmp'),
'dqs/xmp': _make_string_state(False, True, 'xmp'),
'sqs/xmp': _make_string_state(False, False, 'xmp'),
# Tags
'tdqt': _make_tag_state(True, True),
'tsqt': _make_tag_state(True, False),
'dqt': _make_tag_state(False, True),
'sqt': _make_tag_state(False, False),
'dqs/tdqt': _make_attribute_value_state(r'"', True, True),
'dqs/tsqt': _make_attribute_value_state(r'"', True, False),
'dqs/dqt': _make_attribute_value_state(r'"', False, True),
'dqs/sqt': _make_attribute_value_state(r'"', False, False),
'sqs/tdqt': _make_attribute_value_state(r"'", True, True),
'sqs/tsqt': _make_attribute_value_state(r"'", True, False),
'sqs/dqt': _make_attribute_value_state(r"'", False, True),
'sqs/sqt': _make_attribute_value_state(r"'", False, False),
'uqs/tdqt': _make_attribute_value_state(_no_quote, True, True),
'uqs/tsqt': _make_attribute_value_state(_no_quote, True, False),
'uqs/dqt': _make_attribute_value_state(_no_quote, False, True),
'uqs/sqt': _make_attribute_value_state(_no_quote, False, False),
# Regular expressions
'tdqr': [
(r'[^\\"]+', String.Regex),
(r'\\"*', String.Regex),
(r'"{3,}', String.Regex, '#pop'),
(r'"', String.Regex)
],
'tsqr': [
(r"[^\\']+", String.Regex),
(r"\\'*", String.Regex),
(r"'{3,}", String.Regex, '#pop'),
(r"'", String.Regex)
],
'dqr': [
(r'[^\\"]+', String.Regex),
(r'\\"?', String.Regex),
(r'"', String.Regex, '#pop')
],
'sqr': [
(r"[^\\']+", String.Regex),
(r"\\'?", String.Regex),
(r"'", String.Regex, '#pop')
]
}
def get_tokens_unprocessed(self, text, **kwargs):
pp = r'^%s*#%s*' % (self._ws_pp, self._ws_pp)
if_false_level = 0
for index, token, value in (
RegexLexer.get_tokens_unprocessed(self, text, **kwargs)):
if if_false_level == 0: # Not in a false #if
if (token is Comment.Preproc and
re.match(r'%sif%s+(0|nil)%s*$\n?' %
(pp, self._ws_pp, self._ws_pp), value)):
if_false_level = 1
else: # In a false #if
if token is Comment.Preproc:
if (if_false_level == 1 and
re.match(r'%sel(if|se)\b' % pp, value)):
if_false_level = 0
elif re.match(r'%sif' % pp, value):
if_false_level += 1
elif re.match(r'%sendif\b' % pp, value):
if_false_level -= 1
else:
token = Comment
yield index, token, value
def analyse_text(text):
"""This is a rather generic descriptive language without strong
identifiers. It looks like a 'GameMainDef' has to be present,
and/or a 'versionInfo' with an 'IFID' field."""
result = 0
if '__TADS' in text or 'GameMainDef' in text:
result += 0.2
# This is a fairly unique keyword which is likely used in source as well
if 'versionInfo' in text and 'IFID' in text:
result += 0.1
return result
```
#### File: requests_toolbelt/adapters/host_header_ssl.py
```python
from requests.adapters import HTTPAdapter
class HostHeaderSSLAdapter(HTTPAdapter):
"""
    An HTTPS adapter for Python Requests that sets the hostname for certificate
verification based on the Host header.
This allows requesting the IP address directly via HTTPS without getting
a "hostname doesn't match" exception.
Example usage:
>>> s.mount('https://', HostHeaderSSLAdapter())
>>> s.get("https://172.16.31.10", headers={"Host": "example.org"})
"""
def send(self, request, **kwargs):
# HTTP headers are case-insensitive (RFC 7230)
host_header = None
for header in request.headers:
if header.lower() == "host":
host_header = request.headers[header]
break
connection_pool_kwargs = self.poolmanager.connection_pool_kw
if host_header:
connection_pool_kwargs["assert_hostname"] = host_header
elif "assert_hostname" in connection_pool_kwargs:
# an assert_hostname from a previous request may have been left
connection_pool_kwargs.pop("assert_hostname", None)
return super(HostHeaderSSLAdapter, self).send(request, **kwargs)
```
#### File: requests_toolbelt/auth/http_proxy_digest.py
```python
import re
from requests import cookies, utils
from . import _digest_auth_compat as auth
class HTTPProxyDigestAuth(auth.HTTPDigestAuth):
"""HTTP digest authentication between proxy
:param stale_rejects: The number of rejects indicate that:
the client may wish to simply retry the request
with a new encrypted response, without reprompting the user for a
new username and password. i.e., retry build_digest_header
:type stale_rejects: int
"""
_pat = re.compile(r'digest ', flags=re.IGNORECASE)
def __init__(self, *args, **kwargs):
super(HTTPProxyDigestAuth, self).__init__(*args, **kwargs)
self.stale_rejects = 0
self.init_per_thread_state()
@property
def stale_rejects(self):
thread_local = getattr(self, '_thread_local', None)
if thread_local is None:
return self._stale_rejects
return thread_local.stale_rejects
@stale_rejects.setter
def stale_rejects(self, value):
thread_local = getattr(self, '_thread_local', None)
if thread_local is None:
self._stale_rejects = value
else:
thread_local.stale_rejects = value
def init_per_thread_state(self):
try:
super(HTTPProxyDigestAuth, self).init_per_thread_state()
except AttributeError:
# If we're not on requests 2.8.0+ this method does not exist
pass
def handle_407(self, r, **kwargs):
"""Handle HTTP 407 only once, otherwise give up
:param r: current response
:returns: responses, along with the new response
"""
if r.status_code == 407 and self.stale_rejects < 2:
s_auth = r.headers.get("proxy-authenticate")
if s_auth is None:
                raise IOError(
                    "proxy server violated RFC 7235: "
                    "407 response MUST contain a Proxy-Authenticate header")
elif not self._pat.match(s_auth):
return r
self.chal = utils.parse_dict_header(
self._pat.sub('', s_auth, count=1))
# if we present the user/passwd and still get rejected
# http://tools.ietf.org/html/rfc2617#section-3.2.1
if ('Proxy-Authorization' in r.request.headers and
'stale' in self.chal):
if self.chal['stale'].lower() == 'true': # try again
self.stale_rejects += 1
# wrong user/passwd
elif self.chal['stale'].lower() == 'false':
raise IOError("User or password is invalid")
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
cookies.extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Proxy-Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
else: # give up authenticate
return r
def __call__(self, r):
self.init_per_thread_state()
        # if we have a nonce, use it; otherwise the server will tell us
if self.last_nonce:
r.headers['Proxy-Authorization'] = self.build_digest_header(
r.method, r.url
)
r.register_hook('response', self.handle_407)
return r
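# A minimal usage sketch (hedged: hypothetical proxy address; the constructor
# arguments follow HTTPDigestAuth, i.e. username and password):
#
#   import requests
#   auth = HTTPProxyDigestAuth('user', 'password')
#   proxies = {'https': 'http://proxy.example.org:8080'}
#   requests.get('https://example.org/', proxies=proxies, auth=auth)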
```
#### File: requests_toolbelt/downloadutils/tee.py
```python
import io
_DEFAULT_CHUNKSIZE = 65536
__all__ = ['tee', 'tee_to_file', 'tee_to_bytearray']
def _tee(response, callback, chunksize, decode_content):
for chunk in response.raw.stream(amt=chunksize,
decode_content=decode_content):
callback(chunk)
yield chunk
def tee(response, fileobject, chunksize=_DEFAULT_CHUNKSIZE,
decode_content=None):
"""Stream the response both to the generator and a file.
This will stream the response body while writing the bytes to
``fileobject``.
Example usage:
.. code-block:: python
resp = requests.get(url, stream=True)
with open('save_file', 'wb') as save_file:
for chunk in tee(resp, save_file):
# do stuff with chunk
.. code-block:: python
import io
resp = requests.get(url, stream=True)
fileobject = io.BytesIO()
for chunk in tee(resp, fileobject):
# do stuff with chunk
:param response: Response from requests.
:type response: requests.Response
:param fileobject: Writable file-like object.
:type fileobject: file, io.BytesIO
:param int chunksize: (optional), Size of chunk to attempt to stream.
:param bool decode_content: (optional), If True, this will decode the
compressed content of the response.
:raises: TypeError if the fileobject wasn't opened with the right mode
or isn't a BytesIO object.
"""
# We will be streaming the raw bytes from over the wire, so we need to
# ensure that writing to the fileobject will preserve those bytes. On
# Python3, if the user passes an io.StringIO, this will fail, so we need
# to check for BytesIO instead.
if not ('b' in getattr(fileobject, 'mode', '') or
isinstance(fileobject, io.BytesIO)):
raise TypeError('tee() will write bytes directly to this fileobject'
', it must be opened with the "b" flag if it is a file'
' or inherit from io.BytesIO.')
return _tee(response, fileobject.write, chunksize, decode_content)
def tee_to_file(response, filename, chunksize=_DEFAULT_CHUNKSIZE,
decode_content=None):
"""Stream the response both to the generator and a file.
This will open a file named ``filename`` and stream the response body
while writing the bytes to the opened file object.
Example usage:
.. code-block:: python
resp = requests.get(url, stream=True)
for chunk in tee_to_file(resp, 'save_file'):
# do stuff with chunk
:param response: Response from requests.
:type response: requests.Response
:param str filename: Name of file in which we write the response content.
:param int chunksize: (optional), Size of chunk to attempt to stream.
:param bool decode_content: (optional), If True, this will decode the
compressed content of the response.
"""
with open(filename, 'wb') as fd:
for chunk in tee(response, fd, chunksize, decode_content):
yield chunk
def tee_to_bytearray(response, bytearr, chunksize=_DEFAULT_CHUNKSIZE,
decode_content=None):
"""Stream the response both to the generator and a bytearray.
    This will stream the response provided to the function, add the chunks
    to the provided :class:`bytearray`, and yield them to the user.
.. note::
This uses the :meth:`bytearray.extend` by default instead of passing
the bytearray into the ``readinto`` method.
Example usage:
.. code-block:: python
b = bytearray()
resp = requests.get(url, stream=True)
for chunk in tee_to_bytearray(resp, b):
# do stuff with chunk
:param response: Response from requests.
:type response: requests.Response
:param bytearray bytearr: Array to add the streamed bytes to.
:param int chunksize: (optional), Size of chunk to attempt to stream.
:param bool decode_content: (optional), If True, this will decode the
compressed content of the response.
"""
if not isinstance(bytearr, bytearray):
raise TypeError('tee_to_bytearray() expects bytearr to be a '
'bytearray')
return _tee(response, bytearr.extend, chunksize, decode_content)
```
#### File: requests_toolbelt/threaded/__init__.py
```python
from . import pool
from .._compat import queue
def map(requests, **kwargs):
r"""Simple interface to the threaded Pool object.
This function takes a list of dictionaries representing requests to make
using Sessions in threads and returns a tuple where the first item is
a generator of successful responses and the second is a generator of
exceptions.
:param list requests:
Collection of dictionaries representing requests to make with the Pool
object.
:param \*\*kwargs:
Keyword arguments that are passed to the
:class:`~requests_toolbelt.threaded.pool.Pool` object.
:returns: Tuple of responses and exceptions from the pool
:rtype: (:class:`~requests_toolbelt.threaded.pool.ThreadResponse`,
:class:`~requests_toolbelt.threaded.pool.ThreadException`)
"""
if not (requests and all(isinstance(r, dict) for r in requests)):
raise ValueError('map expects a list of dictionaries.')
# Build our queue of requests
job_queue = queue.Queue()
for request in requests:
job_queue.put(request)
# Ensure the user doesn't try to pass their own job_queue
kwargs['job_queue'] = job_queue
threadpool = pool.Pool(**kwargs)
threadpool.join_all()
return threadpool.responses(), threadpool.exceptions()
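# A minimal usage sketch (hypothetical URLs; each dict holds keyword arguments
# for one request made on a worker thread; the ".response" attribute on the
# yielded objects is an assumption worth checking against the pool module):
#
#   responses, exceptions = map([
#       {'method': 'GET', 'url': 'https://example.org'},
#       {'method': 'GET', 'url': 'https://example.com'},
#   ])
#   for thread_response in responses:
#       print(thread_response.response.status_code)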
```
#### File: site-packages/setuptools/launch.py
```python
import tokenize
import sys
def run():
"""
Run the script in sys.argv[1] as if it had
been invoked naturally.
"""
__builtins__
script_name = sys.argv[1]
namespace = dict(
__file__=script_name,
__name__='__main__',
__doc__=None,
)
sys.argv[:] = sys.argv[1:]
open_ = getattr(tokenize, 'open', open)
with open_(script_name) as fid:
script = fid.read()
norm_script = script.replace('\\r\\n', '\\n')
code = compile(norm_script, script_name, 'exec')
exec(code, namespace)
if __name__ == '__main__':
run()
```
#### File: win32ctypes/tests/test_win32cred.py
```python
from __future__ import absolute_import
import os
import sys
import unittest
import win32cred
from win32ctypes.core._winerrors import ERROR_NOT_FOUND
from win32ctypes.pywin32.pywintypes import error
from win32ctypes.pywin32.win32cred import (
CredDelete, CredRead, CredWrite,
CRED_PERSIST_ENTERPRISE, CRED_TYPE_GENERIC)
from win32ctypes.tests import compat
# find the pywin32 version
version_file = os.path.join(
os.path.dirname(os.path.dirname(win32cred.__file__)), 'pywin32.version.txt')
if os.path.exists(version_file):
with open(version_file) as handle:
pywin32_build = handle.read().strip()
else:
pywin32_build = None
class TestCred(compat.TestCase):
@unittest.skipIf(
pywin32_build == "223" and sys.version_info[:2] == (3,7),
"pywin32 version 223 bug with CredRead (mhammond/pywin32#1232)")
def test_write_to_pywin32(self):
username = u"john"
password = u"<PASSWORD>"
comment = u"Created by MiniPyWin32Cred test suite"
target = "{0}@{1}".format(username, password)
credentials = {"Type": CRED_TYPE_GENERIC,
"TargetName": target,
"UserName": username,
"CredentialBlob": password,
"Comment": comment,
"Persist": CRED_PERSIST_ENTERPRISE}
CredWrite(credentials)
res = win32cred.CredRead(
TargetName=target, Type=CRED_TYPE_GENERIC)
self.assertEqual(res["Type"], CRED_TYPE_GENERIC)
self.assertEqual(res["UserName"], username)
self.assertEqual(res["TargetName"], target)
self.assertEqual(res["Comment"], comment)
self.assertEqual(
res["CredentialBlob"].decode('utf-16'), password)
def test_read_from_pywin32(self):
username = "john"
password = "<PASSWORD>"
comment = u"Created by MiniPyWin32Cred test suite"
target = u"{0}@{1}".format(username, password)
r_credentials = {
u"Type": CRED_TYPE_GENERIC,
u"TargetName": target,
u"UserName": username,
u"CredentialBlob": password,
u"Comment": comment,
u"Persist": CRED_PERSIST_ENTERPRISE}
win32cred.CredWrite(r_credentials)
credentials = CredRead(target, CRED_TYPE_GENERIC)
# XXX: the fact that we have to decode the password when reading, but
# not encode when writing is a bit strange, but that's what pywin32
# seems to do as well, and we try to be backward compatible here.
self.assertEqual(credentials["UserName"], username)
self.assertEqual(credentials["TargetName"], target)
self.assertEqual(credentials["Comment"], comment)
self.assertEqual(
credentials["CredentialBlob"].decode("utf-16"), password)
def test_read_write(self):
username = "john"
password = "<PASSWORD>"
comment = u"Created by MiniPyWin32Cred test suite"
target = u"{0}@{1}".format(username, password)
r_credentials = {
u"Type": CRED_TYPE_GENERIC,
u"TargetName": target,
u"UserName": username,
u"CredentialBlob": password,
u"Comment": comment,
u"Persist": CRED_PERSIST_ENTERPRISE}
CredWrite(r_credentials)
credentials = CredRead(target, CRED_TYPE_GENERIC)
# XXX: the fact that we have to decode the password when reading, but
# not encode when writing is a bit strange, but that's what pywin32
# seems to do as well, and we try to be backward compatible here.
self.assertEqual(credentials["UserName"], username)
self.assertEqual(credentials["TargetName"], target)
self.assertEqual(credentials["Comment"], comment)
self.assertEqual(
credentials["CredentialBlob"].decode("utf-16"), password)
def test_read_doesnt_exists(self):
target = "Floupi_dont_exists@MiniPyWin"
with self.assertRaises(error) as ctx:
CredRead(target, CRED_TYPE_GENERIC)
        self.assertEqual(ctx.exception.winerror, ERROR_NOT_FOUND)
def test_delete_simple(self):
username = "john"
password = "<PASSWORD>"
comment = "Created by MiniPyWin32Cred test suite"
target = "{0}@{1}".format(username, password)
r_credentials = {
"Type": CRED_TYPE_GENERIC,
"TargetName": target,
"UserName": username,
"CredentialBlob": password,
"Comment": comment,
"Persist": CRED_PERSIST_ENTERPRISE}
CredWrite(r_credentials, 0)
credentials = CredRead(target, CRED_TYPE_GENERIC)
self.assertTrue(credentials is not None)
CredDelete(target, CRED_TYPE_GENERIC)
with self.assertRaises(error) as ctx:
CredRead(target, CRED_TYPE_GENERIC)
self.assertEqual(ctx.exception.winerror, ERROR_NOT_FOUND)
self.assertEqual(ctx.exception.funcname, "CredRead")
def test_delete_doesnt_exists(self):
target = u"Floupi_doesnt_exists@MiniPyWin32"
with self.assertRaises(error) as ctx:
CredDelete(target, CRED_TYPE_GENERIC)
self.assertEqual(ctx.exception.winerror, ERROR_NOT_FOUND)
self.assertEqual(ctx.exception.funcname, "CredDelete")
if __name__ == '__main__':
unittest.main()
```
#### File: WeboscketWrapper_JE/test/client_test.py
```python
from je_websocket import websocket_client
def receive_f(connect_websocket):
print("f")
connect_websocket.close()
commands = {"f": receive_f}
client = websocket_client("ws://localhost:30001", commands)
```
|
{
"source": "JE-Chen/Python_JEAutoControl",
"score": 2
}
|
#### File: linux_with_x11/keyboard/x11_linux_keyboard_control.py
```python
import struct
import sys
import time
from je_auto_control.utils.exception.exceptions import AutoControlException
from je_auto_control.utils.exception.exception_tag import linux_import_error
if sys.platform not in ["linux", "linux2"]:
raise AutoControlException(linux_import_error)
from je_auto_control.linux_with_x11.core.utils.x11_linux_display import display
from Xlib.ext.xtest import fake_input
from Xlib import X
def press_key(keycode: int):
"""
:param keycode which keycode we want to press
"""
try:
time.sleep(0.01)
fake_input(display, X.KeyPress, keycode)
display.sync()
except struct.error as error:
print(repr(error), file=sys.stderr)
def release_key(keycode: int):
"""
:param keycode which keycode we want to release
"""
try:
time.sleep(0.01)
fake_input(display, X.KeyRelease, keycode)
display.sync()
except struct.error as error:
print(repr(error), file=sys.stderr)
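# A minimal usage sketch (hedged: keycode 38 is commonly "a" under X11,
# but the mapping depends on the active keymap):
#
#   press_key(38)
#   release_key(38)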
```
#### File: osx/mouse/osx_mouse.py
```python
import sys
from je_auto_control.utils.exception.exception_tag import osx_import_error
from je_auto_control.utils.exception.exceptions import AutoControlException
if sys.platform not in ["darwin"]:
raise AutoControlException(osx_import_error)
import time
import Quartz
from je_auto_control.osx.core.utils.osx_vk import osx_mouse_left
from je_auto_control.osx.core.utils.osx_vk import osx_mouse_middle
from je_auto_control.osx.core.utils.osx_vk import osx_mouse_right
def position():
"""
get mouse current position
"""
return (Quartz.NSEvent.mouseLocation().x, Quartz.NSEvent.mouseLocation().y)
def mouse_event(event, x: int, y: int, mouse_button: int):
"""
:param event which event we want to use
:param x event x
:param y event y
:param mouse_button which mouse button will use event
"""
curr_event = Quartz.CGEventCreateMouseEvent(None, event, (x, y), mouse_button)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, curr_event)
def set_position(x: int, y: int):
"""
:param x we want to set mouse x position
:param y we want to set mouse y position
"""
mouse_event(Quartz.kCGEventMouseMoved, x, y, 0)
def press_mouse(x: int, y: int, mouse_button: int):
"""
:param x event x
:param y event y
:param mouse_button which mouse button press
"""
if mouse_button is osx_mouse_left:
mouse_event(Quartz.kCGEventLeftMouseDown, x, y, Quartz.kCGMouseButtonLeft)
elif mouse_button is osx_mouse_middle:
mouse_event(Quartz.kCGEventOtherMouseDown, x, y, Quartz.kCGMouseButtonCenter)
elif mouse_button is osx_mouse_right:
mouse_event(Quartz.kCGEventRightMouseDown, x, y, Quartz.kCGMouseButtonRight)
def release_mouse(x: int, y: int, mouse_button: int):
"""
:param x event x
:param y event y
:param mouse_button which mouse button release
"""
if mouse_button is osx_mouse_left:
mouse_event(Quartz.kCGEventLeftMouseUp, x, y, Quartz.kCGMouseButtonLeft)
elif mouse_button is osx_mouse_middle:
mouse_event(Quartz.kCGEventOtherMouseUp, x, y, Quartz.kCGMouseButtonCenter)
elif mouse_button is osx_mouse_right:
mouse_event(Quartz.kCGEventRightMouseUp, x, y, Quartz.kCGMouseButtonRight)
def click_mouse(x: int, y: int, mouse_button: int):
    """
    :param x: event x coordinate
    :param y: event y coordinate
    :param mouse_button: which mouse button to click
    """
    # The press/sleep/release sequence is identical for every button
    if mouse_button in (osx_mouse_left, osx_mouse_middle, osx_mouse_right):
        press_mouse(x, y, mouse_button)
        time.sleep(.001)
        release_mouse(x, y, mouse_button)
def scroll(scroll_value: int):
"""
    :param scroll_value: number of scroll steps; the sign gives the direction
"""
scroll_value = int(scroll_value)
for do_scroll in range(abs(scroll_value)):
scroll_event = Quartz.CGEventCreateScrollWheelEvent(
None,
0,
1,
1 if scroll_value >= 0 else -1
)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, scroll_event)
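# A minimal usage sketch (hedged: NSEvent.mouseLocation reports coordinates
# with the origin at the bottom-left, so y may need flipping before reuse):
#
#   x, y = position()
#   click_mouse(int(x), int(y), osx_mouse_left)
#   scroll(-3)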
```
#### File: utils/critical_exit/critcal_exit.py
```python
import _thread
import sys
from threading import Thread
from je_auto_control.utils.exception.exception_tag import je_auto_control_critical_exit_error
from je_auto_control.utils.exception.exceptions import AutoControlException
from je_auto_control.wrapper.auto_control_keyboard import keys_table
from je_auto_control.wrapper.platform_wrapper import keyboard_check
class CriticalExit(Thread):
def __init__(self, default_daemon=True):
super().__init__()
self.setDaemon(default_daemon)
self._exit_check_key = keys_table.get("f7")
def set_critical_key(self, keycode: [int, str] = None):
"""
        :param keycode: the keycode (or key name) to watch for
"""
if type(keycode) is int:
self._exit_check_key = keycode
else:
self._exit_check_key = keys_table.get(keycode)
def run(self):
"""
        Listen for presses of the keycode stored in _exit_check_key.
"""
try:
while True:
if keyboard_check.check_key_is_press(self._exit_check_key):
_thread.interrupt_main()
except AutoControlException:
_thread.interrupt_main()
raise AutoControlException(je_auto_control_critical_exit_error)
def init_critical_exit(self):
"""
        Use only this method to start the critical-exit thread.
        More functionality may be added here later.
"""
critical_thread = self
critical_thread.start()
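# A minimal usage sketch (the watcher raises KeyboardInterrupt in the main
# thread once the critical key -- F7 by default -- is pressed):
#
#   critical_exit = CriticalExit()
#   critical_exit.set_critical_key("f9")
#   critical_exit.init_critical_exit()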
```
#### File: windows/listener/win32_keyboard_listener.py
```python
import sys
from je_auto_control.utils.exception.exception_tag import windows_import_error
from je_auto_control.utils.exception.exceptions import AutoControlException
if sys.platform not in ["win32", "cygwin", "msys"]:
raise AutoControlException(windows_import_error)
from ctypes import *
from ctypes.wintypes import MSG
from threading import Thread
from queue import Queue
user32 = windll.user32
kernel32 = windll.kernel32
wm_keydown = 0x100
class Win32KeyboardListener(Thread):
def __init__(self):
super().__init__()
self.setDaemon(True)
self.hooked = None
self.record_queue = None
self.record_flag = False
self.hook_event_code_int = 13
def _set_win32_hook(self, point):
self.hooked = user32.SetWindowsHookExA(
self.hook_event_code_int,
point,
0,
0
)
if not self.hooked:
return False
return True
def _remove_win32_hook_proc(self):
if self.hooked is None:
return
user32.UnhookWindowsHookEx(self.hooked)
self.hooked = None
def _win32_hook_proc(self, code, w_param, l_param):
        if w_param != wm_keydown:
return user32.CallNextHookEx(self.hooked, code, w_param, l_param)
if self.record_flag is True:
# int to hex
temp = hex(l_param[0] & 0xFFFFFFFF)
self.record_queue.put(("type_key", int(temp, 16)))
return user32.CallNextHookEx(self.hooked, code, w_param, l_param)
def _get_function_pointer(self, function):
win_function = WINFUNCTYPE(c_int, c_int, c_int, POINTER(c_void_p))
return win_function(function)
def _start_listener(self):
pointer = self._get_function_pointer(self._win32_hook_proc)
self._set_win32_hook(pointer)
message = MSG()
user32.GetMessageA(byref(message), 0, 0, 0)
def record(self, want_to_record_queue):
self.record_flag = True
self.record_queue = want_to_record_queue
self.start()
def stop_record(self):
self.record_flag = False
self._remove_win32_hook_proc()
return self.record_queue
def run(self):
self._start_listener()
if __name__ == "__main__":
win32_keyboard_listener = Win32KeyboardListener()
record_queue = Queue()
win32_keyboard_listener.record(record_queue)
from time import sleep
sleep(3)
temp = win32_keyboard_listener.stop_record()
for i in temp.queue:
print(i)
```
#### File: je_auto_control/wrapper/auto_control_record.py
```python
import sys
from je_auto_control.utils.action_executor.action_executor import execute_action
from je_auto_control.utils.exception.exception_tag import macos_record_error
from je_auto_control.utils.exception.exceptions import AutoControlException
from je_auto_control.utils.exception.exceptions import AutoControlJsonActionException
from je_auto_control.wrapper.platform_wrapper import recorder
def record():
if sys.platform == "darwin":
raise AutoControlException(macos_record_error)
return recorder.record()
def stop_record():
if sys.platform == "darwin":
raise AutoControlException(macos_record_error)
action_queue = recorder.stop_record()
if action_queue is None:
raise AutoControlJsonActionException
action_list = list(action_queue.queue)
new_list = list()
for action in action_list:
if action[0] == "type_key":
new_list.append([action[0], dict([["keycode", action[1]]])])
else:
new_list.append([action[0], dict(zip(["mouse_keycode", "x", "y"], [action[0], action[1], action[2]]))])
return new_list
if __name__ == "__main__":
record()
from time import sleep
sleep(5)
record_result = stop_record()
print(record_result)
execute_action(record_result)
sleep(2)
```
|
{
"source": "JE-Chen/Python-NLP-JE",
"score": 2
}
|
#### File: Python-NLP-JE/Models/NLP_Main.py
```python
import os
import jieba
import jieba.analyse
import logging
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from ckiptagger import WS, POS, NER
from hanziconv import HanziConv
class NLP_Main():
def __init__(self):
self.ws = WS("./data", disable_cuda=False)
self.pos = POS("./data", disable_cuda=False)
self.ner = NER("./data", disable_cuda=False)
    # Word segmentation (WS), part-of-speech tagging (POS), named entity recognition (NER).
# ---------------------------------------------------------------------------------
    # Word segmentation (WS)
def NLP_WS(self,text):
return self.ws([text])
    # Word segmentation (WS), saving the result to a file
def Ws_Save(self):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
output = open('wiki_ws.txt', 'w', encoding='utf-8')
with open('wiki_seg.txt', 'r', encoding='utf-8') as content:
for texts_num, line in enumerate(content):
line = line.strip('\n')
wordss = self.ws([line])
for words in wordss:
for word in words:
                        if word != ' ':
output.write(word+'\t')
print(word)
else:
output.write('\n')
if (texts_num + 1) % 10000 == 0:
logging.info("已完成前 %d 行的斷詞" % (texts_num + 1))
output.close()
# ---------------------------------------------------------------------------------
    # Part-of-speech tagging (POS)
def NLP_POS(self,text):
return self.pos(self.NLP_WS(text))
    # Named entity recognition (NER)
def NLP_NER(self,text):
return self.ner(self.NLP_WS(text), self.NLP_POS(text))
# ---------------------------------------------------------------------------------
    '''TF-IDF extraction
    A weighting technique commonly used in information retrieval; a statistical
    method. It evaluates how important a word is to one document in a document
    collection; the more important a word is to a document, the more likely it
    is to be a keyword.
    The TF-IDF algorithm has two parts: the TF algorithm and the IDF algorithm.
    TF counts how often a word appears in a single document (term frequency).
    IDF counts how many documents of the collection contain a word; a word that
    appears in fewer documents discriminates between documents more strongly.
'''
def Extract_Tag_TF_IDF(self,text):
Array =[]
for x,w in jieba.analyse.extract_tags(text,withWeight=True):
Array.append((str(x)+': '+str(w)))
return Array
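    # A minimal usage sketch (hypothetical text; each returned entry pairs a
    # keyword with its TF-IDF weight; note that constructing NLP_Main loads
    # the CKIP models from ./data):
    #
    #   nlp = NLP_Main()
    #   print(nlp.Extract_Tag_TF_IDF("自然語言處理是人工智慧的一個領域"))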
    '''TextRank algorithm
    TextRank descends from PageRank, which was developed by Google.
    PageRank measures the importance of web sites through the links between
    pages and the votes each page receives.
    TextRank instead finds the important words or sentences within a text.
'''
def Extract_Tag_TextRank(self, text):
Array = []
for x,w in jieba.analyse.textrank(text,withWeight=True):
Array.append((str(x)+': '+str(w)))
return Array
    '''Weights
    A weight is a relative concept, defined with respect to some indicator:
    the weight of an indicator is its relative importance in the overall evaluation.
    For example: you rate something 100 and your boss rates it 60; the plain
    average is (100 + 60) / 2 = 80.
    But the boss's word carries more weight. If the boss's weight is 2 and yours
    is 1, the average becomes a weighted average: (100 * 1 + 60 * 2) / (1 + 2) = 73.3.
'''
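    # The weighted-average arithmetic from the note above, as code:
    #
    #   scores = [100, 60]   # your score, the boss's score
    #   weights = [1, 2]     # the boss's vote counts double
    #   average = sum(s * w for s, w in zip(scores, weights)) / sum(weights)
    #   # average == 73.33...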
#---------------------------------------------------------------------------------
    # Convert Simplified Chinese to Traditional Chinese and save to a file
def Transform_ZhTw_Save(self,File_Name,Next_FileName):
FileRead=[]
with open(File_Name,'rb') as RawFile:
for line in RawFile:
FileRead.append(HanziConv.toTraditional(line))
with open(Next_FileName,'wb') as Next_File:
for i in range(len(FileRead)):
for j in range(len(FileRead[i])):
Next_File.write(FileRead[i][j].encode('utf-8'))
    # Convert Simplified Chinese to Traditional Chinese
def Transform_ZhTw(self,Text):
return HanziConv.toTraditional(Text)
    # Convert Traditional Chinese to Simplified Chinese
def Transform_Ch(self,Text):
return HanziConv.toSimplified(Text)
```
|
{
"source": "JE-Chen/Python-OPENCV-JE",
"score": 4
}
|
#### File: je_open_cv/modules/face_detection.py
```python
import cv2
'''
OpenCV comes with a trainer as well as a detector.
If you want to train your own classifier for any object, like cars or planes,
you can use OpenCV to create one.
Full details are given here: Cascade Classifier Training.
OpenCV already contains many pre-trained classifiers for faces, eyes, smiles, etc.
Those XML files are stored in the opencv/data/haarcascades/ folder.
Let's create a face and eye detector with OpenCV.
'''
def detection(image):
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
image = cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
roi_color = image[y:y + h, x:x + w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
return image
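# A minimal usage sketch (assumes both cascade XML files are in the working
# directory; OpenCV ships them under opencv/data/haarcascades/):
#
#   img = cv2.imread('people.jpg')  # hypothetical file name
#   cv2.imshow('faces', detection(img))
#   cv2.waitKey()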
```
#### File: je_open_cv/modules/feature.py
```python
import cv2
import numpy as np
'''
Feature point detection.
'''
'''
As usual, we have to create an ORB object with the function, cv2.ORB() or using feature2d common interface.
It has a number of optional parameters.
Most useful ones are nFeatures which denotes maximum number of features to be retained (by default 500),
scoreType which denotes whether Harris score or FAST score to rank the features (by default, Harris score) etc.
Another parameter, WTA_K decides number of points that produce each element of the oriented BRIEF descriptor.
By default it is two, ie selects two points at a time.
In that case, for matching, NORM_HAMMING distance is used.
If WTA_K is 3 or 4, which takes 3 or 4 points to produce BRIEF descriptor, then matching distance is defined by NORM_HAMMING2.
Details:
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_orb/py_orb.html#orb
'''
def orb_feature(image):
image = cv2.imread(image, 0)
ORB = cv2.ORB_create()
kp = ORB.detect(image, None)
kp, des = ORB.compute(image, kp)
image = cv2.drawKeypoints(image, kp, image, color=(0, 255, 0), flags=0)
return image
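# A minimal usage sketch (hypothetical file name; note that orb_feature takes
# a file path and loads it in grayscale, while harris below expects an
# already-loaded BGR image):
#
#   cv2.imshow('ORB keypoints', orb_feature('box.png'))
#   cv2.waitKey()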
def harris(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
# result is dilated for marking the corners, not important
dst = cv2.dilate(dst, None)
# Threshold for an optimal value, it may vary depending on the image.
image[dst > 0.01 * dst.max()] = [0, 0, 255]
return image
```
#### File: je_open_cv/modules/fps.py
```python
import cv2
'''
Measure execution time and toggle OpenCV's optimized code paths.
'''
def get_fps(function):
time1 = cv2.getTickCount()
function()
time2 = cv2.getTickCount()
return (time2 - time1) / cv2.getTickFrequency()
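# A minimal usage sketch (any zero-argument callable works; despite the name,
# the return value is the elapsed time in seconds, not a frame rate):
#
#   elapsed = get_fps(lambda: cv2.waitKey(1))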
'''
Many of the OpenCV functions are optimized using SSE2, AVX etc.
It contains unoptimized code also.
So if our system support these features, we should exploit them (almost all modern day processors support them).
It is enabled by default while compiling. So OpenCV runs the optimized code if it is enabled, else it runs the unoptimized code.
You can use cv2.useOptimized() to check if it is enabled/disabled and cv2.setUseOptimized() to enable/disable it.
Let’s see a simple example.
'''
def set_optimized():
cv2.setUseOptimized(not cv2.useOptimized())
```
#### File: je_open_cv/modules/image_operations.py
```python
import cv2
'''
Basic image-processing helpers.
'''
# Get the image's rows, columns, and channel count
def get_image_properties(image):
total = [image.shape, image.size, image.dtype]
return total
def get_image_shape(image):
return image.shape
# Get the image size
def get_image_size(image):
return image.size
# Get the image data type
def get_image_type(image):
return image.dtype
# Split the channels
def split_image(image):
B, G, R = cv2.split(image)
return [B, G, R]
'''
The B,G,R channels of an image can be split into their individual planes when needed. Then,
the individual channels can be merged back together to form a BGR image again. This can be performed by:
b = img[:,:,0]
Suppose, you want to make all the red pixels to zero, you need not split like this and put it equal to zero.
You can simply use Numpy indexing which is faster.
img[:,:,2] = 0
'''
# Merge the channels
def merge_image(B, G, R):
return cv2.merge((B, G, R))
# Blend two images using per-image alpha weights
def image_Blending(image1, image1_Alpha, image2, image2_Alpha):
return cv2.addWeighted(image1, image1_Alpha, image2, image2_Alpha, 0)
```
#### File: je_open_cv/modules/io.py
```python
import cv2
'''
Basic image input/output.
'''
def read_image(image, flag=1):
return cv2.imread(image, flag)
def show_image(image, window_name='image'):
cv2.imshow(window_name, image)
cv2.waitKey()
cv2.destroyAllWindows()
def output_image(image, file_name='image'):
cv2.imwrite(file_name, image)
```
#### File: je_open_cv/modules/smoothing.py
```python
import cv2
import numpy as np
'''
Image smoothing.
Filtering an image with these kernels blurs it.
'''
'''
As for one-dimensional signals, images also can be filtered with various low-pass filters (LPF), high-pass filters (HPF), etc.
A LPF helps in removing noise, or blurring the image. A HPF filters helps in finding edges in an image.
OpenCV provides a function, cv2.filter2D(), to convolve a kernel with an image.
As an example, we will try an averaging filter on an image.
kernel = 5x5
Filtering with the above kernel results in the following being performed:
for each pixel, a 5x5 window is centered on this pixel, all pixels falling within this window are summed up,
and the result is then divided by 25.
This equates to computing the average of the pixel values inside that window.
This operation is performed for all the pixels in the image to produce the output filtered image.
2D Convolution ( image Filtering )
'''
def convolution2_d(image, kernel=(5, 5)):
return cv2.filter2D(image, -1, np.ones(kernel, np.float32) / 25)
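# A minimal usage sketch (hypothetical file name; the default 5x5 box kernel
# replaces each pixel with the mean of the 25 pixels under the window):
#
#   image = cv2.imread('noisy.jpg')
#   smoothed = convolution2_d(image)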
'''
image blurring is achieved by convolving the image with a low-pass filter kernel.
It is useful for removing noise.
It actually removes high-frequency content (e.g. noise, edges)
from the image, resulting in edges being blurred when this filter is applied.
(Well, there are blurring techniques which do not blur edges). OpenCV provides mainly four types of blurring techniques.
1. Averaging
This is done by convolving the image with a normalized box filter.
It simply takes the average of all the pixels under kernel area and replaces the central element with this average.
This is done by the function cv2.blur() or cv2.boxFilter().
Check the docs for more details about the kernel.
We should specify the width and height of kernel.
'''
def averaging(image, kernel=(5, 5)):
return cv2.blur(image, kernel)
'''
In this approach, instead of a box filter consisting of equal filter coefficients, a Gaussian kernel is used.
It is done with the function, cv2.GaussianBlur().
We should specify the width and height of the kernel which should be positive and odd.
We also should specify the standard deviation in the X and Y directions, sigmaX and sigmaY respectively.
If only sigmaX is specified, sigmaY is taken as equal to sigmaX.
If both are given as zeros, they are calculated from the kernel size. Gaussian filtering is highly effective in removing Gaussian noise from the image.
'''
def gaussian_blur(image, kernel=(5, 5)):
return cv2.GaussianBlur(image, kernel, 0)
'''
Here, the function cv2.medianBlur() computes the median of all the pixels
under the kernel window and the central pixel is replaced with this median value.
This is highly effective in removing salt-and-pepper noise.
One interesting thing to note is that, in the Gaussian and box filters,
the filtered value for the central element can be a value which may not exist in the original image.
However this is not the case in median filtering, since the central element is always replaced by some pixel value in the image.
This reduces the noise effectively. The kernel size must be a positive odd integer.
'''
def median_blur(image, kernel=5):
return cv2.medianBlur(image, kernel)
'''
This is not the case for the bilateral filter, cv2.bilateralFilter(),
which was designed for, and is highly effective at, noise removal while preserving edges.
But the operation is slower compared to other filters.
We already saw that a Gaussian filter takes a neighborhood around the pixel and finds its Gaussian weighted average.
This Gaussian filter is a function of space alone, that is, nearby pixels are considered while filtering.
It does not consider whether pixels have almost the same intensity value and does not consider
whether the pixel lies on an edge or not.
The resulting effect is that Gaussian filters tend to blur edges, which is undesirable.
The bilateral filter also uses a Gaussian filter in the space domain,
but it also uses one more (multiplicative) Gaussian filter component which is a function of pixel intensity differences.
The Gaussian function of space makes sure that only pixels that are 'spatial neighbors' are considered for filtering,
while the Gaussian component applied in the intensity domain (a Gaussian function of intensity differences) ensures
that only those pixels with intensities similar to that of the central pixel (‘intensity neighbors’) are included
to compute the blurred intensity value. As a result, this method preserves edges, since for pixels lying near edges,
neighboring pixels placed on the other side of the edge,
and therefore exhibiting large intensity variations when compared to the central pixel, will not be included for blurring.
'''
def bilateral(image, diameter=9, sigma_color=75, sigma_space=75):
    # Defaults follow the common OpenCV tutorial values (9, 75, 75)
    return cv2.bilateralFilter(image, diameter, sigma_color, sigma_space)
```
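A quick sketch applying the four blurring helpers above to one image so their results can be compared side by side; "noisy.png" is an assumed input file.
```python
import cv2

from je_open_cv.modules import smoothing

image = cv2.imread("noisy.png")

results = {
    "averaging": smoothing.averaging(image),
    "gaussian": smoothing.gaussian_blur(image),
    "median": smoothing.median_blur(image),  # best for salt-and-pepper noise
    "bilateral": smoothing.bilateral(image),  # slowest, but preserves edges
}
for name, blurred in results.items():
    cv2.imshow(name, blurred)
cv2.waitKey(0)
cv2.destroyAllWindows()
```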
#### File: je_open_cv/modules/template_detection.py
```python
import cv2
import numpy as np
'''
Locate objects in an image via template matching.
'''
'''
How it works:
    Two images are supplied: image and template.
    The template is slid across the image, producing a similarity score for each position.
    Each score is stored at the position of the template's top-left corner, yielding the result map.
    minMaxLoc() can then find the maximum or minimum of the result map to locate the match.
Limitations:
    Fails when the object is rotated.
    Fails when the object is scaled.
Parameters:
    image - the image to search; must be 8-bit or 32-bit.
    template - the object to search for.
        Its size must not exceed that of image, and the formats must match.
    method - the comparison method.
    result - the comparison result, a numpy.ndarray (dtype=float32); an array to store the result in may be passed.
    CV_TM_SQDIFF : squared difference; smaller is more similar.
    CV_TM_SQDIFF_NORMED : normalized squared difference; smaller is more similar. Invariant when all pixel intensities are scaled by the same factor.
    CV_TM_CCORR : cross-correlation; larger is more similar.
    CV_TM_CCORR_NORMED : normalized cross-correlation; larger is more similar. Invariant when all pixel intensities are scaled by the same factor.
    CV_TM_CCOEFF : correlation coefficient with the DC component removed; larger is more similar.
    CV_TM_CCOEFF_NORMED : normalized correlation coefficient with the DC component removed. Invariant when all pixel intensities are scaled by the same factor.
        The resulting coefficient is bounded between -1 and 1:
        1 means a perfect match,
        -1 means the brightness is exactly inverted,
        0 means no linear correlation.
Details:
    https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_template_matching/py_template_matching.html?highlight=matchtemplate
'''
def ignore_same_image(image, points, threshold, width, height, draw_image=False):
flag = False
image_points_list = []
for left, top in points:
for image_x_y in image_points_list:
if ((left - image_x_y[0]) ** 2 + (top - image_x_y[1]) ** 2) < threshold ** 2:
break
else:
right = left + width
bottom = top + height
image_data_tuple = left, top, right, bottom
if draw_image:
draw_detect(image, (left, top), right, bottom)
image_points_list.append(image_data_tuple)
flag = True
return flag, image_points_list
def draw_detect(image, points, right, bottom):
cv2.rectangle(image, points, (right, bottom), (0, 0, 255), 2)
def detect(image, template, detect_threshold=1, draw_image=False):
image_points_tuple = ()
w, h = template.shape[::-1]
flag = False
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
threshold = detect_threshold
loc = np.where(res >= threshold)
for points in zip(*loc[::-1]):
right = points[0] + w
bottom = points[1] + h
image_points_tuple = points[0], points[1], right, bottom
if draw_image:
draw_detect(image, points, right, bottom)
flag = True
break
if draw_image:
return flag, image_points_tuple, image
else:
return flag, image_points_tuple
def detect_multi(image, template, detect_threshold=1, draw_image=False):
width, height = template.shape[::-1]
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
threshold = detect_threshold
loc = np.where(res >= threshold)
points = zip(*loc[::-1])
if draw_image:
return image, ignore_same_image(image, points, min(template.shape[0], template.shape[1]), width, height,
draw_image)
else:
return ignore_same_image(image, points, min(template.shape[0], template.shape[1]), width, height)
# Find a single object in the image
def find_object(image, template, detect_threshold=1, draw_image=False):
if type(image) is str:
image = cv2.imread(image, 0)
else:
image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
if type(template) is str:
template = cv2.imread(template, 0)
else:
template = cv2.cvtColor(np.array(template), cv2.COLOR_RGB2GRAY)
return detect(image=image, template=template, detect_threshold=detect_threshold, draw_image=draw_image)
'''
Find multiple occurrences of an object in the image.
cv2.minMaxLoc() only reports the single best match, so when the object
appears more than once we threshold the result map instead.
'''
def find_multi_object(image, template, detect_threshold=1, draw_image=False):
if type(image) is str:
image = cv2.imread(image, 0)
else:
image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
if type(template) is str:
template = cv2.imread(template, 0)
else:
template = cv2.cvtColor(np.array(template), cv2.COLOR_RGB2GRAY)
return detect_multi(image=image, template=template, detect_threshold=detect_threshold, draw_image=draw_image)
```
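The docstring above mentions locating the single best match with cv2.minMaxLoc(); here is a minimal sketch of that approach, assuming two grayscale files "scene.png" and "patch.png".
```python
import cv2

image = cv2.imread("scene.png", 0)
template = cv2.imread("patch.png", 0)
h, w = template.shape

res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
# For TM_CCOEFF_NORMED the best match is the maximum of the result map.
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(image, top_left, bottom_right, 255, 2)
```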
#### File: je_open_cv/modules/ui.py
```python
import cv2
'''
UI helpers built with OpenCV.
'''
'''
switch_function must take one parameter: the current trackbar position is passed to it on every change.
'''
def add_trackbar(canvas_name, switch_function, track_name='name', start=0, end=255):
cv2.createTrackbar(track_name, canvas_name, start, end, switch_function)
def get_trackbar_pos(track_name, canvas_name):
return cv2.getTrackbarPos(track_name, canvas_name)
```
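A minimal sketch wiring the trackbar helpers above to a named window; the window name and callback are illustrative assumptions.
```python
import cv2
import numpy as np

from je_open_cv.modules import ui

def on_change(position):
    # Receives the current slider position, as the note above describes.
    print("trackbar position:", position)

cv2.namedWindow("canvas")
ui.add_trackbar("canvas", on_change, track_name="brightness", start=0, end=255)
cv2.imshow("canvas", np.zeros((200, 400), np.uint8))
cv2.waitKey(0)
print("final position:", ui.get_trackbar_pos("brightness", "canvas"))
cv2.destroyAllWindows()
```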
#### File: Python-OPENCV-JE/test/template_detection_pil_test.py
```python
import cv2
from PIL import ImageGrab
from je_open_cv import template_detection
def find_image(image, draw_image=False):
grab_image = ImageGrab.grab()
return template_detection.find_object(grab_image, image, detect_threshold=0.9, draw_image=draw_image)
image_data_array = find_image("../test1.png", draw_image=True)
print(image_data_array)
if image_data_array[0] is True:
    # The points tuple is (left, top, right, bottom), so [2]-[0] is the width
    # and [3]-[1] is the height.
    width = image_data_array[1][2] - image_data_array[1][0]
    height = image_data_array[1][3] - image_data_array[1][1]
    center = [int(width / 2), int(height / 2)]
print(center)
cv2.imshow("test", image_data_array[2])
cv2.waitKey(0)
cv2.destroyAllWindows()
```
|
{
"source": "JE-Chen/Python-WebCrawler-JE",
"score": 3
}
|
#### File: Python-WebCrawler-JE/Models/Google_Image_Crawler.py
```python
import time
from time import sleep
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
# Google image crawler
class Google_Image_Crawler:
def __init__(self, Keyword='Discord'):
self.options = Options()
        # Suppress browser notification pop-ups
prefs = {
'profile.default_content_setting_values':
{
'notifications': 2
}
}
self.options.add_experimental_option('prefs', prefs)
        # options.add_argument("--headless")  # run in the background without opening a visible browser
        self.options.add_argument("--incognito")  # open in incognito mode
# define url using search term
self.Keyword = Keyword
# get img and url
self.Img_Url = []
def Start_Browser(self):
# create webdriver
browser = webdriver.Chrome(options=self.options)
self.browser = browser
# ----------------------------------------------------------------------------------------------
    # Run the Google search, collecting image URLs without saving files
def Scrape_Pic_NoSave(self):
try:
self.Click_Button()
self.Scroll_Down()
self.Click_Button()
self.Scroll_Down()
Return_Url = self.Find_Image_NoSave()
        except Exception:
            # retry once if the page was not ready yet
self.Click_Button()
self.Scroll_Down()
self.Click_Button()
self.Scroll_Down()
Return_Url = self.Find_Image_NoSave()
        try:
            # quit() shuts down the driver and closes every window;
            # a separate close() afterwards is unnecessary
            self.browser.quit()
        except Exception:
            pass
return Return_Url
# ----------------------------------------------------------------------------------------------
    # Run the Google search, saving the images and their URLs
def Scrape_Pic_Save(self):
try:
self.Click_Button()
self.Scroll_Down()
self.Click_Button()
self.Scroll_Down()
Return_Url = self.Find_Image_Save()
        except Exception:
            # retry once if the page was not ready yet
self.Click_Button()
self.Scroll_Down()
self.Click_Button()
self.Scroll_Down()
Return_Url = self.Find_Image_Save()
        try:
            # quit() shuts down the driver and closes every window;
            # a separate close() afterwards is unnecessary
            self.browser.quit()
        except Exception:
            pass
return Return_Url
# ----------------------------------------------------------------------------------------------
    # Scroll to the bottom of the page repeatedly so more results load
def Scroll_Down(self, roll=7):
for i in range(roll):
self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
sleep(3)
# ----------------------------------------------------------------------------------------------
    # Click the "show more results" button
def Click_Button(self):
time.sleep(1)
button = self.browser.find_element_by_class_name(u"mye4qd")
self.browser.implicitly_wait(1)
if (button.is_displayed()):
ActionChains(self.browser).move_to_element(button).click(button).perform()
else:
pass
# ----------------------------------------------------------------------------------------------
    # Parse the current page source with BeautifulSoup
def Get_Soup(self):
html_source = self.browser.page_source
soup = BeautifulSoup(html_source, 'html.parser')
return soup
# ----------------------------------------------------------------------------------------------
    # Collect image URLs, saving both the images and the link list
def Find_Image_Save(self):
count = 0
for img in (self.Get_Soup().find_all('img')):
try:
self.Img_Url.append(img['src'])
            except KeyError:
pass
        # Iterate over every <img> tag on the page
for item in (self.Get_Soup().find_all('img')):
if (str(item.get('src')).startswith('http')):
                # Only process sources that start with http
if (count == len(self.Img_Url) - 1):
                    print('No more images to fetch')
break
else:
                    # Print the fetched image URL
print(item.get('src'))
html = requests.get(item.get('src'))
img_name = str(count + 1) + '.png'
                    with open(img_name, 'wb') as file:
                        # the with block flushes and closes the file automatically
                        file.write(html.content)
                    if item.get('src') is not None and html.content is not None:
                        print('Image %d' % (count + 1))
count += 1
# return list of all img urls found in page
output = open('Image_Url.txt', 'w', encoding='utf-8')
for x in range(len(self.Img_Url)):
output.write(str(self.Img_Url[x]) + '\n')
output.close()
        print('Finished fetching images; crawler was detected')
self.browser.close()
return self.Img_Url
# ----------------------------------------------------------------------------------------------
    # Collect image URLs without saving any files
def Find_Image_NoSave(self):
count = 0
for img in (self.Get_Soup().find_all('img')):
try:
if (str(img.get('src')).startswith('http')):
self.Img_Url.append(img['src'])
            except KeyError:
pass
        # Iterate over every <img> tag on the page
for item in (self.Get_Soup().find_all('img')):
if (str(item.get('src')).startswith('http')):
                # Only process sources that start with http
if (count == len(self.Img_Url) - 1):
                    print('No more images to fetch')
break
else:
                    # Print the fetched image URL
print(item.get('src'))
html = requests.get(item.get('src'))
img_name = str(count + 1) + '.png'
if (item.get('src') != None and html.content != None):
print('第 %d 張' % (count + 1))
count += 1
# return list of all img urls found in page
        print('Finished fetching images; crawler was detected')
self.browser.close()
return self.Img_Url
# ----------------------------------------------------------------------------------------------
    # Set the search keyword
def Set_Keyword(self, Keyword):
self.Keyword = Keyword
# ----------------------------------------------------------------------------------------------
    # Entry point: launch the browser and run one of the scrape methods above
def Start_Crawler(self, Mode=1):
self.Start_Browser()
searchUrl = "https://www.google.com/search?q={}&site=webhp&tbm=isch".format(self.Keyword)
# get url
self.browser.get(searchUrl)
if (Mode == 1):
Return_Url = self.Scrape_Pic_NoSave()
else:
Return_Url = self.Scrape_Pic_Save()
return Return_Url
```
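A hedged usage sketch for the crawler above; it assumes chromedriver is available on PATH and that Google's current page markup still matches the selectors the class uses.
```python
from Models.Google_Image_Crawler import Google_Image_Crawler  # import path assumed from the file heading

crawler = Google_Image_Crawler(Keyword='Discord')
# Mode=1 only collects image URLs; any other mode also saves the image files.
urls = crawler.Start_Crawler(Mode=1)
print(len(urls), 'image URLs collected')
```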
#### File: Python-WebCrawler-JE/Models/PTT_Crawler.py
```python
import requests
from bs4 import BeautifulSoup
class PTT_Crawler:
    '''
    'https://www.ptt.cc//bbs/',  # board index
    'https://www.ptt.cc//about.html',  # PTT "about" page
    'https://www.ptt.cc//contact.html',  # PTT contact info
    'https://www.ptt.cc//bbs/hotboards.html',  # board index under a different URL
    'https://www.ptt.cc//cls/1',  # board groups
    '''
Ptt_Link = [
        'https://www.ptt.cc//bbs/Gossiping/index.html',  # broken; needs a fix
        'https://www.ptt.cc//bbs/C_Chat/index.html',  # C_Chat board
        'https://www.ptt.cc//bbs/Stock/index.html',  # Stock board
        'https://www.ptt.cc//bbs/Lifeismoney/index.html',  # Lifeismoney board
        'https://www.ptt.cc//bbs/NSwitch/index.html',  # NSwitch board
        'https://www.ptt.cc//bbs/LoL/index.html',
        'https://www.ptt.cc//bbs/sex/index.html',  # broken; needs a fix
'https://www.ptt.cc//bbs/NBA/index.html',
'https://www.ptt.cc//bbs/Baseball/index.html',
'https://www.ptt.cc//bbs/movie/index.html',
'https://www.ptt.cc//bbs/car/index.html',
'https://www.ptt.cc//bbs/KoreaDrama/index.html',
        'https://www.ptt.cc//bbs/Beauty/index.html',  # broken
'https://www.ptt.cc//bbs/MobileComm/index.html',
'https://www.ptt.cc//bbs/WomenTalk/index.html',
'https://www.ptt.cc//bbs/BabyMother/index.html',
'https://www.ptt.cc//bbs/e-shopping/index.html',
'https://www.ptt.cc//bbs/Boy-Girl/index.html',
'https://www.ptt.cc//bbs/AllTogether/index.html',
'https://www.ptt.cc//bbs/Tech_Job/index.html',
'https://www.ptt.cc//bbs/PlayStation/index.html',
'https://www.ptt.cc//bbs/TW_Entertain/index.html',
'https://www.ptt.cc//bbs/joke/index.html',
'https://www.ptt.cc//bbs/home-sale/index.html',
'https://www.ptt.cc//bbs/ToS/index.html',
'https://www.ptt.cc//bbs/Steam/index.html',
'https://www.ptt.cc//bbs/PC_Shopping/index.html',
'https://www.ptt.cc//bbs/AnimalForest/index.html',
'https://www.ptt.cc//bbs/KR_Entertain/index.html',
'https://www.ptt.cc//bbs/iOS/index.html',
        'https://www.ptt.cc//bbs/japanavgirls/index.html',  # broken
'https://www.ptt.cc//bbs/TaiwanDrama/index.html',
'https://www.ptt.cc//bbs/marriage/index.html',
'https://www.ptt.cc//bbs/Tainan/index.html',
        'https://www.ptt.cc//bbs/HatePolitics/index.html',  # broken
'https://www.ptt.cc//bbs/KoreaStar/index.html',
'https://www.ptt.cc//bbs/marvel/index.html',
'https://www.ptt.cc//bbs/BeautySalon/index.html',
'https://www.ptt.cc//bbs/creditcard/index.html',
'https://www.ptt.cc//bbs/Gamesale/index.html',
'https://www.ptt.cc//bbs/MakeUp/index.html',
'https://www.ptt.cc//bbs/Kaohsiung/index.html',
'https://www.ptt.cc//bbs/CFantasy/index.html',
'https://www.ptt.cc//bbs/TaichungBun/index.html',
'https://www.ptt.cc//bbs/EAseries/index.html',
'https://www.ptt.cc//bbs/HardwareSale/index.html',
'https://www.ptt.cc//bbs/ONE_PIECE/index.html',
'https://www.ptt.cc//bbs/Japandrama/index.html',
'https://www.ptt.cc//bbs/basketballTW/index.html',
'https://www.ptt.cc//bbs/StupidClown/index.html',
'https://www.ptt.cc//bbs/Hearthstone/index.html',
'https://www.ptt.cc//bbs/PokemonGO/index.html',
'https://www.ptt.cc//bbs/China-Drama/index.html',
'https://www.ptt.cc//bbs/Salary/index.html',
'https://www.ptt.cc//bbs/YuanChuang/index.html',
'https://www.ptt.cc//bbs/CVS/index.html',
        'https://www.ptt.cc//bbs/SportLottery/index.html',  # broken
        'https://www.ptt.cc//bbs/AC_In/index.html',  # broken
'https://www.ptt.cc//bbs/Hsinchu/index.html',
'https://www.ptt.cc//bbs/AzurLane/index.html',
'https://www.ptt.cc//bbs/Japan_Travel/index.html',
'https://www.ptt.cc//bbs/mobilesales/index.html',
'https://www.ptt.cc//bbs/biker/index.html',
        'https://www.ptt.cc//bbs/Brand/index.html',  # broken
'https://www.ptt.cc//bbs/MuscleBeach/index.html',
'https://www.ptt.cc//bbs/Food/index.html',
'https://www.ptt.cc//bbs/Headphone/index.html',
        'https://www.ptt.cc//bbs/BuyTogether/index.html',  # broken
'https://www.ptt.cc//bbs/GetMarry/index.html',
'https://www.ptt.cc//bbs/CarShop/index.html',
'https://www.ptt.cc//bbs/Palmar_Drama/index.html',
'https://www.ptt.cc//bbs/cookclub/index.html',
'https://www.ptt.cc//bbs/forsale/index.html',
'https://www.ptt.cc//bbs/PCReDive/index.html',
'https://www.ptt.cc//bbs/PathofExile/index.html',
'https://www.ptt.cc//bbs/TWICE/index.html',
'https://www.ptt.cc//bbs/Wanted/index.html',
'https://www.ptt.cc//bbs/MH/index.html',
        'https://www.ptt.cc//bbs/feminine_sex/index.html',  # broken
'https://www.ptt.cc//bbs/MacShop/index.html',
'https://www.ptt.cc//bbs/FATE_GO/index.html',
'https://www.ptt.cc//bbs/PuzzleDragon/index.html',
'https://www.ptt.cc//bbs/WOW/index.html',
'https://www.ptt.cc//bbs/BabyProducts/index.html',
'https://www.ptt.cc//bbs/KoreanPop/index.html',
'https://www.ptt.cc//bbs/E-appliance/index.html',
'https://www.ptt.cc//bbs/Elephants/index.html',
'https://www.ptt.cc//bbs/watch/index.html',
'https://www.ptt.cc//bbs/Bank_Service/index.html',
'https://www.ptt.cc//bbs/GBF/index.html',
'https://www.ptt.cc//bbs/lesbian/index.html',
'https://www.ptt.cc//bbs/MobilePay/index.html',
'https://www.ptt.cc//bbs/Soft_Job/index.html',
'https://www.ptt.cc//bbs/JP_Entertain/index.html',
'https://www.ptt.cc//bbs/gay/index.html',
'https://www.ptt.cc//bbs/Finance/index.html',
'https://www.ptt.cc//bbs/cat/index.html',
'https://www.ptt.cc//bbs/Examination/index.html',
'https://www.ptt.cc//bbs/SuperJunior/index.html',
'https://www.ptt.cc//bbs/Aviation/index.html',
'https://www.ptt.cc//bbs/ArenaOfValor/index.html',
'https://www.ptt.cc//bbs/DSLR/index.html',
'https://www.ptt.cc//bbs/Gov_owned/index.html',
'https://www.ptt.cc//bbs/HelpBuy/index.html',
'https://www.ptt.cc//bbs/graduate/index.html',
'https://www.ptt.cc//bbs/medstudent/index.html',
'https://www.ptt.cc//bbs/NBA_Film/index.html',
'https://www.ptt.cc//bbs/hypermall/index.html',
'https://www.ptt.cc//bbs/part-time/index.html',
'https://www.ptt.cc//bbs/give/index.html',
        'https://www.ptt.cc//bbs/BB-Love/index.html',  # broken
'https://www.ptt.cc//bbs/DC_SALE/index.html',
'https://www.ptt.cc//bbs/book/index.html',
'https://www.ptt.cc//bbs/nb-shopping/index.html',
        'https://www.ptt.cc//bbs/SuperBike/index.html',  # broken
'https://www.ptt.cc//bbs/PublicServan/index.html',
'https://www.ptt.cc//bbs/Key_Mou_Pad/index.html',
'https://www.ptt.cc//bbs/CN_Entertain/index.html',
'https://www.ptt.cc//bbs/Taoyuan/index.html',
'https://www.ptt.cc//bbs/Nogizaka46/index.html',
'https://www.ptt.cc//bbs/KanColle/index.html',
'https://www.ptt.cc//bbs/Mobile-game/index.html',
'https://www.ptt.cc//bbs/FITNESS/index.html',
'https://www.ptt.cc//bbs/Foreign_Inv/index.html',
'https://www.ptt.cc//bbs/studyabroad/index.html',
'https://www.ptt.cc//bbs/Option/index.html',
'https://www.ptt.cc//bbs/facelift/index.html',
'https://www.ptt.cc//bbs/BB_Online/index.html'
]
# ----------------------------------------------------------------------------------------------
    # Get the number of pages, parsed from an index URL
def Get_Page_Number(self, content):
start_index = content.find('index')
end_index = content.find('.html')
page_number = content[start_index + 5: end_index]
return (len(page_number) + 1)
    # Get the full list of board links
def Get_Ptt_Link(self):
return self.Ptt_Link
    # Get a single board link by index
def Get_Ptt_Link_index(self, index):
return self.Ptt_Link[index]
# ----------------------------------------------------------------------------------------------
    '''Search the given PTT board URL for articles; posts scoring below push_rate are skipped.
    title : article title
    url : article link
    rate : push (upvote) count
    Returns { 'title':title, 'url':url, 'rate':rate }
    '''
def Craw_Page_Rate(self, url='https://www.ptt.cc/bbs/C_Chat/index.html', push_rate=10):
print('Start parsing \t' + url)
rs = requests.session()
res = rs.get(url, cookies={'over18': '1'})
soup_ = BeautifulSoup(res.text, 'lxml')
article_seq = []
for r_ent in soup_.find_all(class_="r-ent"):
try:
                # First get each article's URL
link = r_ent.find('a')['href']
if link:
                    # With the URL confirmed, grab the title and push count
title = r_ent.find(class_="title").text.strip()
rate = r_ent.find(class_="nrec").text
url = 'https://www.ptt.cc' + link
                    if rate:
                        # '爆' marks an article with 100+ pushes; 'X' prefixes a negative score.
                        # Branch so startswith() is never called on the int assigned above.
                        if rate.startswith('爆'):
                            rate = 100
                        elif rate.startswith('X'):
                            rate = -1 * int(rate[1])
                    else:
                        rate = 0
                    # Compare against the push-count threshold
if int(rate) >= push_rate:
article_seq.append({
'title': title,
'url': url,
'rate': rate,
})
except Exception as e:
# print('crawPage function error:',r_ent.find(class_="title").text.strip())
                print('Article has been deleted', e)
Total = ''
for i in range(len(article_seq)):
Total += article_seq[i]['title'] + '\n'
Total += (article_seq[i]['url']) + '\n'
            Total += ('Pushes : ' + str(article_seq[i]['rate'])) + '\n'
Total += ('-----------------------------------------------------') + '\n'
return Total
# ----------------------------------------------------------------------------------------------
    # Search the given PTT board URL for articles
def Crawl_Page(self, url='https://www.ptt.cc/bbs/C_Chat/index.html'):
print('Start parsing \t' + url)
rs = requests.session()
res = rs.get(url, cookies={'over18': '1'})
soup = BeautifulSoup(res.text, 'lxml')
article_gossiping_seq = []
for r_ent in soup.find_all(class_="r-ent"):
try:
                # First get each article's URL
link = r_ent.find('a')['href']
if link:
                    # With the URL confirmed, grab the title
title = r_ent.find(class_="title").text.strip()
url = 'https://www.ptt.cc' + link
article_gossiping_seq.append({
'url': url,
'title': title
})
except Exception as e:
                # print('crawPage function error:', r_ent.find(class_="title").text.strip())
                # The article has been deleted
                print('deleted', e)
Total = ''
for i in range(len(article_gossiping_seq)):
Total += article_gossiping_seq[i]['title'] + '\n'
Total += (article_gossiping_seq[i]['url']) + '\n'
Total += ('-----------------------------------------------------') + '\n'
return Total
# ----------------------------------------------------------------------------------------------
    # PTT trending articles, scraped from disp.cc
def Ptt_Hot(self):
target_url = 'http://disp.cc/b/PttHot'
print('Start parsing Ptt_Hot....')
rs = requests.session()
res = rs.get(target_url, cookies={'over18': '1'})
soup = BeautifulSoup(res.text, 'lxml')
content = ""
for data in soup.select('#list div.row2 div span.listTitle'):
title = data.text
link = "http://disp.cc/b/" + data.find('a')['href']
if data.find('a')['href'] == "796-59l9":
break
content += '{}\n{}\n\n'.format(title, link)
return content
# ----------------------------------------------------------------------------------------------
    # Get the name, daily post count, category, and link for every board
def Get_All_Ptt_Board(self):
url = 'https://www.ptt.cc/bbs/index.html'
print('Start parsing \t' + url)
rs = requests.session()
res = rs.get(url, cookies={'over18': '1'})
soup = BeautifulSoup(res.text, 'lxml')
Total = ''
for b in soup.find_all('a'):
x = b.get_text().split()
if (len(x) < 2):
continue
else:
                count = 0
                for add in x:
                    if (count == 0):
                        Total += 'Board: ' + add + ' '
                    if (count == 1):
                        Total += "Today's posts: " + add + ' '
                    if (count == 2):
                        Total += 'Category: ' + add + ' '
                    if (count == 3):
                        Total += 'Notice: ' + add + ' '
                    elif (count > 3):
                        Total += add + ' '
                    count += 1
Total += '\n'
Total += ('https://www.ptt.cc/' + b.get('href')) + '\n'
return Total
    # Get the page-control options: oldest / previous / next / newest
def Get_Page_Options(self, url='https://www.ptt.cc/bbs/C_Chat/index.html'):
rs = requests.session()
res = rs.get(url, cookies={'over18': '1'})
soup = BeautifulSoup(res.text, 'lxml')
        # Page controls: oldest / previous / next / newest
Page_Options = []
for controls in soup.select('.action-bar a.btn.wide'):
link = str(controls.get('href'))
Page_Options.append('https://www.ptt.cc' + link)
return Page_Options
# ----------------------------------------------------------------------------------------------
    '''Search the given PTT board URL for articles; posts scoring below push_rate are skipped.
    title : article title
    url : article link
    rate : push (upvote) count
    author : author
    date : date
    Returns {'title':title, 'url':url, 'rate':rate, 'author':author, 'date':date}
    '''
def Craw_Page_All_Data(self, url='https://www.ptt.cc/bbs/C_Chat/index.html', push_rate=10):
print('Start parsing \t' + url)
rs = requests.session()
res = rs.get(url, cookies={'over18': '1'})
soup_ = BeautifulSoup(res.text, 'lxml')
article_seq = []
for r_ent in soup_.find_all(class_="r-ent"):
try:
                # First get each article's URL
link = r_ent.find('a')['href']
if link:
                    # With the URL confirmed, grab the title and push count
title = r_ent.find(class_="title").text.strip()
rate = r_ent.find(class_="nrec").text
url = 'https://www.ptt.cc' + link
author = r_ent.find(class_="author").text
date = r_ent.find(class_="date").text
                    if rate:
                        # '爆' marks an article with 100+ pushes; 'X' prefixes a negative score.
                        # Branch so startswith() is never called on the int assigned above.
                        if rate.startswith('爆'):
                            rate = 100
                        elif rate.startswith('X'):
                            rate = -1 * int(rate[1])
                    else:
                        rate = 0
                    # Compare against the push-count threshold
if int(rate) >= push_rate:
article_seq.append({
'title': title,
'url': url,
'rate': rate,
'author': author,
'date': date
})
except Exception as e:
                # print('crawPage function error:', r_ent.find(class_="title").text.strip())
                print('Article has been deleted', e)
Total = ''
for i in range(len(article_seq)):
Total += article_seq[i]['title'] + '\n'
Total += (article_seq[i]['url']) + '\n'
            Total += ('Pushes : ' + str(article_seq[i]['rate'])) + '\n'
            Total += ('Author : ' + article_seq[i]['author']) + '\n'
            Total += ('Date : ' + article_seq[i]['date']) + '\n'
Total += ('-----------------------------------------------------') + '\n'
return Total
# ----------------------------------------------------------------------------------------------
```
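A short usage sketch for PTT_Crawler; the board URL comes from the Ptt_Link list above and live network access is assumed.
```python
from Models.PTT_Crawler import PTT_Crawler  # import path assumed from the file heading

crawler = PTT_Crawler()
# Fetch C_Chat articles with at least 10 pushes.
print(crawler.Craw_Page_Rate('https://www.ptt.cc/bbs/C_Chat/index.html', push_rate=10))
# List the pagination links (oldest / previous / next / newest) for the board.
print(crawler.Get_Page_Options('https://www.ptt.cc/bbs/C_Chat/index.html'))
```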
|
{
"source": "JE-Chen/SeleniumWrapper_JE",
"score": 2
}
|
#### File: test_object/test_object_record/test_object_record_class.py
```python
from je_web_runner.utils.test_object.test_object_class import TestObject
class TestObjectRecord(object):
def __init__(self):
self.test_object_record_dict = dict()
def clean_record(self):
self.test_object_record_dict = dict()
def save_test_object(self, test_object_name: str, object_type=None, **kwargs):
test_object = TestObject(test_object_name, object_type)
self.test_object_record_dict.update({test_object.test_object_name: test_object})
def remove_test_object(self, test_object_name: str):
return self.test_object_record_dict.pop(test_object_name, False)
test_object_record = TestObjectRecord()
```
|
{
"source": "JECINTA534521/Blog-project",
"score": 2
}
|
#### File: Blog-project/app/email.py
```python
from . import mail
from flask_mail import Message
from flask import render_template
def mail_message(subject, template, to, **kwargs):
    sender_email = '<EMAIL>'
    email = Message(subject, sender=sender_email, recipients=[to])
    email.body = render_template(template + '.txt', **kwargs)
    # Send the composed message through the Flask-Mail extension
    mail.send(email)
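A hypothetical call, assuming the Flask app is configured for Flask-Mail and a templates/email/welcome.txt template exists:
```python
# Inside a request handler, with an application context active:
mail_message('Welcome to the blog', 'email/welcome', 'reader@example.com', name='Jess')
```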
|
{
"source": "JECINTA534521/Jess-instagram-project",
"score": 2
}
|
#### File: Jess-instagram-project/users/models.py
```python
import os
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
website = models.CharField(max_length=100, null=True, blank=True)
bio = models.CharField(max_length=150, null=True, blank=True)
image = models.ImageField(default='default.jpg', upload_to='profile_pics')
followers = models.ManyToManyField(User, blank=True, related_name='user_followers')
def __str__(self):
return f'{self.user.username} Profile'
    # save() checks the EXIF information of cellphone photos to see what
    # orientation the photo was taken in, then rotates the image to be upright.
    # Images are reduced to 200px x 200px to save room on the server.
    def save(self, **kwargs):
        super().save(**kwargs)  # forward kwargs so options like update_fields still work
        img = Image.open(self.image.path)
        exif = img._getexif()  # private Pillow API; returns None when no EXIF data exists
orientation_key = 274
if exif and orientation_key in exif:
orientation = exif[orientation_key]
rotate_values = {
3: Image.ROTATE_180,
6: Image.ROTATE_270,
8: Image.ROTATE_90
}
if orientation in rotate_values:
img = img.transpose(rotate_values[orientation])
output_size = (200, 200)
img.thumbnail(output_size)
img.save(self.image.path)
```
|
{
"source": "JECINTA534521/my-gallery",
"score": 2
}
|
#### File: my-gallery/photo/models.py
```python
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=30)
def save_category(self):
self.save()
def delete_category(self):
self.delete()
def __str__(self):
return self.name
class Location(models.Model):
loc_name = models.CharField(max_length=30)
def save_loc(self):
self.save()
def delete_loc(self):
self.delete()
def __str__(self):
return self.loc_name
class Image(models.Model):
image = models.ImageField(upload_to='photos/')
image_name = models.CharField(max_length=30)
image_descprition = models.CharField(max_length=150)
location = models.ForeignKey(Location,on_delete=models.CASCADE)
category = models.ForeignKey(Category,on_delete=models.CASCADE)
@classmethod
def get_image_by_id(cls,id):
image = cls.objects.get(id = id)
return image
@classmethod
def search_image(cls,categorys):
images = cls.objects.filter(category__name = categorys)
return images
@classmethod
def filter_by_location(cls,location):
images_locs = cls.objects.filter(location__loc_name=location)
return images_locs
def save_image(self):
self.save()
def delete_image(self):
self.delete()
def __str__(self):
return self.image_name
class Meta:
ordering = ['image_name']
```
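A brief Django-shell sketch exercising the classmethod helpers above; the category and location names are illustrative assumptions.
```python
from photo.models import Image

# Look up a single image by primary key.
image = Image.get_image_by_id(1)

# Filter by the related Category name and by the Location name.
nature_images = Image.search_image('nature')
nairobi_images = Image.filter_by_location('Nairobi')
```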
|