Dataset schema (one record per source file):

| column | type | values / range |
|---|---|---|
| repo_name | string | lengths 5 to 92 |
| path | string | lengths 4 to 232 |
| copies | string | 19 classes |
| size | string | lengths 4 to 7 |
| content | string | lengths 721 to 1.04M |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 15 to 997 |
| alpha_frac | float64 | 0.25 to 0.97 |
| autogenerated | bool | 1 class |

Each record below is shown as a header line (repo_name | path | copies | size), followed by the full content field, followed by a trailer line (license | hash | line_mean | line_max | alpha_frac | autogenerated).
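A quick way to explore a dump like this is to load it with pandas and filter on the metadata columns. This is only a sketch; the parquet file name and the alpha_frac threshold are assumptions, and the column names come from the schema above.

```python
import pandas as pd

# Load one shard of the dataset (hypothetical file name).
df = pd.read_parquet("train.parquet")

# Keep rows that are not flagged as autogenerated and that contain a
# reasonable share of alphabetic characters (threshold chosen arbitrarily).
human = df[(~df["autogenerated"]) & (df["alpha_frac"] > 0.3)]
print(human[["repo_name", "path", "license", "size"]].head())
```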
repo_name: ericmjl/influenza-reassortment-detector | path: full_affmat.py | copies: 1 | size: 1077

import pandas as pd
import sys
class FullAffmatCompiler(object):
"""docstring for FullAffmatCompiler"""
def __init__(self, handle):
super(FullAffmatCompiler, self).__init__()
self.handle = handle
self.summed_affmat = pd.DataFrame()
self.current_df = None
self.affmats = dict()
def run(self):
for segment in range(1,9):
print('Currently processing segment {0}'.format(segment))
self.affmats[segment] = self.read_affmat(segment)
self.summed_affmat = self.affmats[1] + self.affmats[2] + self.affmats[3] + self.affmats[4] + self.affmats[5] + self.affmats[6] + self.affmats[7] + self.affmats[8]
self.summed_affmat.to_hdf(path_or_buf='{0} Summed Affmats.h5'.format(self.handle), key='full', mode='w')
def read_affmat(self, segment):
key = 'segment{0}'.format(segment)
return pd.read_hdf('{0} Thresholded Segment Affmats.h5'.format(self.handle), key=key)
if __name__ == '__main__':
handle = sys.argv[1]
fac = FullAffmatCompiler(handle)
fac.run()

license: mit | hash: -9,035,768,609,521,674,000 | line_mean: 34.933333 | line_max: 170 | alpha_frac: 0.627669 | autogenerated: false
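The expected inputs and outputs of full_affmat.py are implicit in run() and read_affmat(): an HDF5 file named "<handle> Thresholded Segment Affmats.h5" with keys segment1 through segment8 (presumably written by an earlier step of the pipeline), and an output file "<handle> Summed Affmats.h5". A hypothetical invocation, with "H3N2" standing in for a real handle:

```python
# Equivalent to running: python full_affmat.py H3N2
fac = FullAffmatCompiler("H3N2")
fac.run()  # reads "H3N2 Thresholded Segment Affmats.h5", writes "H3N2 Summed Affmats.h5"
```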
repo_name: widdowquinn/THAPBI | path: ITS_region_genomic_coverage/get_genes_from_GFF.py | copies: 1 | size: 1976

#!/usr/bin/env python
#author: Peter Thorpe September 2016. The James Hutton Institute, Dundee, UK.
#Title:
#script to get the gene columns only from GFF
#imports
import os
import sys
from optparse import OptionParser
###########################################################################
def write_out_ITS_GFF(gff, out):
"""function parse and print GFF lines that
correspond to gene only """
gff_file = open(gff, "r")
out_file = open(out, "w")
for line in gff_file:
if line.startswith("#") or not line.strip():
continue
assert len(line.split("\t")) == 9, "GFF fields wrong length should be 9"
scaffold, aug, cds_type, start, stop, e, f, g, gene_info = line.split("\t")
if cds_type == "gene":
out_file.write(line)
gff_file.close()
out_file.close()
###########################################################################
if "-v" in sys.argv or "--version" in sys.argv:
print ("v0.0.1")
sys.exit(0)
usage = """Use as follows:
Title:
script to get the gene columns only from GFF
$ get_genes_from_GFF.py --gff gff.out -o out.gff
"""
parser = OptionParser(usage=usage)
parser.add_option("-g", "--gff", dest="gff", default=None,
help="predicted gene in gff3 format",
metavar="FILE")
parser.add_option("-o", "--out_file", dest="out_file",
default="ITS_GFF.out",
help="output line corresponding to genes only.")
(options, args) = parser.parse_args()
gff = options.gff
out_file = options.out_file
#run the program
if not os.path.isfile(gff):
print("sorry, couldn't open the file: " + ex.strerror + "\n")
print ("current working directory is :", os.getcwd() + "\n")
print ("files are :", [f for f in os.listdir('.')])
sys_exit("\n\nInput blast file not found: %s" % gff)
# call the top function
write_out_ITS_GFF(gff, out_file)
license: mit | hash: 6,784,813,885,300,704,000 | line_mean: 23.395062 | line_max: 79 | alpha_frac: 0.566296 | autogenerated: false
repo_name: GermanRuizMarcos/Classical-Composer-Classification | path: code_8/classification_1.py | copies: 1 | size: 8408

'''
AUDIO CLASSICAL COMPOSER IDENTIFICATION BASED ON:
A SPECTRAL BANDWISE FEATURE-BASED SYSTEM
'''
import essentia
from essentia.standard import *
import glob
import numpy as np
import arff
from essentia.standard import *
from scipy import stats
# Dataset creation with specific attributes (spectral features) and a specific class (composer's name)
'''
Audio files are transformed into the frequency domain through a 1024-sample STFT with 50% overlap.
The spectrum is divided into 50 mel-spaced bands.
'''
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/periods/baroque/*.wav")
fft = FFT()
melbands = MelBands(numberBands = 50)
flatness = FlatnessDB()
rolloff = RollOff()
centroid = SpectralCentroidTime()
flux = Flux()
energy = EnergyBand()
zero = ZeroCrossingRate()
spectrum = Spectrum()
w = Windowing(type = 'hann')
mfcc = MFCC()
f = open('definitive_train.txt', 'wb')
f.write('@RELATION "composer dataset"\n')
f.write('\n')
f.write('@ATTRIBUTE filename STRING\n')
f.write('@ATTRIBUTE MFCC-0 REAL\n')
f.write('@ATTRIBUTE MFCC-1 REAL\n')
f.write('@ATTRIBUTE MFCC-2 REAL\n')
f.write('@ATTRIBUTE MFCC-3 REAL\n')
f.write('@ATTRIBUTE MFCC-4 REAL\n')
f.write('@ATTRIBUTE MFCC-5 REAL\n')
f.write('@ATTRIBUTE MFCC-6 REAL\n')
f.write('@ATTRIBUTE MFCC-7 REAL\n')
f.write('@ATTRIBUTE MFCC-8 REAL\n')
f.write('@ATTRIBUTE MFCC-9 REAL\n')
f.write('@ATTRIBUTE MFCC-10 REAL\n')
f.write('@ATTRIBUTE MFCC-11 REAL\n')
f.write('@ATTRIBUTE MFCC-12 REAL\n')
f.write('@ATTRIBUTE flatness-mean REAL\n')
f.write('@ATTRIBUTE flatness-variance REAL\n')
f.write('@ATTRIBUTE rolloff-mean REAL\n')
f.write('@ATTRIBUTE rolloff-variance REAL\n')
f.write('@ATTRIBUTE centroid-mean REAL\n')
f.write('@ATTRIBUTE centroid-variance REAL\n')
f.write('@ATTRIBUTE flux-mean REAL\n')
f.write('@ATTRIBUTE flux-variance REAL\n')
f.write('@ATTRIBUTE energy-mean REAL\n')
f.write('@ATTRIBUTE energy-variance REAL\n')
f.write('@ATTRIBUTE ZCR-mean REAL\n')
f.write('@ATTRIBUTE ZCR-variance REAL\n')
f.write('@ATTRIBUTE flatness-std REAL\n')
f.write('@ATTRIBUTE flatness-hmean REAL\n')
f.write('@ATTRIBUTE period {baroque, classical, romantic}\n')
f.write('\n')
f.write('@DATA\n')
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'bach'
period = 'baroque'
f.write('%s' %audio_file.split('/')[-1].split('(')[0])
f.write(',')
f.write('%r' %np.mean([m[0] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[1] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[2] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[3] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[4] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[5] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[6] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[7] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[8] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[9] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[10] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[11] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[12] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %period)
f.write('\n')
# 2
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/periods/classical/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'beethoven'
period = 'classical'
f.write('%s' %audio_file.split('/')[-1].split('(')[0])
f.write(',')
f.write('%r' %np.mean([m[0] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[1] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[2] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[3] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[4] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[5] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[6] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[7] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[8] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[9] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[10] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[11] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[12] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %period)
f.write('\n')
# 3
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/periods/romantic/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'chopin'
period = 'romantic'
f.write('%s' %audio_file.split('/')[-1].split('(')[0])
f.write(',')
f.write('%r' %np.mean([m[0] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[1] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[2] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[3] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[4] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[5] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[6] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[7] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[8] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[9] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[10] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[11] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean([m[12] for m in mfccs]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %period)
f.write('\n')
f.write('%\n')
f.write('%\n')
f.write('%\n')
f.close()
license: gpl-3.0 | hash: 4,417,720,493,671,316,500 | line_mean: 23.16092 | line_max: 144 | alpha_frac: 0.614534 | autogenerated: false
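The three per-period blocks in classification_1.py differ only in the glob pattern and the period label, so the whole feature-extraction pass can be written once. The helper below is a sketch rather than part of the original script; it reuses the script's global extractor objects and mirrors its output row format.

```python
def process_period(f, wav_glob, period):
    # One pass of the feature extraction, parameterised by directory glob
    # and period label, writing one ARFF data row per audio file.
    for audio_file in glob.glob(wav_glob):
        flat, rol, cen, flu, ene, zer, mfccs = [], [], [], [], [], [], []
        audio = MonoLoader(filename=audio_file)()
        for frame in FrameGenerator(audio, frameSize=1024, hopSize=512,
                                    startFromZero=True):
            bands = melbands(spectrum(frame))
            flat.append(flatness(bands))
            rol.append(rolloff(bands))
            cen.append(centroid(bands))
            flu.append(flux(bands))
            ene.append(energy(bands))
            zer.append(zero(frame))
            mfccs.append(mfcc(spectrum(w(frame)))[1])
        row = [np.mean([m[i] for m in mfccs]) for i in range(13)]
        row += [np.mean(flat), np.var(flat), np.mean(rol), np.var(rol),
                np.mean(cen), np.var(cen), np.mean(flu), np.var(flu),
                np.mean(ene), np.var(ene), np.mean(zer), np.var(zer),
                np.std(flat), stats.hmean(flat)]
        f.write(audio_file.split('/')[-1].split('(')[0] + ',')
        f.write(','.join(repr(v) for v in row))
        f.write(',' + period + '\n')
```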
repo_name: juharris/tensorflow | path: tensorflow/contrib/layers/python/layers/target_column.py | copies: 1 | size: 19116

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TargetColumn abstract a single head in the model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import losses
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def regression_target(label_name=None,
weight_column_name=None,
target_dimension=1):
"""Creates a _TargetColumn for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
target_dimension: dimension of the target for multilabels.
Returns:
An instance of _TargetColumn
"""
return _RegressionTargetColumn(loss_fn=_mean_squared_loss,
label_name=label_name,
weight_column_name=weight_column_name,
target_dimension=target_dimension)
# TODO(zakaria): Add logistic_regression_target
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def multi_class_target(n_classes, label_name=None, weight_column_name=None):
"""Creates a _TargetColumn for multi class single label classification.
The target column uses softmax cross entropy loss.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
Returns:
An instance of _MultiClassTargetColumn.
Raises:
ValueError: if n_classes is < 2
"""
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
if n_classes == 2:
loss_fn = _log_loss_with_two_classes
else:
loss_fn = _softmax_cross_entropy_loss
return _MultiClassTargetColumn(loss_fn=loss_fn,
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name)
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def binary_svm_target(label_name=None, weight_column_name=None):
"""Creates a _TargetColumn for binary classification with SVMs.
The target column uses binary hinge loss.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
Returns:
An instance of _TargetColumn.
"""
return _BinarySvmTargetColumn(label_name=label_name,
weight_column_name=weight_column_name)
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
class ProblemType(object):
UNSPECIFIED = 0
CLASSIFICATION = 1
LINEAR_REGRESSION = 2
LOGISTIC_REGRESSION = 3
class _TargetColumn(object):
"""_TargetColumn is the abstraction for a single head in a model.
Args:
loss_fn: a function that returns the loss tensor.
num_label_columns: Integer, number of label columns.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
Raises:
ValueError: if loss_fn or n_classes are missing.
"""
def __init__(self, loss_fn, num_label_columns, label_name,
weight_column_name, problem_type):
if not loss_fn:
raise ValueError("loss_fn must be provided")
if num_label_columns is None: # n_classes can be 0
raise ValueError("num_label_columns must be provided")
self._loss_fn = loss_fn
self._num_label_columns = num_label_columns
self._label_name = label_name
self._weight_column_name = weight_column_name
self._problem_type = problem_type
def logits_to_predictions(self, logits, proba=False):
# Abstract. Subclasses must implement.
raise NotImplementedError()
def get_eval_ops(self, features, logits, targets, metrics=None):
"""Returns eval op."""
raise NotImplementedError
@property
def label_name(self):
return self._label_name
@property
def weight_column_name(self):
return self._weight_column_name
@property
def num_label_columns(self):
return self._num_label_columns
def get_weight_tensor(self, features):
if not self._weight_column_name:
return None
else:
return array_ops.reshape(
math_ops.to_float(features[self._weight_column_name]),
shape=(-1,))
@property
def problem_type(self):
return self._problem_type
def _weighted_loss(self, loss, weight_tensor):
"""Returns cumulative weighted loss."""
unweighted_loss = array_ops.reshape(loss, shape=(-1,))
weighted_loss = math_ops.mul(unweighted_loss,
array_ops.reshape(
weight_tensor, shape=(-1,)))
return weighted_loss
def training_loss(self, logits, target, features, name="training_loss"):
"""Returns training loss tensor for this head.
Training loss is different from the loss reported on the tensorboard as we
should respect the example weights when computing the gradient.
L = sum_{i} w_{i} * l_{i} / B
where B is the number of examples in the batch, and l_{i}, w_{i} are the
individual loss and example weight for example i.
Args:
logits: logits, a float tensor.
target: either a tensor for labels or in multihead case, a dict of string
to target tensor.
features: features dict.
name: Op name.
Returns:
Loss tensor.
"""
target = target[self.name] if isinstance(target, dict) else target
loss_unweighted = self._loss_fn(logits, target)
weight_tensor = self.get_weight_tensor(features)
if weight_tensor is None:
return math_ops.reduce_mean(loss_unweighted, name=name)
loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
return math_ops.reduce_mean(loss_weighted, name=name)
def loss(self, logits, target, features):
"""Returns loss tensor for this head.
The loss returned is the weighted average.
L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}
Args:
logits: logits, a float tensor.
target: either a tensor for labels or in multihead case, a dict of string
to target tensor.
features: features dict.
Returns:
Loss tensor.
"""
target = target[self.name] if isinstance(target, dict) else target
loss_unweighted = self._loss_fn(logits, target)
weight_tensor = self.get_weight_tensor(features)
if weight_tensor is None:
return math_ops.reduce_mean(loss_unweighted, name="loss")
loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
return math_ops.div(
math_ops.reduce_sum(loss_weighted),
math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
name="loss")
class _RegressionTargetColumn(_TargetColumn):
"""_TargetColumn for regression."""
def __init__(self, loss_fn, label_name, weight_column_name, target_dimension):
super(_RegressionTargetColumn, self).__init__(
loss_fn=loss_fn,
num_label_columns=target_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
problem_type=ProblemType.LINEAR_REGRESSION)
def logits_to_predictions(self, logits, proba=False):
if self.num_label_columns == 1:
return array_ops.squeeze(logits, squeeze_dims=[1])
return logits
def get_eval_ops(self, features, logits, targets, metrics=None):
loss = self.loss(logits, targets, features)
result = {"loss": metrics_lib.streaming_mean(loss)}
if metrics:
predictions = self.logits_to_predictions(logits, proba=False)
result.update(_run_metrics(predictions, targets, metrics,
self.get_weight_tensor(features)))
return result
class _MultiClassTargetColumn(_TargetColumn):
"""_TargetColumn for classification."""
# TODO(zakaria): support multilabel.
def __init__(self, loss_fn, n_classes, label_name, weight_column_name):
if n_classes < 2:
raise ValueError("n_classes must be >= 2")
super(_MultiClassTargetColumn, self).__init__(
loss_fn=loss_fn,
num_label_columns=1 if n_classes == 2 else n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
problem_type=ProblemType.CLASSIFICATION)
def logits_to_predictions(self, logits, proba=False):
if self.num_label_columns == 1:
logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
if proba:
return nn.softmax(logits)
else:
return math_ops.argmax(logits, 1)
def _default_eval_metrics(self):
if self._num_label_columns == 1:
return get_default_binary_metrics_for_eval(thresholds=[.5])
return {}
def get_eval_ops(self, features, logits, targets, metrics=None):
loss = self.loss(logits, targets, features)
result = {"loss": metrics_lib.streaming_mean(loss)}
# Adds default metrics.
if metrics is None:
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy}
predictions = math_ops.sigmoid(logits)
targets_float = math_ops.to_float(targets)
default_metrics = self._default_eval_metrics()
for metric_name, metric_op in default_metrics.items():
result[metric_name] = metric_op(predictions, targets_float)
class_metrics = {}
proba_metrics = {}
for name, metric_op in six.iteritems(metrics):
if isinstance(name, tuple):
if len(name) != 2:
raise ValueError("Ignoring metric {}. It returned a tuple with "
"len {}, expected 2.".format(name, len(name)))
else:
if name[1] not in ["classes", "probabilities"]:
raise ValueError("Ignoring metric {}. The 2nd element of its "
"name should be either 'classes' or "
"'probabilities'.".format(name))
elif name[1] == "classes":
class_metrics[name[0]] = metric_op
else:
proba_metrics[name[0]] = metric_op
elif isinstance(name, str):
class_metrics[name] = metric_op
else:
raise ValueError("Ignoring metric {}. Its name is not in the correct "
"form.".format(name))
if class_metrics:
class_predictions = self.logits_to_predictions(logits, proba=False)
result.update(_run_metrics(class_predictions, targets, class_metrics,
self.get_weight_tensor(features)))
if proba_metrics:
predictions = self.logits_to_predictions(logits, proba=True)
result.update(_run_metrics(predictions, targets, proba_metrics,
self.get_weight_tensor(features)))
return result
class _BinarySvmTargetColumn(_MultiClassTargetColumn):
"""_TargetColumn for binary classification using SVMs."""
def __init__(self, label_name, weight_column_name):
def loss_fn(logits, target):
check_shape_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(target), 2),
["target's shape should be either [batch_size, 1] or [batch_size]"])
with ops.control_dependencies([check_shape_op]):
target = array_ops.reshape(
target, shape=[array_ops.shape(target)[0], 1])
return losses.hinge_loss(logits, target)
super(_BinarySvmTargetColumn, self).__init__(
loss_fn=loss_fn,
n_classes=2,
label_name=label_name,
weight_column_name=weight_column_name)
def logits_to_predictions(self, logits, proba=False):
if proba:
raise ValueError(
"logits to probabilities is not supported for _BinarySvmTargetColumn")
logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
return math_ops.argmax(logits, 1)
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(logits, target):
# To prevent broadcasting inside "-".
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, dim=[1])
logits.get_shape().assert_is_compatible_with(target.get_shape())
return math_ops.square(logits - math_ops.to_float(target))
def _log_loss_with_two_classes(logits, target):
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, dim=[1])
loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
math_ops.to_float(target))
return loss_vec
def _softmax_cross_entropy_loss(logits, target):
# Check that we got int32/int64 for classification.
if (not target.dtype.is_compatible_with(dtypes.int64) and
not target.dtype.is_compatible_with(dtypes.int32)):
raise ValueError("Target's dtype should be int32, int64 or compatible. "
"Instead got %s." % target.dtype)
# sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
if len(target.get_shape()) == 2:
target = array_ops.squeeze(target, squeeze_dims=[1])
loss_vec = nn.sparse_softmax_cross_entropy_with_logits(logits, target)
return loss_vec
def _run_metrics(predictions, targets, metrics, weights):
result = {}
targets = math_ops.cast(targets, predictions.dtype)
for name, metric in six.iteritems(metrics or {}):
if weights is not None:
result[name] = metric(predictions, targets, weights=weights)
else:
result[name] = metric(predictions, targets)
return result
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def get_default_binary_metrics_for_eval(thresholds):
"""Returns a dictionary of basic metrics for logistic regression.
Args:
thresholds: List of floating point thresholds to use for accuracy,
precision, and recall metrics. If None, defaults to [0.5].
Returns:
Dictionary mapping metrics string names to metrics functions.
"""
metrics = {}
metrics[_MetricKeys.PREDICTION_MEAN] = _predictions_streaming_mean
metrics[_MetricKeys.TARGET_MEAN] = _targets_streaming_mean
# Also include the streaming mean of the label as an accuracy baseline, as
# a reminder to users.
metrics[_MetricKeys.ACCURACY_BASELINE] = _targets_streaming_mean
metrics[_MetricKeys.AUC] = _streaming_auc
for threshold in thresholds:
metrics[_MetricKeys.ACCURACY_MEAN % threshold] = _accuracy_at_threshold(
threshold)
# Precision for positive examples.
metrics[_MetricKeys.PRECISION_MEAN % threshold] = _streaming_at_threshold(
metrics_lib.streaming_precision_at_thresholds, threshold)
# Recall for positive examples.
metrics[_MetricKeys.RECALL_MEAN % threshold] = _streaming_at_threshold(
metrics_lib.streaming_recall_at_thresholds, threshold)
return metrics
def _float_weights_or_none(weights):
if weights is None:
return None
return math_ops.to_float(weights)
def _targets_streaming_mean(unused_predictions, targets, weights=None):
return metrics_lib.streaming_mean(targets, weights=weights)
def _predictions_streaming_mean(predictions, unused_targets, weights=None):
return metrics_lib.streaming_mean(predictions, weights=weights)
def _streaming_auc(predictions, targets, weights=None):
return metrics_lib.streaming_auc(predictions, targets,
weights=_float_weights_or_none(weights))
def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, targets, weights=None):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
labels=targets,
weights=weights)
return _accuracy_metric
def _streaming_at_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, targets, weights=None):
precision_tensor, update_op = streaming_metrics_fn(
predictions, labels=targets, thresholds=[threshold],
weights=_float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), update_op
return _streaming_metrics
class _MetricKeys(object):
AUC = "auc"
PREDICTION_MEAN = "labels/prediction_mean"
TARGET_MEAN = "labels/actual_target_mean"
ACCURACY_BASELINE = "accuracy/baseline_target_mean"
ACCURACY_MEAN = "accuracy/threshold_%f_mean"
PRECISION_MEAN = "precision/positive_threshold_%f_mean"
RECALL_MEAN = "recall/positive_threshold_%f_mean"
license: apache-2.0 | hash: 887,826,027,601,003,500 | line_mean: 35.411429 | line_max: 80 | alpha_frac: 0.672003 | autogenerated: false
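A hedged sketch of how these factory functions are used; the feature and label names are invented and the estimator wiring is not part of this file:

```python
# Construct heads for two common cases.
regression_head = regression_target(label_name="price",
                                    weight_column_name="example_weight")
classification_head = multi_class_target(n_classes=3, label_name="species")

# Inside a model function, after computing logits, one would typically call:
#   loss = classification_head.training_loss(logits, labels, features)
#   eval_ops = classification_head.get_eval_ops(features, logits, labels)
```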
repo_name: ClusterWhisperer/clusterstats | path: tests/test_http.py | copies: 1 | size: 8802

"""Testcases for the clusterstats module."""
import unittest
import json
from pprint import pprint
import httpretty
from requests import HTTPError, Timeout
import pandas as pd
from clusterstats import http
from clusterstats import stats
class ClusterStatsTest(unittest.TestCase):
def test_read_servers(self):
"""Test the _read_servers private method"""
self.assertEquals(len(http._read_servers("data/servers.txt")), 1000)
def test_transform_hostname_to_http_endpoint(self):
"""Test the _transform_hostname_to_http_endpoint"""
hosts=["server1", "server2"]
expected_out=["http://server1/status", "http://server2/status"]
self.assertEquals(http._transform_hostname_to_http_endpoint(hosts), expected_out)
@httpretty.activate
def test_http_OK(self):
"""Test Http Connectivity - Success scenario"""
url='http://myserver/status'
content=('{"Application":"Webapp2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":4134752620,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
httpretty.register_uri(
method=httpretty.GET,
uri=url,
status=200,
body=content,
content_type="application/json"
)
result=self._connect(url)
self.assertEquals(result[0], 0)
self.assertTrue(result[1]["Application"] == "Webapp2")
@httpretty.activate
def test_http_404(self):
"""Test HTTP Error Condition"""
def exception_callback(request, uri, headers):
raise HTTPError("404 Page Not found.")
url='http://myserver/status'
httpretty.register_uri(
method=httpretty.GET,
uri=url,
status=404,
body=exception_callback,
content_type="application/json"
)
result=self._connect(url)
self.assertEquals(-1,result[0])
@httpretty.activate
def test_http_timeout(self):
"""Test Timeout Condition"""
def exception_callback(request, uri, headers):
raise Timeout("Connection Timeout.")
url='http://myserver/status'
httpretty.register_uri(
method=httpretty.GET,
uri=url,
status=504,
body=exception_callback,
content_type="application/json"
)
result=self._connect(url)
self.assertEquals(-1,result[0])
@httpretty.activate
def test_json_error(self):
"""Test ValueError if JSON not returned."""
url='http://myserver/status'
content="Hello World"
httpretty.register_uri(
method=httpretty.GET,
uri=url,
status=200,
body=content,
content_type="application/json"
)
result=self._connect(url)
self.assertEquals(-1,result[0])
@httpretty.activate
def test_http_retries(self):
"""Test HTTP Session connection retries """
def exception_callback(request, uri, headers):
raise Timeout("Connection Timeout. - 2")
url='http://myserver/status'
content=('{"Application":"Webapp2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":4134752620,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
httpretty.register_uri(
method=httpretty.GET,
uri=url,
responses=[
httpretty.Response(body=exception_callback, status=504),
httpretty.Response(body=exception_callback, status=504),
httpretty.Response(body=content, status=200, content_type="application/json"),
])
result=self._connect(url)
self.assertEquals(result[0], 0)
self.assertTrue(result[1]["Application"] == "Webapp2")
@httpretty.activate
def test_query_status(self):
""" Test query_status """
url1='http://myserver1/status'
url2='http://myserver2/status'
url3='http://myserver3/status'
url4='http://myserver4/status'
url5='http://myserver5/status'
content=('{"Application":"Webapp2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":4134752620,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
bad_content="Hello World..."
httpretty.register_uri(
method=httpretty.GET,
uri=url1,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url2,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url3,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url4,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url5,
status=200,
body=bad_content,
content_type="application/json"
)
results=http.query_status([url1, url2, url3, url4, url5], 3, 2, 3)
##expecting success count = 4 and failure = 1
success_list=filter(lambda x: x[0] == 0, results)
failure_list=filter(lambda x: x[0] == -1, results)
self.assertTrue(len(success_list) == 4)
self.assertTrue(len(failure_list) == 1)
def test_calc_qos(self):
"""Test Calculating QoS """
self.assertTrue(stats.calc_qos(100,99), 99.0)
def test_check_qos(self):
"""Test Check QoS method """
self.assertTrue(stats.check_qos(99.0, 100, 99))
self.assertFalse(stats.check_qos(99.1, 100, 99))
def test_calc_stats(self):
"""Testing Stats Calculation"""
d = [{"Application":"Webapp1","Version":"1.2.1","Uptime":9634484391,"Request_Count":7729359104,
"Error_Count":3394574268,"Success_Count":4334784836},
{"Application":"Webapp1","Version":"1.2.1","Uptime":9634484391,"Request_Count":7729359104,
"Error_Count":3394574268,"Success_Count":4334784836},
{"Application":"Database2","Version":"0.1.0","Uptime":8982039907,"Request_Count":2174448763,
"Error_Count":2001963223,"Success_Count":172485540}]
df = stats.calc_stats(d, ['Application', 'Version'], 'Success_Count', stats.OPERATOR_ADD)
self.assertTrue(df.shape[0], 2) ## expecting two rows.
@httpretty.activate
def test_success_flow(self):
"""Integration test of the http results and stats calculation."""
url1='http://myserver1/status'
url2='http://myserver2/status'
url3='http://myserver3/status'
content=('{"Application":"Webapp2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":4134752620,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
content2=('{"Application":"Database2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":172485540,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
httpretty.register_uri(
method=httpretty.GET,
uri=url1,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url2,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url3,
status=200,
body=content2,
content_type="application/json"
)
results=http.query_status([url1, url2, url3], 3, 2, 3)
success_list=filter(lambda x: x[0] == 0, results)
failure_list=filter(lambda x: x[0] == -1, results) # expect no failure
self.assertEquals(failure_list, [])
data = [msg for (status, msg) in results]
df = stats.calc_stats(data, [stats.FIELD_APPLICATION, stats.FIELD_VERSION],
stats.FIELD_SUCCESS_COUNT, stats.OPERATOR_ADD)
self.assertTrue(df.shape[0], 2)
pprint(df)
def _connect(self, url):
result=http._get_server_status(url,10,3)
# print result
return result
if __name__ == '__main__':
unittest.main()

license: mit | hash: 1,064,580,791,326,973,600 | line_mean: 34.212 | line_max: 105 | alpha_frac: 0.566576 | autogenerated: false
repo_name: dbhirko/ansible-modules-extras | path: cloud/vmware/vsphere_copy.py | copies: 1 | size: 6194

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vsphere_copy
short_description: Copy a file to a vCenter datastore
description:
- Upload files to a vCenter datastore
version_added: 2.0
author: Dag Wieers (@dagwieers) <[email protected]>
options:
host:
description:
- The vCenter server on which the datastore is available.
required: true
login:
description:
- The login name to authenticate on the vCenter server.
required: true
password:
description:
- The password to authenticate on the vCenter server.
required: true
src:
description:
- The file to push to vCenter
required: true
datacenter:
description:
- The datacenter on the vCenter server that holds the datastore.
required: true
datastore:
description:
- The datastore on the vCenter server to push files to.
required: true
path:
description:
- The file to push to the datastore on the vCenter server.
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
set to C(no) when no other option exists.
required: false
default: 'yes'
choices: ['yes', 'no']
notes:
- "This module ought to be run from a system that can access vCenter directly and has the file to transfer.
It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
- Tested on vSphere 5.5
'''
EXAMPLES = '''
- vsphere_copy: host=vhost login=vuser password=vpass src=/some/local/file datacenter='DC1 Someplace' datastore=datastore1 path=some/remote/file
transport: local
- vsphere_copy: host=vhost login=vuser password=vpass src=/other/local/file datacenter='DC2 Someplace' datastore=datastore2 path=other/remote/file
delegate_to: other_system
'''
import atexit
import urllib
import mmap
import errno
import socket
def vmware_path(datastore, datacenter, path):
''' Constructs a URL path that VSphere accepts reliably '''
path = "/folder/%s" % path.lstrip("/")
# Due to a software bug in vSphere, it fails to handle ampersand in datacenter names
# The solution is to do what vSphere does (when browsing) and double-encode ampersands, maybe others ?
datacenter = datacenter.replace('&', '%26')
if not path.startswith("/"):
path = "/" + path
params = dict( dsName = datastore )
if datacenter:
params["dcPath"] = datacenter
params = urllib.urlencode(params)
return "%s?%s" % (path, params)
def main():
module = AnsibleModule(
argument_spec = dict(
host = dict(required=True, aliases=[ 'hostname' ]),
login = dict(required=True, aliases=[ 'username' ]),
password = dict(required=True),
src = dict(required=True, aliases=[ 'name' ]),
datacenter = dict(required=True),
datastore = dict(required=True),
dest = dict(required=True, aliases=[ 'path' ]),
validate_certs = dict(required=False, default=True, type='bool'),
),
# Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
supports_check_mode = False,
)
host = module.params.get('host')
login = module.params.get('login')
password = module.params.get('password')
src = module.params.get('src')
datacenter = module.params.get('datacenter')
datastore = module.params.get('datastore')
dest = module.params.get('dest')
validate_certs = module.params.get('validate_certs')
fd = open(src, "rb")
atexit.register(fd.close)
data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
atexit.register(data.close)
remote_path = vmware_path(datastore, datacenter, dest)
url = 'https://%s%s' % (host, remote_path)
headers = {
"Content-Type": "application/octet-stream",
"Content-Length": str(len(data)),
}
try:
r = open_url(url, data=data, headers=headers, method='PUT',
url_username=login, url_password=password, validate_certs=validate_certs,
force_basic_auth=True)
except socket.error, e:
if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET:
# VSphere resets connection if the file is in use and cannot be replaced
module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=e[0], reason=str(e), url=url)
else:
module.fail_json(msg=str(e), status=None, errno=e[0], reason=str(e), url=url)
except Exception, e:
error_code = -1
try:
if isinstance(e[0], int):
error_code = e[0]
except KeyError:
pass
module.fail_json(msg=str(e), status=None, errno=error_code, reason=str(e), url=url)
status = r.getcode()
if 200 <= status < 300:
module.exit_json(changed=True, status=status, reason=r.msg, url=url)
else:
length = r.headers.get('content-length', None)
if r.headers.get('transfer-encoding', '').lower() == 'chunked':
chunked = 1
else:
chunked = 0
module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url)
# Import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
license: gpl-3.0 | hash: -8,349,510,817,290,760,000 | line_mean: 34.597701 | line_max: 155 | alpha_frac: 0.656926 | autogenerated: false
repo_name: TAJaroszewski/lma_contrail_monitoring | path: deployment_scripts/puppet/modules/lma_collector/files/collectd/openstack_keystone.py | copies: 1 | size: 3086

#!/usr/bin/python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Collectd plugin for getting statistics from Keystone
import collectd
import collectd_base as base
import collectd_openstack as openstack
PLUGIN_NAME = 'keystone'
INTERVAL = openstack.INTERVAL
class KeystoneStatsPlugin(openstack.CollectdPlugin):
""" Class to report the statistics on Keystone service.
number of tenants, users broken down by state
number of roles
"""
@base.read_callback_wrapper
def read_callback(self):
def groupby(d):
return 'enabled' if d.get('enabled') else 'disabled'
# tenants
r = self.get('keystone', 'tenants')
if not r:
self.logger.warning('Could not find Keystone tenants')
return
tenants_details = r.json().get('tenants', [])
status = self.count_objects_group_by(tenants_details,
group_by_func=groupby)
for s, nb in status.iteritems():
self.dispatch_value('tenants', nb, meta={'state': s})
# users
r = self.get('keystone', 'users')
if not r:
self.logger.warning('Could not find Keystone users')
return
users_details = r.json().get('users', [])
status = self.count_objects_group_by(users_details,
group_by_func=groupby)
for s, nb in status.iteritems():
self.dispatch_value('users', nb, meta={'state': s})
# roles
r = self.get('keystone', 'OS-KSADM/roles')
if not r:
self.logger.warning('Could not find Keystone roles')
return
roles = r.json().get('roles', [])
self.dispatch_value('roles', len(roles))
def dispatch_value(self, name, value, meta=None):
v = collectd.Values(
plugin=PLUGIN_NAME, # metric source
type='gauge',
type_instance=name,
interval=INTERVAL,
# w/a for https://github.com/collectd/collectd/issues/716
meta=meta or {'0': True},
values=[value]
)
v.dispatch()
plugin = KeystoneStatsPlugin(collectd)
def config_callback(conf):
plugin.config_callback(conf)
def notification_callback(notification):
plugin.notification_callback(notification)
def read_callback():
plugin.read_callback()
collectd.register_config(config_callback)
collectd.register_notification(notification_callback)
collectd.register_read(read_callback, INTERVAL)
license: apache-2.0 | hash: 3,808,081,965,423,770,000 | line_mean: 30.814433 | line_max: 74 | alpha_frac: 0.629618 | autogenerated: false
repo_name: asgeirrr/pgantomizer | path: pgantomizer/anonymize.py | copies: 1 | size: 8192

import argparse
import logging
import os
import subprocess
import sys
import psycopg2
import yaml
from .utils import get_in
DEFAULT_PK_COLUMN_NAME = 'id'
ANONYMIZE_DATA_TYPE = {
'timestamp with time zone': "'1111-11-11 11:11:11.111111+00'",
'date': "'1111-11-11'",
'boolean': 'random() > 0.5',
'integer': 'ceil(random() * 100)',
'smallint': 'ceil(random() * 100)',
'numeric': 'floor(random() * 10)',
'character varying': lambda column, pk_name: "'{}_' || {}".format(column, pk_name),
'text': lambda column, pk_name: "'{}_' || {}".format(column, pk_name),
'inet': "'111.111.111.111'"
}
CUSTOM_ANONYMIZATION_RULES = {
'aggregate_length': lambda column, _: 'length({})'.format(column)
}
DB_ARG_NAMES = ('dbname', 'user', 'password', 'host', 'port')
DB_ENV_NAMES = ('ANONYMIZED_DB_NAME', 'ANONYMIZED_DB_USER', 'ANONYMIZED_DB_PASS', 'ANONYMIZED_DB_HOST',
'ANONYMIZED_DB_PORT')
class PgantomizerError(Exception):
pass
class MissingAnonymizationRuleError(PgantomizerError):
pass
class InvalidAnonymizationSchemaError(PgantomizerError):
pass
def get_table_pk_name(schema, table):
return schema[table].get('pk', DEFAULT_PK_COLUMN_NAME) if schema[table] else DEFAULT_PK_COLUMN_NAME
def get_db_args_from_env():
return {name: os.environ.get(var) for name, var in zip(DB_ARG_NAMES, DB_ENV_NAMES)}
def get_psql_db_args(db_args):
return '-d {dbname} -U {user} -h {host} -p {port}'.format(**db_args)
def drop_schema(db_args):
subprocess.run(
'PGPASSWORD={password} psql {db_args} -c "DROP SCHEMA public CASCADE; CREATE SCHEMA public;" {redirect}'.format(
password=db_args.get('password'),
db_args=get_psql_db_args(db_args),
redirect='' if logging.getLogger().getEffectiveLevel() == logging.DEBUG else '>/dev/null 2>&1'),
shell=True
)
def load_db_to_new_instance(filename, db_args):
if not os.path.isfile(filename):
raise IOError('Dump file {} is not a file.'.format(filename))
os.putenv('PGPASSWORD', db_args.get('password'))
drop_schema(db_args)
subprocess.run(
'PGPASSWORD={password} pg_restore -Fc -j 8 {db_args} {filename} {redirect}'.format(
password=db_args.get('password'),
db_args=get_psql_db_args(db_args), filename=filename,
redirect='' if logging.getLogger().getEffectiveLevel() == logging.DEBUG else '>/dev/null 2>&1'),
shell=True
)
def prepare_column_for_anonymization(conn, cursor, table, column, data_type):
"""
Some data types such as VARCHAR are anonymized in such a manner that the anonymized value can be longer that
the length constrain on the column. Therefore, the constraint is enlarged.
"""
if data_type == 'character varying':
logging.debug('Extending length of varchar {}.{}'.format(table, column))
cursor.execute("ALTER TABLE {table} ALTER COLUMN {column} TYPE varchar(250);".format(
table=table,
column=column
))
conn.commit()
def check_schema(cursor, schema, db_args):
for table in schema:
try:
cursor.execute("SELECT {columns} FROM {table};".format(
columns='"{}"'.format('", "'.join(schema[table].get('raw', []) + [get_table_pk_name(schema, table)])),
table=table
))
except psycopg2.ProgrammingError as e:
raise InvalidAnonymizationSchemaError(str(e))
def anonymize_column(cursor, schema, table, column, data_type):
if column == get_table_pk_name(schema, table) or (schema[table] and column in schema[table].get('raw', [])):
logging.debug('Skipping anonymization of {}.{}'.format(table, column))
elif data_type in ANONYMIZE_DATA_TYPE:
custom_rule = get_in(schema, [table, 'custom_rules', column])
if custom_rule and custom_rule not in CUSTOM_ANONYMIZATION_RULES:
raise MissingAnonymizationRuleError('Custom rule "{}" is not defined'.format(custom_rule))
anonymization = CUSTOM_ANONYMIZATION_RULES[custom_rule] if custom_rule else ANONYMIZE_DATA_TYPE[data_type]
cursor.execute("UPDATE {table} SET {column} = {value};".format(
table=table,
column=column,
value=anonymization(column, get_table_pk_name(schema, table)) if callable(anonymization) else anonymization
))
logging.debug('Anonymized {}.{}'.format(table, column))
else:
raise MissingAnonymizationRuleError('No rule to anonymize type "{}"'.format(data_type))
def anonymize_db(schema, db_args):
with psycopg2.connect(**db_args) as conn:
with conn.cursor() as cursor:
check_schema(cursor, schema, db_args)
cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public';")
for table_name in cursor.fetchall():
cursor.execute("SELECT column_name, data_type FROM information_schema.columns "
"WHERE table_schema = 'public' AND table_name = '{}'".format(table_name[0]))
for column_name, data_type in cursor.fetchall():
prepare_column_for_anonymization(conn, cursor, table_name[0], column_name, data_type)
anonymize_column(cursor, schema, table_name[0], column_name, data_type)
def load_anonymize_remove(dump_file, schema, leave_dump=False, db_args=None):
schema = yaml.load(open(schema))
db_args = db_args or get_db_args_from_env()
try:
load_db_to_new_instance(dump_file, db_args)
anonymize_db(schema, db_args)
except Exception:  # Any exception must result in dropping the schema to prevent sensitive data leakage
drop_schema(db_args)
raise
finally:
if not leave_dump:
subprocess.run(['rm', dump_file])
def main():
parser = argparse.ArgumentParser(description='Load data from a Postgres dump to a specified instance '
'and anonymize it according to rules specified in a YAML config file.',
epilog='Beware that all tables in the target DB are dropped '
'prior to loading the dump and anonymization. See README.md for details.')
parser.add_argument('-v', '--verbose', action='count', help='increase output verbosity')
parser.add_argument('-l', '--leave-dump', action='store_true', help='do not delete dump file after anonymization')
parser.add_argument('--schema', help='YAML config file with anonymization rules for all tables', required=True,
default='./schema.yaml')
parser.add_argument('-f', '--dump-file', help='path to the dump of DB to load and anonymize',
default='to_anonymize.sql')
parser.add_argument('--dbname', help='name of the database to dump')
parser.add_argument('--user', help='name of the Postgres user with access to the anonymized database')
parser.add_argument('--password', help='password of the Postgres user with access to the anonymized database',
default='')
parser.add_argument('--host', help='host where the DB is running', default='localhost')
parser.add_argument('--port', help='port where the DB is running', default='5432')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)
else:
logging.basicConfig(format="%(levelname)s: %(message)s")
if not os.path.isfile(args.dump_file):
sys.exit('File with dump "{}" does not exist.'.format(args.dump_file))
if not os.path.isfile(args.schema):
sys.exit('File with schema "{}" does not exist.'.format(args.schema))
db_args = ({name: value for name, value in zip(DB_ARG_NAMES, (args.dbname, args.user, args.password, args.host,
args.port))}
if args.dbname and args.user else None)
load_anonymize_remove(args.dump_file, args.schema, args.leave_dump, db_args)
if __name__ == '__main__':
main()
license: bsd-3-clause | hash: -4,770,951,618,853,237,000 | line_mean: 41.226804 | line_max: 120 | alpha_frac: 0.630005 | autogenerated: false
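The YAML schema consumed by load_anonymize_remove() is only implied by the lookups above (per-table 'pk', 'raw' and 'custom_rules' keys). A minimal illustration of the parsed structure, with made-up table and column names:

```python
# What yaml.load(open(schema)) is expected to return for anonymize_db():
example_schema = {
    "customer": {
        "pk": "customer_id",                 # defaults to "id" when omitted
        "raw": ["created_at"],               # columns copied through unchanged
        "custom_rules": {"note": "aggregate_length"},  # key into CUSTOM_ANONYMIZATION_RULES
    },
    "invoice": {"raw": ["issued_on"]},       # all other columns anonymized by data type
}
```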
repo_name: CWDoherty/Baseball | path: Scripts/player_stats.py | copies: 1 | size: 5865

'''
Copyright (c) 2015 Chris Doherty, Oliver Nabavian
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import mysql.connector
# Script to get player stats and import them into our database.
# Configuration info to access database.
config = {
'user': 'root',
'password': 'isles40',
'host': '127.0.0.1',
'database': 'baseball'
}
# Connect to database
cnx = mysql.connector.connect(**config)
cursor1 = cnx.cursor(buffered=True)
# SQL query to get batting information for all players in 2014
# Gets batting information as well as Name and Birthday information for matching in our database
get_batting = ("SELECT b.playerID, b.teamID, b.G, b.AB, b.R, b.H, b.2B, b.3B, b.HR, b.RBI, "
"b.SB, b.CS, b.BB, b.SO, b.IBB, b.HBP, b.SH, b.SF, b.GIDP, m.nameLast, m.nameFirst, "
"m.birthYear, m.birthMonth, m.birthDay FROM (Batting b JOIN Master m ON m.playerID = b.playerID) "
"WHERE b.yearID = 2014")
# Execute SQL
cursor1.execute(get_batting)
batting_list = []
# Store cursor data in list and convert tuples to lists.
for c in cursor1:
batting_list.append(list(c))
# Close connection to DB
cursor1.close()
cnx.close()
# Connect to database
cnx = mysql.connector.connect(**config)
cursor2 = cnx.cursor(buffered=True)
# SQL query to get pitching info
# Gets pitching info as well as Name and Birthday info for matching in our DB
get_pitching = ("SELECT p.playerID, p.W, p.L, p.G, p.GS, p.CG, p.SHO, p.SV, p.IPouts, "
"p.H, p.ER, p.HR, p.BB, p.SO, p.BAOpp, p.ERA, p.IBB, p.WP, p.HBP, p.R, "
"p.SH, p.SF, p.GIDP, m.nameLast, m.nameFirst, m.birthYear, m.birthMonth, "
"m.birthDay FROM (Pitching p JOIN Master m ON m.playerID=p.playerID) "
" WHERE yearID = 2014")
# Execute SQL
cursor2.execute(get_pitching)
pitching_list = []
# Store data from cursor and convert to list for easier manipulation
for c in cursor2:
pitching_list.append(list(c))
# Close connection to DB
cursor2.close()
cnx.close()
''' Insert Data into DB '''
# format data to enter into DB
player_batting_list = []
for b in batting_list:
dob = str(b[21]) + "-" + str(b[22]) + "-" + str(b[23]) # indexes of relevant items
full_name = b[20] + " " + b[19]
temp = []
for field in range(2,len(b) - 5):
temp.append(b[field])
temp.append(full_name)
temp.append(dob)
player_batting_list.append(temp)
player_pitching_list = []
for p in pitching_list:
dob = str(p[25]) + "-" + str(p[26]) + "-" +str(p[27])
full_name = p[24] + " " + p[23]
temp = []
for field in range(1, len(p) - 5):
temp.append(p[field])
temp.append(full_name)
temp.append(dob)
player_pitching_list.append(temp)
'''
total_batting_list = []
# combine players that played for multiple teams into one entry
for i in range(len(player_batting_list)):
current = player_batting_list[i]
name = current[17]
dob = current[18]
dup = False
for j in range(i, len(player_batting_list)):
if (name in player_batting_list[j]) and (dob in player_batting_list[j]):
dup = True
extra = player_batting_list[j]
G = current[0] + extra[0]
AB = current[1] + extra[1]
R = current[2] + extra[2]
H = current[3] + extra[3]
dubs = current[4] + extra[4]
trip = current[5] + extra[5]
HR = current[6] + extra[6]
RBI = current[7] + extra[7]
SB = current[8] + extra[8]
CS = current[9] + extra[9]
BB = current[10] + extra[10]
SO = current[11] + extra[11]
IBB = current[12] + extra[12]
HBP = current[13] + extra[13]
SH = current[14] + extra[14]
SF = current[15] + extra[15]
GIDP = current[16] + extra[16]
l = [G, AB, R, H, dubs, trip, HR, RBI, SB, CS, BB, SO, IBB, HBP, SH, SF, GIDP]
l.append(name)
l.append(dob)
total_batting_list.append(l)
if not dup:
total_batting_list.append(current)
'''
# Config info to connect with our DB
config = {
'user': 'root',
'password': 'isles40',
'host': '127.0.0.1',
'database': 'baseballdb'
}
# Open connection with DB
cnx2 = mysql.connector.connect(**config)
cursor2 = cnx2.cursor(buffered=True)
import_batting = ("INSERT INTO BATTING "
"(G, AB, R, H, 2B, 3B, HR, RBI, SB, CS, BB, SO, IBB, HBP, SH,"
"SF, GIDP, full_name, dob)"
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,"
"%s, %s, %s, %s, %s)")
for p in range(len(player_batting_list)):
cursor2.execute(import_batting, player_batting_list[p])
cnx2.commit()
cursor2.close()
cnx2.close()
import_pitching = ("INSERT INTO Pitching "
"(W, L, G, GS, CG, SHO, SV, IPouts, H, ER, HR, BB, SO, BAOpp, "
"ERA, IBB, WP, HBP, R, SH, SF, GIDP, full_name, dob)"
"VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,"
"%s,%s,%s,%s,%s,%s,%s)")
cnx3 = mysql.connector.connect(**config)
cursor3 = cnx3.cursor(buffered=True)
for p in range(len(player_pitching_list)):
cursor3.execute(import_pitching, player_pitching_list[p])
cnx3.commit()
cursor3.close()
cnx3.close()
license: mit | hash: -1,999,657,956,672,452,900 | line_mean: 28.325 | line_max: 104 | alpha_frac: 0.662575 | autogenerated: false
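The commented-out block in player_stats.py tries to merge players who appear on multiple teams in one season. A shorter way to express that intent (a sketch, not part of the original script), keyed on (full_name, dob):

```python
from collections import defaultdict

# Sum the 17 counting stats for duplicate (full_name, dob) pairs.
totals = defaultdict(lambda: [0] * 17)
for row in player_batting_list:
    key = (row[17], row[18])  # full_name, dob
    totals[key] = [a + b for a, b in zip(totals[key], row[:17])]

total_batting_list = [sums + [name, dob] for (name, dob), sums in totals.items()]
```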
repo_name: CptDemocracy/Python | path: MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-6/PSET-6/decryptStory.py | copies: 1 | size: 3795

"""
PSET-6
Problem 2: Decryption (decryptStory)
Now that you have all the pieces to the puzzle, please use them to
decode the file story.txt. In the skeleton file, you will see a method
getStoryString() that will return the encrypted version of the story.
Fill in the following function; it should create the wordList, obtain
the story, and then decrypt the story. Be sure you've read through
the whole file to see what helper functions we've defined for you that
may assist you in these tasks! This function will be only a few lines
of code (our solution does it in 4 lines).
"""
import string
import random
import operator
# helper classes code
# --------------------------------
class CharAlphaASCII(object):
ALPHA_LEN = 26
ASCII_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
ASCII_CHARS_IND = {'A': 0, 'C': 2, 'B': 1, 'E': 4, 'D': 3, 'G': 6, 'F': 5, \
'I': 8, 'H': 7, 'K': 10, 'J': 9, 'M': 12, 'L': 11, \
'O': 14, 'N': 13, 'Q': 16, 'P': 15, 'S': 18, 'R': 17, \
'U': 20, 'T': 19, 'W': 22, 'V': 21, 'Y': 24, 'X': 23, \
'Z': 25, \
'a': 26, 'c': 28, 'b': 27, 'e': 30, 'd': 29, 'g': 32, \
'f': 31, 'i': 34, 'h': 33, 'k': 36, 'j': 35, 'm': 38, \
'l': 37, 'o': 40, 'n': 39, 'q': 42, 'p': 41, 's': 44, \
'r': 43, 'u': 46, 't': 45, 'w': 48, 'v': 47, 'y': 50, \
'x': 49, 'z': 51}
def __init__(self, char):
if len(char) > 1:
raise ValueError("CharAlphaASCII can't be more than 1 of length")
if not char.isalpha():
raise ValueError("CharAlphaASCII only accepts ASCII alpha chars")
self.char = char[0]
def __add__(self, num):
if type(num) != int:
raise TypeError
return CharAlphaASCII( self.operation(num, operator.add) )
def __sub__(self, num):
if type(num) != int:
raise TypeError
return CharAlphaASCII( self.operation(num, operator.sub) )
def __str__(self):
return self.char
def __lt__(self, char):
return self.char < char
def __le__(self, char):
return self.char <= char
def __eq__(self, char):
return self.char == char
def __gt__(self, char):
return self.char > char
def __ge__(self, char):
return self.char >= char
    def __len__(self):
        return len(self.char)
def operation(self, num, op):
if type(num) != int:
raise TypeError
index = self.ASCII_CHARS_IND[self.char]
if index < self.ALPHA_LEN:
newIndex = op(index, num) % self.ALPHA_LEN
else:
newIndex = op(index, num) % self.ALPHA_LEN + self.ALPHA_LEN
return self.ASCII_CHARS[newIndex]
def ToUnicode(self):
return ord(self.char)
class Cstr(str, object):
def __init__(self, s):
self.s = s
def __add__(self, s):
return Cstr(self.s + str(s))
def __str__(self):
return self.s
# --------------------------------
# END of helper classes code
def applyCoder_CSTR(text, shift):
"""
Applies the coder to the text. Returns the encoded text.
text: string
    shift: integer, the number of alphabet positions to shift each letter by
    returns: text after applying the shift to every alphabetic character
"""
cs = Cstr("")
for char in text:
if char.isalpha():
C = CharAlphaASCII(char)
C += shift
cs += C
else:
cs += char
return str(cs)
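# Illustrative check (not part of the original skeleton): applyCoder_CSTR applies a
# Caesar shift that wraps around within each letter case, while non-alphabetic
# characters pass through unchanged, e.g.
#   applyCoder_CSTR("Hello, World!", 3)   -> "Khoor, Zruog!"
#   applyCoder_CSTR("Khoor, Zruog!", -3)  -> "Hello, World!"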
def decryptStory():
wordList = loadWords()
story = getStoryString()
return applyCoder_CSTR(story, findBestShift(wordList, story))
| mit | 3,336,012,462,027,109,400 | 30.106557 | 80 | 0.529644 | false |
asipto/kamcli | kamcli/commands/cmd_aliasdb.py | 1 | 5403 | import click
from sqlalchemy import create_engine
from kamcli.ioutils import ioutils_dbres_print
from kamcli.cli import pass_context
from kamcli.cli import parse_user_spec
@click.group("aliasdb", help="Manage database user aliases")
@pass_context
def cli(ctx):
pass
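# Example invocations (illustrative only; the SIP addresses are made up):
#   kamcli aliasdb add [email protected] [email protected]
#   kamcli aliasdb show --match-alias [email protected]
#   kamcli aliasdb rm [email protected] [email protected]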
@cli.command("add", short_help="Add a user-alias pair")
@click.option(
"table",
"--table",
default="dbaliases",
help="Name of database table (default: dbaliases)",
)
@click.argument("userid", metavar="<userid>")
@click.argument("aliasid", metavar="<aliasid>")
@pass_context
def aliasdb_add(ctx, table, userid, aliasid):
"""Add a user-alias pair into database
\b
Parameters:
<userid> - username, AoR or SIP URI for subscriber
<aliasid> - username, AoR or SIP URI for alias
"""
udata = parse_user_spec(ctx, userid)
adata = parse_user_spec(ctx, aliasid)
ctx.vlog(
"Adding user [%s@%s] with alias [%s@%s]",
udata["username"],
udata["domain"],
adata["username"],
adata["domain"],
)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
e.execute(
"insert into " + table + " (username, domain, alias_username, "
"alias_domain) values (%s, %s, %s, %s)",
udata["username"],
udata["domain"],
adata["username"],
adata["domain"],
)
@cli.command("rm", short_help="Remove records for a user and/or alias")
@click.option(
"table",
"--table",
default="dbaliases",
help="Name of database table (default: dbaliases)",
)
@click.option(
"matchalias",
"--match-alias",
is_flag=True,
help="Match userid value as alias (when given one argument)",
)
@click.argument("userid", metavar="<userid>")
@click.argument("aliasid", metavar="<aliasid>", nargs=-1)
@pass_context
def aliasdb_rm(ctx, table, matchalias, userid, aliasid):
"""Remove a user from groups (revoke privilege)
\b
Parameters:
<userid> - username, AoR or SIP URI for subscriber
<aliasid> - username, AoR or SIP URI for alias (optional)
"""
udata = parse_user_spec(ctx, userid)
ctx.log(
"Removing alias for record [%s@%s]", udata["username"], udata["domain"]
)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
if not aliasid:
if matchalias:
e.execute(
"delete from " + table + " where alias_username=%s and "
"alias_domain=%s",
udata["username"],
udata["domain"],
)
else:
e.execute(
"delete from " + table + " where username=%s and domain=%s",
udata["username"],
udata["domain"],
)
else:
for a in aliasid:
adata = parse_user_spec(ctx, a)
e.execute(
"delete from " + table + " where username=%s and domain=%s "
"and alias_username=%s and alias_domain=%s",
udata["username"],
udata["domain"],
adata["username"],
adata["domain"],
)
@cli.command("show", short_help="Show user aliases")
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(["raw", "json", "table", "dict"]),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.option(
"table",
"--table",
default="dbaliases",
help="Name of database table (default: dbaliases)",
)
@click.option(
"matchalias",
"--match-alias",
is_flag=True,
help="Match userid value as alias",
)
@click.argument("userid", nargs=-1, metavar="[<userid>]")
@pass_context
def aliasdb_show(ctx, oformat, ostyle, table, matchalias, userid):
"""Show details for user aliases
\b
Parameters:
[<userid>] - username, AoR or SIP URI for user or alias
- it can be a list of userids
- if not provided then all aliases are shown
"""
if not userid:
ctx.vlog("Showing all records")
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute("select * from " + table)
ioutils_dbres_print(ctx, oformat, ostyle, res)
else:
for u in userid:
udata = parse_user_spec(ctx, u)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
if matchalias:
ctx.vlog(
"Showing records for alias [%s@%s]",
udata["username"],
udata["domain"],
)
res = e.execute(
"select * from " + table + " where alias_username=%s "
"and alias_domain=%s",
udata["username"],
udata["domain"],
)
else:
ctx.vlog(
"Showing records for user [%s@%s]",
udata["username"],
udata["domain"],
)
res = e.execute(
"select * from " + table + " where username=%s and "
"domain=%s",
udata["username"],
udata["domain"],
)
ioutils_dbres_print(ctx, oformat, ostyle, res)
| gpl-2.0 | -459,473,105,445,511,100 | 28.850829 | 79 | 0.531371 | false |
aYukiSekiguchi/ACCESS-Chromium | build/android/android_commands.py | 1 | 28898 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
Usage:
python android_commands.py wait-for-pm
"""
import collections
import datetime
import logging
import optparse
import os
import pexpect
import re
import subprocess
import sys
import tempfile
import time
# adb_interface.py is under ../../third_party/android/testrunner/
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..',
'..', 'third_party', 'android', 'testrunner'))
import adb_interface
import cmd_helper
import errors # is under ../../third_party/android/testrunner/errors.py
from run_tests_helper import IsRunningAsBuildbot
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ for line start end with pexpect,
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'
# This only works for single core devices.
SCALING_GOVERNOR = '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor'
DROP_CACHES = '/proc/sys/vm/drop_caches'
# Java properties file
LOCAL_PROPERTIES_PATH = '/data/local.prop'
# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
BOOT_COMPLETE_RE = re.compile(
re.escape('android.intent.action.MEDIA_MOUNTED path: /mnt/sdcard')
+ '|' + re.escape('PowerManagerService: bootCompleted'))
# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82
KEYCODE_BACK = 4
def GetEmulators():
"""Returns a list of emulators. Does not filter by status (e.g. offline).
Both devices starting with 'emulator' will be returned in below output:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
emulator-5558 device
"""
re_device = re.compile('^emulator-[0-9]+', re.MULTILINE)
devices = re_device.findall(cmd_helper.GetCmdOutput(['adb', 'devices']))
return devices
def GetAttachedDevices():
"""Returns a list of attached, online android devices.
If a preferred device has been set with ANDROID_SERIAL, it will be first in
the returned list.
Example output:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
"""
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
devices = re_device.findall(cmd_helper.GetCmdOutput(['adb', 'devices']))
preferred_device = os.environ.get("ANDROID_SERIAL")
if preferred_device in devices:
devices.remove(preferred_device)
devices.insert(0, preferred_device)
return devices
def _GetHostFileInfo(file_name):
"""Returns a tuple containing size and modified UTC time for file_name."""
# The time accuracy on device is only to minute level, remove the second and
# microsecond from host results.
utc_time = datetime.datetime.utcfromtimestamp(os.path.getmtime(file_name))
time_delta = datetime.timedelta(seconds=utc_time.second,
microseconds=utc_time.microsecond)
return os.path.getsize(file_name), utc_time - time_delta
def ListHostPathContents(path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
if os.path.isfile(path):
return {os.path.basename(path): _GetHostFileInfo(path)}
ret = {}
for root, dirs, files in os.walk(path):
for d in dirs:
if d.startswith('.'):
dirs.remove(d) # Prune the dir for subsequent iterations.
for f in files:
if f.startswith('.'):
continue
full_file_name = os.path.join(root, f)
file_name = os.path.relpath(full_file_name, path)
ret[file_name] = _GetHostFileInfo(full_file_name)
return ret
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir)+1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta;
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files
def GetLogTimestamp(log_line):
"""Returns the timestamp of the given |log_line|."""
try:
return datetime.datetime.strptime(log_line[:18], '%m-%d %H:%M:%S.%f')
except (ValueError, IndexError):
logging.critical('Error reading timestamp from ' + log_line)
return None
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb.
Args:
device: If given, adb commands are only send to the device of this ID.
Otherwise commands are sent to all attached devices.
wait_for_pm: If true, issues an adb wait-for-device command.
"""
def __init__(self, device=None, wait_for_pm=False):
self._adb = adb_interface.AdbInterface()
if device:
self._adb.SetTargetSerial(device)
if wait_for_pm:
self.WaitForDevicePm()
self._logcat = None
self._original_governor = None
self._pushed_files = []
def Adb(self):
"""Returns our AdbInterface to avoid us wrapping all its methods."""
return self._adb
def WaitForDevicePm(self):
"""Blocks until the device's package manager is available.
    To work around http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm()
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s' % str(e))
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
def SynchronizeDateTime(self):
"""Synchronize date/time between host and device."""
self._adb.SendShellCommand('date -u %f' % time.time())
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
def Reboot(self, full_reboot=True):
"""Reboots the device and waits for the package manager to return.
Args:
full_reboot: Whether to fully reboot the device or just restart the shell.
"""
# TODO(torne): hive can't reboot the device either way without breaking the
# connection; work out if we can handle this better
if os.environ.get('USING_HIVE'):
logging.warning('Ignoring reboot request as we are on hive')
return
if full_reboot:
self._adb.SendCommand('reboot')
else:
self.RestartShell()
self.WaitForDevicePm()
self.StartMonitoringLogcat(timeout=120)
self.WaitForLogMatch(BOOT_COMPLETE_RE)
self.UnlockDevice()
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
"""
uninstall_command = 'uninstall %s' % package
logging.info('>>> $' + uninstall_command)
self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
"""
assert os.path.isfile(package_file_path)
install_command = 'install %s' % package_file_path
logging.info('>>> $' + install_command)
self._adb.SendCommand(install_command, timeout_time=2*60)
# It is tempting to turn this function into a generator, however this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=True):
"""Send a command to the adb shell and return the result.
Args:
command: String containing the shell command to send. Must not include
the single quotes as we use them to escape the whole command.
timeout_time: Number of seconds to wait for command to respond before
retrying, used by AdbInterface.SendShellCommand.
log_result: Boolean to indicate whether we should log the result of the
shell command.
Returns:
list containing the lines of output received from running the command
"""
logging.info('>>> $' + command)
if "'" in command: logging.warning(command + " contains ' quotes")
result = self._adb.SendShellCommand("'%s'" % command,
timeout_time).splitlines()
if log_result:
logging.info('\n>>> '.join(result))
return result
def KillAll(self, process):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off
Returns:
      the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
self.RunShellCommand('kill ' + ' '.join(pids))
return len(pids)
def StartActivity(self, package, activity,
action='android.intent.action.VIEW', data=None,
extras=None, trace_file_name=None):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.android.chrome').
activity: Name of activity (e.g. '.Main' or 'com.android.chrome.Main').
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity.
trace_file_name: If used, turns on and saves the trace to this file name.
"""
cmd = 'am start -a %s -n %s/%s' % (action, package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
cmd += ' -e'
for key in extras:
cmd += ' %s %s' % (key, extras[key])
if trace_file_name:
cmd += ' -S -P ' + trace_file_name
self.RunShellCommand(cmd)
def EnableAdbRoot(self):
"""Enable root on the device."""
self._adb.EnableAdbRoot()
def CloseApplication(self, package):
"""Attempt to close down the application, using increasing violence.
Args:
package: Name of the process to kill off, e.g. com.android.chrome
"""
self.RunShellCommand('am force-stop ' + package)
def ClearApplicationState(self, package):
"""Closes and clears all state for the given |package|."""
self.CloseApplication(package)
self.RunShellCommand('rm -r /data/data/%s/cache/*' % package)
self.RunShellCommand('rm -r /data/data/%s/files/*' % package)
self.RunShellCommand('rm -r /data/data/%s/shared_prefs/*' % package)
def SendKeyEvent(self, keycode):
"""Sends keycode to the device.
Args:
keycode: Numeric keycode to send (see "enum" at top of file).
"""
self.RunShellCommand('input keyevent %d' % keycode)
def PushIfNeeded(self, local_path, device_path):
"""Pushes |local_path| to |device_path|.
Works for files and directories. This method skips copying any paths in
|test_data_paths| that already exist on the device with the same timestamp
and size.
All pushed files can be removed by calling RemovePushedFiles().
"""
assert os.path.exists(local_path)
self._pushed_files.append(device_path)
# If the path contents are the same, there's nothing to do.
local_contents = ListHostPathContents(local_path)
device_contents = self.ListPathContents(device_path)
    # Only compare sizes and timestamps (not names) when copying a single file,
    # because the file can be stored under a different name on the device.
if os.path.isfile(local_path):
assert len(local_contents) == 1
is_equal = local_contents.values() == device_contents.values()
else:
is_equal = local_contents == device_contents
if is_equal:
logging.info('%s is up-to-date. Skipping file push.' % device_path)
return
# They don't match, so remove everything first and then create it.
if os.path.isdir(local_path):
self.RunShellCommand('rm -r %s' % device_path, timeout_time=2*60)
self.RunShellCommand('mkdir -p %s' % device_path)
# NOTE: We can't use adb_interface.Push() because it hardcodes a timeout of
# 60 seconds which isn't sufficient for a lot of users of this method.
push_command = 'push %s %s' % (local_path, device_path)
logging.info('>>> $' + push_command)
output = self._adb.SendCommand(push_command, timeout_time=30*60)
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
# Errors look like this: "failed to copy ... "
if not re.search('^[0-9]', output):
logging.critical('PUSH FAILED: ' + output)
def GetFileContents(self, filename):
"""Gets contents from the file specified by |filename|."""
return self.RunShellCommand('if [ -f "' + filename + '" ]; then cat "' +
filename + '"; fi')
def SetFileContents(self, filename, contents):
"""Writes |contents| to the file specified by |filename|."""
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
self._adb.Push(f.name, filename)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2*60)
def ListPathContents(self, path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
# Example output:
# /foo/bar:
# -rw-r----- 1 user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
'(?P<user>[^\s]+)\s+'
'(?P<group>[^\s]+)\s+'
'(?P<size>[^\s]+)\s+'
'(?P<date>[^\s]+)\s+'
'(?P<time>[^\s]+)\s+'
'(?P<filename>[^\s]+)$')
return _GetFilesFromRecursiveLsOutput(
path, self.RunShellCommand('ls -lR %s' % path), re_file,
self.RunShellCommand('date +%z')[0])
def SetupPerformanceTest(self):
"""Sets up performance tests."""
# Disable CPU scaling to reduce noise in tests
if not self._original_governor:
self._original_governor = self.RunShellCommand('cat ' + SCALING_GOVERNOR)
self.RunShellCommand('echo performance > ' + SCALING_GOVERNOR)
self.DropRamCaches()
def TearDownPerformanceTest(self):
"""Tears down performance tests."""
if self._original_governor:
self.RunShellCommand('echo %s > %s' % (self._original_governor[0],
SCALING_GOVERNOR))
self._original_governor = None
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
properties = file(temp_props_file.name).read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
file(temp_props_file.name, 'w').write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.RunShellCommand('getprop ' + JAVA_ASSERT_PROPERTY)
if was_set == enable:
return False
self.RunShellCommand('setprop %s "%s"' % (JAVA_ASSERT_PROPERTY,
enable and 'all' or ''))
return True
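  # Typical usage (mirrors main() at the bottom of this file): only reboot when the
  # persisted property actually changed, e.g.
  #   if commands.SetJavaAssertsEnabled(True):
  #     commands.Reboot(full_reboot=False)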
def DropRamCaches(self):
"""Drops the filesystem ram caches for performance testing."""
self.RunShellCommand('echo 3 > ' + DROP_CACHES)
def StartMonitoringLogcat(self, clear=True, timeout=10, logfile=None,
filters=[]):
"""Starts monitoring the output of logcat, for use with WaitForLogMatch.
Args:
clear: If True the existing logcat output will be cleared, to avoiding
matching historical output lurking in the log.
timeout: How long WaitForLogMatch will wait for the given match
filters: A list of logcat filters to be used.
"""
if clear:
self.RunShellCommand('logcat -c')
args = ['logcat', '-v', 'threadtime']
if filters:
args.extend(filters)
else:
args.append('*:v')
# Spawn logcat and syncronize with it.
for _ in range(4):
self._logcat = pexpect.spawn('adb', args, timeout=timeout,
logfile=logfile)
self.RunShellCommand('log startup_sync')
if self._logcat.expect(['startup_sync', pexpect.EOF,
pexpect.TIMEOUT]) == 0:
break
self._logcat.close(force=True)
else:
logging.critical('Error reading from logcat: ' + str(self._logcat.match))
sys.exit(1)
def GetMonitoredLogCat(self):
"""Returns an "adb logcat" command as created by pexpected.spawn."""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
return self._logcat
def WaitForLogMatch(self, search_re):
"""Blocks until a line containing |line_re| is logged or a timeout occurs.
Args:
search_re: The compiled re to search each line for.
Returns:
The re match object.
"""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
logging.info('<<< Waiting for logcat:' + str(search_re.pattern))
t0 = time.time()
try:
while True:
        # Note this will block for up to the timeout _per log line_, so we need
# to calculate the overall timeout remaining since t0.
time_remaining = t0 + self._logcat.timeout - time.time()
if time_remaining < 0: raise pexpect.TIMEOUT(self._logcat)
self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
line = self._logcat.match.group(1)
search_match = search_re.search(line)
if search_match:
return search_match
logging.info('<<< Skipped Logcat Line:' + str(line))
except pexpect.TIMEOUT:
raise pexpect.TIMEOUT(
'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
'to debug)' %
(self._logcat.timeout, search_re.pattern))
def StartRecordingLogcat(self, clear=True, filters=['*:v']):
"""Starts recording logcat output to eventually be saved as a string.
This call should come before some series of tests are run, with either
StopRecordingLogcat or SearchLogcatRecord following the tests.
Args:
clear: True if existing log output should be cleared.
filters: A list of logcat filters to be used.
"""
if clear:
self._adb.SendCommand('logcat -c')
logcat_command = 'adb logcat -v threadtime %s' % ' '.join(filters)
self.logcat_process = subprocess.Popen(logcat_command, shell=True,
stdout=subprocess.PIPE)
def StopRecordingLogcat(self):
"""Stops an existing logcat recording subprocess and returns output.
Returns:
The logcat output as a string or an empty string if logcat was not
being recorded at the time.
"""
if not self.logcat_process:
return ''
    # Cannot evaluate poll() directly, as 0 is a possible return value.
    # Better to read self.logcat_process.stdout before killing the process,
    # otherwise communicate() may return incomplete output due to a broken pipe.
if self.logcat_process.poll() == None:
self.logcat_process.kill()
(output, _) = self.logcat_process.communicate()
self.logcat_process = None
return output
def SearchLogcatRecord(self, record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
      A list of dictionaries representing matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
def ExtractPid(self, process_name):
"""Extracts Process Ids for a given process name from Android Shell.
Args:
process_name: name of the process on the device.
Returns:
List of all the process ids (as strings) that match the given name.
"""
pids = []
for line in self.RunShellCommand('ps'):
data = line.split()
try:
if process_name in data[-1]: # name is in the last column
pids.append(data[1]) # PID is in the second column
except IndexError:
pass
return pids
def GetIoStats(self):
"""Gets cumulative disk IO stats since boot (for all processes).
Returns:
Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
was an error.
"""
# Field definitions.
# http://www.kernel.org/doc/Documentation/iostats.txt
device = 2
num_reads_issued_idx = 3
num_reads_merged_idx = 4
num_sectors_read_idx = 5
ms_spent_reading_idx = 6
num_writes_completed_idx = 7
num_writes_merged_idx = 8
num_sectors_written_idx = 9
ms_spent_writing_idx = 10
num_ios_in_progress_idx = 11
ms_spent_doing_io_idx = 12
ms_spent_doing_io_weighted_idx = 13
for line in self.RunShellCommand('cat /proc/diskstats'):
fields = line.split()
if fields[device] == 'mmcblk0':
return {
'num_reads': int(fields[num_reads_issued_idx]),
'num_writes': int(fields[num_writes_completed_idx]),
'read_ms': int(fields[ms_spent_reading_idx]),
'write_ms': int(fields[ms_spent_writing_idx]),
}
logging.warning('Could not find disk IO stats.')
return None
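  # Illustrative /proc/diskstats line for the field indexes above (values are made up):
  #   179  0 mmcblk0 1563 201 45678 930 842 155 30120 1100 0 1500 2030
  # which would yield num_reads=1563, read_ms=930, num_writes=842, write_ms=1100.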
def GetMemoryUsage(self, package):
"""Returns the memory usage for all processes whose name contains |pacakge|.
Args:
name: A string holding process name to lookup pid list for.
Returns:
Dict of {metric:usage_kb}, summed over all pids associated with |name|.
The metric keys retruned are: Size, Rss, Pss, Shared_Clean, Shared_Dirty,
Private_Clean, Private_Dirty, Referenced, Swap, KernelPageSize,
MMUPageSize.
"""
usage_dict = collections.defaultdict(int)
pid_list = self.ExtractPid(package)
# We used to use the showmap command, but it is currently broken on
# stingray so it's easier to just parse /proc/<pid>/smaps directly.
memory_stat_re = re.compile('^(?P<key>\w+):\s+(?P<value>\d+) kB$')
for pid in pid_list:
for line in self.RunShellCommand('cat /proc/%s/smaps' % pid,
log_result=False):
match = re.match(memory_stat_re, line)
if match: usage_dict[match.group('key')] += int(match.group('value'))
if not usage_dict or not any(usage_dict.values()):
# Presumably the process died between ps and showmap.
logging.warning('Could not find memory usage for pid ' + str(pid))
return usage_dict
def UnlockDevice(self):
"""Unlocks the screen of the device."""
# Make sure a menu button event will actually unlock the screen.
if IsRunningAsBuildbot():
assert self.RunShellCommand('getprop ro.test_harness')[0].strip() == '1'
# The following keyevent unlocks the screen if locked.
self.SendKeyEvent(KEYCODE_MENU)
# If the screen wasn't locked the previous command will bring up the menu,
# which this will dismiss. Otherwise this shouldn't change anything.
self.SendKeyEvent(KEYCODE_BACK)
def main(argv):
option_parser = optparse.OptionParser()
option_parser.add_option('-w', '--wait_for_pm', action='store_true',
default=False, dest='wait_for_pm',
help='Waits for Device Package Manager to become available')
option_parser.add_option('--enable_asserts', dest='set_asserts',
action='store_true', default=None,
help='Sets the dalvik.vm.enableassertions property to "all"')
option_parser.add_option('--disable_asserts', dest='set_asserts',
action='store_false', default=None,
help='Removes the dalvik.vm.enableassertions property')
options, args = option_parser.parse_args(argv)
commands = AndroidCommands(wait_for_pm=options.wait_for_pm)
if options.set_asserts != None:
if commands.SetJavaAssertsEnabled(options.set_asserts):
commands.Reboot(full_reboot=False)
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause | -6,705,148,772,835,299,000 | 36.048718 | 80 | 0.64828 | false |
conan-io/conan | conans/test/unittests/client/graph/version_ranges_graph_test.py | 1 | 12748 | from collections import OrderedDict
from collections import namedtuple
import six
from parameterized import parameterized
from conans.errors import ConanException
from conans.model.ref import ConanFileReference
from conans.model.requires import Requirements
from conans.test.unittests.model.transitive_reqs_test import GraphTest
from conans.test.utils.tools import GenConanfile, TurboTestClient, TestServer, \
NO_SETTINGS_PACKAGE_ID
from conans.test.utils.tools import create_profile
def _get_nodes(graph, name):
""" return all the nodes matching a particular name. Could be >1 in case
that private requirements embed different versions
"""
return [n for n in graph.nodes if n.conanfile.name == name]
Edge = namedtuple("Edge", "src dst")
def _get_edges(graph):
edges = set()
for n in graph.nodes:
edges.update([Edge(n, neigh) for neigh in n.neighbors()])
return edges
def _clear_revs(requires):
for require in requires.values():
require.ref = require.ref.copy_clear_rev()
return requires
class VersionRangesTest(GraphTest):
def setUp(self):
super(VersionRangesTest, self).setUp()
for v in ["0.1", "0.2", "0.3", "1.1", "1.1.2", "1.2.1", "2.1", "2.2.1"]:
say_content = GenConanfile().with_name("Say").with_version(v)
say_ref = ConanFileReference.loads("Say/%s@myuser/testing" % v)
self.retriever.save_recipe(say_ref, say_content)
def build_graph(self, content, update=False):
self.loader._cached_conanfile_classes = {}
profile = create_profile()
root_conan = self.retriever.root(str(content), profile)
deps_graph = self.builder.load_graph(root_conan, update, update, self.remotes,
profile_host=profile,
profile_build=None)
self.output.write("\n".join(self.resolver.output))
return deps_graph
def test_local_basic(self):
for expr, solution in [(">0.0", "2.2.1"),
(">0.1,<1", "0.3"),
(">0.1,<1||2.1", "2.1"),
("", "2.2.1"),
("~0", "0.3"),
("~=1", "1.2.1"),
("~1.1", "1.1.2"),
("~=2", "2.2.1"),
("~=2.1", "2.1"),
]:
req = ConanFileReference.loads("Say/[%s]@myuser/testing" % expr)
deps_graph = self.build_graph(GenConanfile().with_name("Hello").with_version("1.2")
.with_require(req))
self.assertEqual(2, len(deps_graph.nodes))
hello = _get_nodes(deps_graph, "Hello")[0]
say = _get_nodes(deps_graph, "Say")[0]
self.assertEqual(_get_edges(deps_graph), {Edge(hello, say)})
self.assertEqual(hello.ref, None)
conanfile = hello.conanfile
self.assertEqual(conanfile.version, "1.2")
self.assertEqual(conanfile.name, "Hello")
say_ref = ConanFileReference.loads("Say/%s@myuser/testing" % solution)
self.assertEqual(_clear_revs(conanfile.requires), Requirements(str(say_ref)))
def test_remote_basic(self):
self.resolver._local_search = None
remote_packages = []
for v in ["0.1", "0.2", "0.3", "1.1", "1.1.2", "1.2.1", "2.1", "2.2.1"]:
say_ref = ConanFileReference.loads("Say/[%s]@myuser/testing" % v)
remote_packages.append(say_ref)
self.remotes.add("myremote", "myurl")
self.remote_manager.packages = remote_packages
for expr, solution in [(">0.0", "2.2.1"),
(">0.1,<1", "0.3"),
(">0.1,<1||2.1", "2.1"),
("", "2.2.1"),
("~0", "0.3"),
("~=1", "1.2.1"),
("~1.1", "1.1.2"),
("~=2", "2.2.1"),
("~=2.1", "2.1"),
]:
req = ConanFileReference.loads("Say/[%s]@myuser/testing" % expr)
deps_graph = self.build_graph(GenConanfile().with_name("Hello").with_version("1.2")
.with_require(req),
update=True)
self.assertEqual(self.remote_manager.count, {'Say/*@myuser/testing': 1})
self.assertEqual(2, len(deps_graph.nodes))
hello = _get_nodes(deps_graph, "Hello")[0]
say = _get_nodes(deps_graph, "Say")[0]
self.assertEqual(_get_edges(deps_graph), {Edge(hello, say)})
self.assertEqual(hello.ref, None)
conanfile = hello.conanfile
self.assertEqual(conanfile.version, "1.2")
self.assertEqual(conanfile.name, "Hello")
say_ref = ConanFileReference.loads("Say/%s@myuser/testing" % solution)
self.assertEqual(_clear_revs(conanfile.requires), Requirements(str(say_ref)))
def test_remote_optimized(self):
self.resolver._local_search = None
remote_packages = []
self.remotes.add("myremote", "myurl")
for v in ["0.1", "0.2", "0.3", "1.1", "1.1.2", "1.2.1", "2.1", "2.2.1"]:
say_ref = ConanFileReference.loads("Say/%s@myuser/testing" % v)
remote_packages.append(say_ref)
self.remote_manager.packages = remote_packages
dep_content = """from conans import ConanFile
class Dep1Conan(ConanFile):
requires = "Say/[%s]@myuser/testing"
"""
dep_ref = ConanFileReference.loads("Dep1/0.1@myuser/testing")
self.retriever.save_recipe(dep_ref, dep_content % ">=0.1")
dep_ref = ConanFileReference.loads("Dep2/0.1@myuser/testing")
self.retriever.save_recipe(dep_ref, dep_content % ">=0.1")
hello_content = """from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
requires = "Dep1/0.1@myuser/testing", "Dep2/0.1@myuser/testing"
"""
deps_graph = self.build_graph(hello_content, update=True)
self.assertEqual(4, len(deps_graph.nodes))
hello = _get_nodes(deps_graph, "Hello")[0]
say = _get_nodes(deps_graph, "Say")[0]
dep1 = _get_nodes(deps_graph, "Dep1")[0]
dep2 = _get_nodes(deps_graph, "Dep2")[0]
self.assertEqual(_get_edges(deps_graph), {Edge(hello, dep1), Edge(hello, dep2),
Edge(dep1, say), Edge(dep2, say)})
# Most important check: counter of calls to remote
self.assertEqual(self.remote_manager.count, {'Say/*@myuser/testing': 1})
@parameterized.expand([("", "0.3", None, None, False),
('"Say/1.1@myuser/testing"', "1.1", False, True, False),
('"Say/0.2@myuser/testing"', "0.2", False, True, False),
('("Say/1.1@myuser/testing", "override")', "1.1", True, True, False),
('("Say/0.2@myuser/testing", "override")', "0.2", True, True, False),
# ranges
('"Say/[<=1.2]@myuser/testing"', "1.2.1", False, False, True),
('"Say/[>=0.2,<=1.0]@myuser/testing"', "0.3", False, True, True),
('"Say/[>=0.2 <=1.0]@myuser/testing"', "0.3", False, True, True),
('("Say/[<=1.2]@myuser/testing", "override")', "1.2.1", True, False, True),
('("Say/[>=0.2,<=1.0]@myuser/testing", "override")', "0.3", True, True, True),
('("Say/[>=0.2 <=1.0]@myuser/testing", "override")', "0.3", True, True, True),
])
def test_transitive(self, version_range, solution, override, valid, is_vrange):
hello_text = GenConanfile().with_name("Hello").with_version("1.2")\
.with_require("Say/[>0.1, <1]@myuser/testing")
hello_ref = ConanFileReference.loads("Hello/1.2@myuser/testing")
self.retriever.save_recipe(hello_ref, hello_text)
chat_content = """
from conans import ConanFile
class ChatConan(ConanFile):
name = "Chat"
version = "2.3"
requires = "Hello/1.2@myuser/testing", %s
"""
if valid is False:
with six.assertRaisesRegex(self, ConanException, "not valid"):
self.build_graph(chat_content % version_range)
return
deps_graph = self.build_graph(chat_content % version_range)
hello = _get_nodes(deps_graph, "Hello")[0]
say = _get_nodes(deps_graph, "Say")[0]
chat = _get_nodes(deps_graph, "Chat")[0]
edges = {Edge(hello, say), Edge(chat, hello)}
if override is not None:
self.assertIn("overridden", self.output)
else:
self.assertNotIn("overridden", self.output)
if override is False:
edges = {Edge(hello, say), Edge(chat, say), Edge(chat, hello)}
if is_vrange is True: # If it is not a version range, it is not 'is_resolved'
self.assertIn(" valid", self.output)
self.assertNotIn("not valid", self.output)
self.assertEqual(3, len(deps_graph.nodes))
self.assertEqual(_get_edges(deps_graph), edges)
self.assertEqual(hello.ref.copy_clear_rev(), hello_ref)
conanfile = hello.conanfile
self.assertEqual(conanfile.version, "1.2")
self.assertEqual(conanfile.name, "Hello")
say_ref = ConanFileReference.loads("Say/%s@myuser/testing" % solution)
self.assertEqual(_clear_revs(conanfile.requires), Requirements(str(say_ref)))
def test_duplicated_error(self):
content = GenConanfile().with_name("log4cpp").with_version("1.1.1")
log4cpp_ref = ConanFileReference.loads("log4cpp/1.1.1@myuser/testing")
self.retriever.save_recipe(log4cpp_ref, content)
content = """
from conans import ConanFile
class LoggerInterfaceConan(ConanFile):
name = "LoggerInterface"
version = "0.1.1"
def requirements(self):
self.requires("log4cpp/[~1.1]@myuser/testing")
"""
logiface_ref = ConanFileReference.loads("LoggerInterface/0.1.1@myuser/testing")
self.retriever.save_recipe(logiface_ref, content)
content = """
from conans import ConanFile
class OtherConan(ConanFile):
name = "other"
version = "2.0.11549"
requires = "LoggerInterface/[~0.1]@myuser/testing"
"""
other_ref = ConanFileReference.loads("other/2.0.11549@myuser/testing")
self.retriever.save_recipe(other_ref, content)
content = """
from conans import ConanFile
class Project(ConanFile):
requires = "LoggerInterface/[~0.1]@myuser/testing", "other/[~2.0]@myuser/testing"
"""
deps_graph = self.build_graph(content)
log4cpp = _get_nodes(deps_graph, "log4cpp")[0]
logger_interface = _get_nodes(deps_graph, "LoggerInterface")[0]
other = _get_nodes(deps_graph, "other")[0]
self.assertEqual(4, len(deps_graph.nodes))
self.assertEqual(log4cpp.ref.copy_clear_rev(), log4cpp_ref)
conanfile = log4cpp.conanfile
self.assertEqual(conanfile.version, "1.1.1")
self.assertEqual(conanfile.name, "log4cpp")
self.assertEqual(logger_interface.ref.copy_clear_rev(), logiface_ref)
conanfile = logger_interface.conanfile
self.assertEqual(conanfile.version, "0.1.1")
self.assertEqual(conanfile.name, "LoggerInterface")
self.assertEqual(other.ref.copy_clear_rev(), other_ref)
conanfile = other.conanfile
self.assertEqual(conanfile.version, "2.0.11549")
self.assertEqual(conanfile.name, "other")
def test_different_user_channel_resolved_correctly(self):
server1 = TestServer()
server2 = TestServer()
servers = OrderedDict([("server1", server1), ("server2", server2)])
client = TurboTestClient(servers=servers)
ref1 = ConanFileReference.loads("lib/1.0@conan/stable")
ref2 = ConanFileReference.loads("lib/1.0@conan/testing")
client.create(ref1, conanfile=GenConanfile())
client.upload_all(ref1, remote="server1")
client.create(ref2, conanfile=GenConanfile())
client.upload_all(ref2, remote="server2")
client2 = TurboTestClient(servers=servers)
client2.run("install lib/[>=1.0]@conan/testing")
self.assertIn("lib/1.0@conan/testing: Retrieving package {} "
"from remote 'server2' ".format(NO_SETTINGS_PACKAGE_ID), client2.out)
| mit | -7,860,550,224,070,192,000 | 42.958621 | 105 | 0.562127 | false |
rsalmei/clearly | tests/unit/utils/test_data.py | 1 | 2993 | import re
from unittest import mock
import pytest
from celery.events.state import Task, Worker
from clearly.protos.clearly_pb2 import TaskMessage, WorkerMessage
# noinspection PyProtectedMember
from clearly.utils.data import _accept, accept_task, accept_worker, obj_to_message
TASK = dict(name='name', routing_key='routing_key', uuid='uuid', retries=5, args='args',
kwargs='kwargs', result='result', traceback='traceback', result_meta='meta')
WORKER = dict(hostname='hostname', pid=12000, sw_sys='sw_sys', sw_ident='sw_ident',
sw_ver='sw_ver', loadavg=[1., 2., 3.], processed=789789, freq=5, heartbeats=[1])
@pytest.mark.parametrize('obj, to_type, data', [
(Task(**TASK), TaskMessage, TASK),
(Worker(**WORKER), WorkerMessage, WORKER),
])
def test_server_obj_to_message_valid(obj, to_type, data):
obj.timestamp, obj.state = 123.1, 'state'
message = obj_to_message(obj, to_type)
assert all(getattr(message, k) == v for k, v in data.items())
@pytest.mark.parametrize('obj, to_type', [
(1, TaskMessage),
(1, WorkerMessage),
('wrong', TaskMessage),
('wrong', WorkerMessage),
({'wrong': True}, TaskMessage),
({'wrong': True}, WorkerMessage),
(Task(**TASK), WorkerMessage),
(Worker(**WORKER), TaskMessage),
])
def test_server_obj_to_message_invalid(obj, to_type):
with pytest.raises(AttributeError):
obj_to_message(obj, to_type)
@pytest.fixture(params=(True, False))
def negate(request):
yield request.param
@pytest.mark.parametrize('regex, values, expected', [
(r'.', ('a',), True),
(r'.', ('agagfsa', ''), True),
(r'.', ('', 'ggfdagfds'), True),
(r'.', ('',), False),
(r'.', ('', '', ''), False),
(r'a', ('a',), True),
(r'a', ('b',), False),
(r'a', ('', 'zxc', 'qwe', 'bab'), True),
(r'a', ('bbbbbb', ''), False),
(r'a', ('', 'bbbabbb', ''), True),
(r'a', ('',), False),
(r'ab|ac', ('bbbbaaaa',), False),
(r'ab|ac', ('bbbbaaaab',), True),
(r'ab|ac', ('', 'a'), False),
(r'ab|ac', ('', 'aaaaaa'), False),
(r'ab|ac', ('aabb', ''), True),
(r'ab|ac', ('aabb', 'aacc'), True),
(r'ab|ac', ('aaaa', 'aacc'), True),
(r'ab|ac', ('aaaa', 'bbbb'), False),
])
def test_data_client_accepts(regex, values, expected, negate):
assert _accept(re.compile(regex), negate, values) == (expected ^ negate)
def test_accept_tasks():
pattern = re.compile('pattern')
with mock.patch('clearly.utils.data._accept') as mock_accept, \
mock.patch('clearly.utils.data.TASK_OP') as mock_op:
accept_task(pattern, True, Task(**TASK))
mock_accept.assert_called_once_with(pattern, True, mock_op())
def test_accept_workers():
pattern = re.compile('pattern')
with mock.patch('clearly.utils.data._accept') as mock_accept, \
mock.patch('clearly.utils.data.WORKER_OP') as mock_op:
accept_worker(pattern, True, Task(**TASK))
mock_accept.assert_called_once_with(pattern, True, mock_op())
| mit | 6,475,354,787,992,684,000 | 33.802326 | 94 | 0.602072 | false |
mdtux89/amr-eager | action.py | 1 | 1131 | #!/usr/bin/env python
#coding=utf-8
'''
Definition of Action class. In AMREAGER, an action can be either 'shift', 'reduce', 'rarc'
or 'larc'. When it's a shift, the argument is the subgraph triggered by the token. When it's a reduce,
the argument is used to specify the optional reeentrant edge to create. For rarcs and rarcs, the
argument is the label for those edges.
@author: Marco Damonte ([email protected])
@since: 03-10-16
'''
class Action:
def __init__(self, name, argv = None):
assert (name == "shift" or name == "larc" or name == "rarc" or name == "reduce")
self.name = name
self.argv = argv
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name, self.argv)
def __eq__(self, other):
return self.name == other.name and self.argv == other.argv
def get_id(self):
act_id = 0
if self.name == "shift":
act_id = 1
elif self.name == "reduce":
act_id = 2
elif self.name == "larc":
act_id = 3
elif self.name == "rarc":
act_id = 4
return act_id
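# Illustrative usage (not part of the original module; the ":ARG0" label is made up):
#   Action("rarc", ":ARG0").get_id()  -> 4
#   Action("shift", None).get_id()    -> 1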
| bsd-2-clause | -1,735,350,711,124,829,000 | 30.416667 | 102 | 0.576481 | false |
Quihico/repository.spartacus | temp/script.module.python.koding.aio/lib/koding/video.py | 1 | 16114 | # -*- coding: utf-8 -*-
# script.module.python.koding.aio
# Python Koding AIO (c) by whufclee ([email protected])
# Python Koding AIO is licensed under a
# Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
# You should have received a copy of the license along with this
# work. If not, see http://creativecommons.org/licenses/by-nc-nd/4.0.
# IMPORTANT: If you choose to use the special noobsandnerds features which hook into their server
# please make sure you give approptiate credit in your add-on description (noobsandnerds.com)
#
# Please make sure you've read and understood the license, this code can NOT be used commercially
# and it can NOT be modified and redistributed. If you're found to be in breach of this license
# then any affected add-ons will be blacklisted and will not be able to work on the same system
# as any other add-ons which use this code. Thank you for your cooperation.
import os
import shutil
import xbmc
import xbmcgui
from __init__ import dolog
from guitools import Show_Busy
from systemtools import Last_Error
dp = xbmcgui.DialogProgress()
check_started = xbmc.translatePath('special://profile/addon_data/script.module.python.koding.aio/temp/playback_in_progress')
#----------------------------------------------------------------
# TUTORIAL #
def Check_Playback(ignore_dp=False,timeout=10):
"""
    This function will return True or False based on video playback. Simply start a stream
    (via an add-on, a direct URL or local storage, it doesn't matter) and the code will
    then work out whether playback is successful. This uses a number of checks and should take into
account all potential glitches which can occur during playback. The return should happen
within a second or two of playback being successful (or not).
CODE: Check_Playback()
AVAILABLE PARAMS:
    ignore_dp - By default this is set to False, which means the function
    will wait for any open DialogProgress window to close before checking
    playback. Set this to True to skip that check. If you bring up your own
    DialogProgress while waiting for the stream to start then you'll want
    to set this to True. Please bear in mind the reason this check exists
    and is enabled by default is that some streams bring up a DialogProgress
    when initiated (such as f4m proxy links) and disabling this check
    in those circumstances can cause false positives.
    timeout - This is the amount of time you want to allow for playback
    to start before sending back a response of False. Please note that when
    the DialogProgress check is active it can add a potential 10s extra
    to this amount if a DialogProgress window is open. The default setting
    for this is 10s.
EXAMPLE CODE:
xbmc.Player().play('http://totalrevolution.tv/videos/python_koding/Browse_To_Folder.mov')
isplaying = koding.Check_Playback()
if isplaying:
dialog.ok('PLAYBACK SUCCESSFUL','Congratulations, playback was successful')
xbmc.Player().stop()
else:
dialog.ok('PLAYBACK FAILED','Sorry, playback failed :(')
~"""
if not os.path.exists(check_started):
os.makedirs(check_started)
if not ignore_dp:
isdialog = True
counter = 1
# Check if the progress window is active and wait for playback
while isdialog:
dolog('### Current Window: %s' % xbmc.getInfoLabel('System.CurrentWindow'))
dolog('### Current XML: %s' % xbmc.getInfoLabel('Window.Property(xmlfile)'))
dolog('### Progress Dialog active, sleeping for %s seconds' % counter)
xbmc.sleep(1000)
if xbmc.getCondVisibility('Window.IsActive(progressdialog)') or (xbmc.getInfoLabel('Window.Property(xmlfile)') == 'DialogProgress.xml'):
isdialog = True
else:
isdialog = False
counter += 1
dolog('counter: %s' % counter)
            # We've given the DialogProgress 10 seconds to finish; if it's still up, time to close it
if counter == 10:
try:
dolog('attempting to send click to close dp')
xbmc.executebuiltin('SendClick()')
if dp.iscanceled():
dp.close()
except:
dolog('### FAILED TO CLOSE DP')
isplaying = xbmc.Player().isPlaying()
counter = 1
if xbmc.Player().isPlayingAudio():
return True
# If xbmc player is not yet active give it some time to initialise
while not isplaying and counter < timeout:
xbmc.sleep(1000)
isplaying = xbmc.Player().isPlaying()
dolog('### XBMC Player not yet active, sleeping for %s seconds' % counter)
counter += 1
success = 0
counter = 0
# If it's playing give it time to physically start streaming then attempt to pull some info
if isplaying:
xbmc.sleep(1000)
while not success and counter < 10:
try:
if xbmc.Player().isPlayingVideo():
infotag = xbmc.Player().getVideoInfoTag()
vidtime = xbmc.Player().getTime()
if vidtime > 0:
success = 1
# If playback doesn't start automatically (buffering) we force it to play
else:
dolog('### Playback active but time at zero, trying to unpause')
xbmc.executebuiltin('PlayerControl(Play)')
xbmc.sleep(2000)
vidtime = xbmc.Player().getTime()
if vidtime > 0:
success = 1
# If no infotag or time could be pulled then we assume playback failed, try and stop the xbmc.player
except:
counter += 1
xbmc.sleep(1000)
# Check if the busy dialog is still active from previous locked up playback attempt
isbusy = xbmc.getCondVisibility('Window.IsActive(busydialog)')
counter = 1
while isbusy:
dolog('### Busy dialog active, sleeping for %ss' % counter)
xbmc.sleep(1000)
isbusy = xbmc.getCondVisibility('Window.IsActive(busydialog)')
counter += 1
if counter == 5:
xbmc.executebuiltin('Dialog.Close(busydialog)')
if not success:
xbmc.executebuiltin('PlayerControl(Stop)')
dolog('### Failed playback, stopped stream')
shutil.rmtree(check_started)
return False
else:
shutil.rmtree(check_started)
return True
#----------------------------------------------------------------
# TUTORIAL #
def Play_Video(video,showbusy=True,content='video',ignore_dp=False,timeout=10, item=None):
"""
This will attempt to play a video and return True or False on
whether or not playback was successful. This function is similar
to Check_Playback but this actually tries a number of methods to
play the video whereas Check_Playback does not actually try to
play a video - it will just return True/False on whether or not
a video is currently playing.
CODE: Play_Video(video, [showbusy, content])
AVAILABLE PARAMS:
(*) video - This is the path to the video, this can be a local
path, online path or a channel number from the PVR.
showbusy - By default this is set to True which means while the
    function is attempting to play back the video the user will see the
    busy dialog. Set to False if you prefer this not to appear but do
    bear in mind a user may navigate to another section and try playing
something else if they think this isn't doing anything.
content - By default this is set to 'video', however if you're
passing through audio you may want to set this to 'music' so the
system can correctly set the tags for artist, song etc.
    ignore_dp - By default this is set to False, which means the function
    will wait for any open DialogProgress window to close before checking
    playback. Set this to True to skip that check. If you bring up your own
    DialogProgress while waiting for the stream to start then you'll want
    to set this to True. Please bear in mind the reason this check exists
    and is enabled by default is that some streams bring up a DialogProgress
    when initiated (such as f4m proxy links) and disabling this check
    in those circumstances can cause false positives.
    timeout - This is the amount of time you want to allow for playback
    to start before sending back a response of False. Please note that when
    the DialogProgress check is active it can add a potential 10s extra
    to this amount if a DialogProgress window is open. The default setting
    for this is 10s.
EXAMPLE CODE:
isplaying = koding.Play_Video('http://totalrevolution.tv/videos/python_koding/Browse_To_Folder.mov')
if isplaying:
dialog.ok('PLAYBACK SUCCESSFUL','Congratulations, playback was successful')
xbmc.Player().stop()
else:
dialog.ok('PLAYBACK FAILED','Sorry, playback failed :(')
~"""
dolog('### ORIGINAL VIDEO: %s'%video)
import urlresolver
try: import simplejson as json
except: import json
if not item:
meta = {}
for i in ['title', 'originaltitle', 'tvshowtitle', 'year', 'season', 'episode', 'genre', 'rating', 'votes',
'director', 'writer', 'plot', 'tagline']:
try:
meta[i] = xbmc.getInfoLabel('listitem.%s' % i)
except:
pass
meta = dict((k, v) for k, v in meta.iteritems() if not v == '')
if 'title' not in meta:
meta['title'] = xbmc.getInfoLabel('listitem.label')
icon = xbmc.getInfoLabel('listitem.icon')
item = xbmcgui.ListItem(path=video, iconImage =icon, thumbnailImage=icon)
if content == "music":
try:
meta['artist'] = xbmc.getInfoLabel('listitem.artist')
item.setInfo(type='Music', infoLabels={'title': meta['title'], 'artist': meta['artist']})
except:
item.setInfo(type='Video', infoLabels=meta)
else:
item.setInfo(type='Video', infoLabels=meta)
else:
item.setInfo(type='Video', infoLabels=meta)
playback = False
if showbusy:
Show_Busy()
# if a plugin path is sent we try activate window
if video.startswith('plugin://'):
try:
dolog('Attempting to play via xbmc.Player().play() method')
xbmc.Player().play(video)
# dolog('Attempting to play via XBMC.ActivateWindow(10025, ...) method')
# xbmc.executebuiltin('XBMC.ActivateWindow(10025,%s)' % video)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
except:
dolog(Last_Error())
# If an XBMC action has been sent through we do an executebuiltin command
elif video.startswith('ActivateWindow') or video.startswith('RunAddon') or video.startswith('RunScript') or video.startswith('PlayMedia'):
try:
dolog('Attempting to play via xbmc.executebuiltin method')
xbmc.executebuiltin('%s'%video)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
except:
dolog(Last_Error())
elif ',' in video:
# Standard xbmc.player method (a comma in url seems to throw urlresolver off)
try:
dolog('Attempting to play via xbmc.Player.play() method')
xbmc.Player().play('%s'%video, item)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
# Attempt to resolve via urlresolver
except:
try:
dolog('Attempting to resolve via urlresolver module')
dolog('video = %s'%video)
hmf = urlresolver.HostedMediaFile(url=video, include_disabled=False, include_universal=True)
if hmf.valid_url() == True:
video = hmf.resolve()
dolog('### VALID URL, RESOLVED: %s'%video)
xbmc.Player().play('%s' % video, item)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
except:
dolog(Last_Error())
# Play from a db entry - untested
elif video.isdigit():
dolog('### Video is digit, presuming it\'s a db item')
        command = ('{"jsonrpc": "2.0", "id":"1", "method": "Player.Open","params":{"item":{"channelid":%s}}}' % video)
xbmc.executeJSONRPC(command)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
else:
# Attempt to resolve via urlresolver
try:
dolog('Attempting to resolve via urlresolver module')
dolog('video = %s'%video)
hmf = urlresolver.HostedMediaFile(url=video, include_disabled=False, include_universal=True)
if hmf.valid_url() == True:
video = hmf.resolve()
dolog('### VALID URL, RESOLVED: %s'%video)
xbmc.Player().play('%s' % video, item)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
# Standard xbmc.player method
except:
try:
dolog('Attempting to play via xbmc.Player.play() method')
xbmc.Player().play('%s' % video, item)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
except:
dolog(Last_Error())
dolog('Playback status: %s' % playback)
Show_Busy(False)
return playback
#----------------------------------------------------------------
# TUTORIAL #
def Sleep_If_Playback_Active():
"""
This will allow you to pause code while kodi is playing audio or video
CODE: Sleep_If_Playback_Active()
EXAMPLE CODE:
dialog.ok('PLAY A VIDEO','We will now attempt to play a video, once you stop this video you should see a dialog.ok message.')
xbmc.Player().play('http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_stereo.avi')
xbmc.sleep(3000) # Give kodi enough time to load up the video
koding.Sleep_If_Playback_Active()
dialog.ok('PLAYBACK FINISHED','The playback has now been finished so this dialog code has now been initiated')
~"""
isplaying = xbmc.Player().isPlaying()
while isplaying:
xbmc.sleep(500)
isplaying = xbmc.Player().isPlaying() | gpl-2.0 | 1,564,265,599,120,319,500 | 40.738342 | 148 | 0.616946 | false |
tudorvio/tempest | tempest/api/compute/test_authorization.py | 1 | 18893 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class AuthorizationTestJSON(base.BaseV2ComputeTest):
credentials = ['primary', 'alt']
@classmethod
def skip_checks(cls):
super(AuthorizationTestJSON, cls).skip_checks()
if not CONF.service_available.glance:
raise cls.skipException('Glance is not available.')
@classmethod
def setup_credentials(cls):
# No network resources required for this test
cls.set_network_resources()
super(AuthorizationTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(AuthorizationTestJSON, cls).setup_clients()
cls.client = cls.os.servers_client
cls.images_client = cls.os.images_client
cls.glance_client = cls.os.image_client
cls.keypairs_client = cls.os.keypairs_client
cls.security_client = cls.os.security_groups_client
cls.rule_client = cls.os.security_group_rules_client
cls.alt_client = cls.alt_manager.servers_client
cls.alt_images_client = cls.alt_manager.images_client
cls.alt_keypairs_client = cls.alt_manager.keypairs_client
cls.alt_security_client = cls.alt_manager.security_groups_client
cls.alt_rule_client = cls.alt_manager.security_group_rules_client
@classmethod
def resource_setup(cls):
super(AuthorizationTestJSON, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
cls.server = cls.client.show_server(server['id'])
name = data_utils.rand_name('image')
body = cls.glance_client.create_image(name=name,
container_format='bare',
disk_format='raw',
is_public=False)['image']
image_id = body['id']
image_file = six.StringIO(('*' * 1024))
body = cls.glance_client.update_image(image_id,
data=image_file)['image']
cls.glance_client.wait_for_image_status(image_id, 'active')
cls.image = cls.images_client.show_image(image_id)
cls.keypairname = data_utils.rand_name('keypair')
cls.keypairs_client.create_keypair(name=cls.keypairname)
name = data_utils.rand_name('security')
description = data_utils.rand_name('description')
cls.security_group = cls.security_client.create_security_group(
name=name, description=description)
parent_group_id = cls.security_group['id']
ip_protocol = 'tcp'
from_port = 22
to_port = 22
cls.rule = cls.rule_client.create_security_group_rule(
parent_group_id=parent_group_id, ip_protocol=ip_protocol,
from_port=from_port, to_port=to_port)
@classmethod
def resource_cleanup(cls):
if hasattr(cls, 'image'):
cls.images_client.delete_image(cls.image['id'])
if hasattr(cls, 'keypairname'):
cls.keypairs_client.delete_keypair(cls.keypairname)
if hasattr(cls, 'security_group'):
cls.security_client.delete_security_group(cls.security_group['id'])
super(AuthorizationTestJSON, cls).resource_cleanup()
@test.idempotent_id('56816e4a-bd34-47b5-aee9-268c3efeb5d4')
def test_get_server_for_alt_account_fails(self):
# A GET request for a server on another user's account should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.show_server,
self.server['id'])
@test.idempotent_id('fb8a4870-6d9d-44ad-8375-95d52e98d9f6')
def test_delete_server_for_alt_account_fails(self):
# A DELETE request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.delete_server,
self.server['id'])
@test.idempotent_id('d792f91f-1d49-4eb5-b1ff-b229c4b9dc64')
def test_update_server_for_alt_account_fails(self):
# An update server request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.update_server,
self.server['id'], name='test')
@test.idempotent_id('488f24df-d7f7-4207-949a-f17fcb8e8769')
def test_list_server_addresses_for_alt_account_fails(self):
# A list addresses request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.list_addresses,
self.server['id'])
@test.idempotent_id('00b442d0-2e72-40e7-9b1f-31772e36da01')
def test_list_server_addresses_by_network_for_alt_account_fails(self):
# A list address/network request for another user's server should fail
server_id = self.server['id']
self.assertRaises(lib_exc.NotFound,
self.alt_client.list_addresses_by_network, server_id,
'public')
@test.idempotent_id('cc90b35a-19f0-45d2-b680-2aabf934aa22')
def test_list_servers_with_alternate_tenant(self):
# A list on servers from one tenant should not
# show on alternate tenant
# Listing servers from alternate tenant
alt_server_ids = []
body = self.alt_client.list_servers()
alt_server_ids = [s['id'] for s in body['servers']]
self.assertNotIn(self.server['id'], alt_server_ids)
@test.idempotent_id('376dbc16-0779-4384-a723-752774799641')
def test_change_password_for_alt_account_fails(self):
# A change password request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.change_password,
self.server['id'], 'newpass')
@test.idempotent_id('14cb5ff5-f646-45ca-8f51-09081d6c0c24')
def test_reboot_server_for_alt_account_fails(self):
# A reboot request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.reboot,
self.server['id'], 'HARD')
@test.idempotent_id('8a0bce51-cd00-480b-88ba-dbc7d8408a37')
def test_rebuild_server_for_alt_account_fails(self):
# A rebuild request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.rebuild,
self.server['id'], self.image_ref_alt)
@test.idempotent_id('e4da647e-f982-4e61-9dad-1d1abebfb933')
def test_resize_server_for_alt_account_fails(self):
# A resize request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.resize,
self.server['id'], self.flavor_ref_alt)
@test.idempotent_id('a9fe8112-0ffa-4902-b061-f892bd5fe0d3')
def test_create_image_for_alt_account_fails(self):
# A create image request for another user's server should fail
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.create_image,
self.server['id'], name='testImage')
@test.idempotent_id('95d445f6-babc-4f2e-aea3-aa24ec5e7f0d')
def test_create_server_with_unauthorized_image(self):
# Server creation with another user's image should fail
self.assertRaises(lib_exc.BadRequest, self.alt_client.create_server,
'test', self.image['id'], self.flavor_ref)
@test.idempotent_id('acf8724b-142b-4044-82c3-78d31a533f24')
def test_create_server_fails_when_tenant_incorrect(self):
# A create server request should fail if the tenant id does not match
# the current user
# Change the base URL to impersonate another user
self.alt_client.auth_provider.set_alt_auth_data(
request_part='url',
auth_data=self.client.auth_provider.auth_data
)
self.assertRaises(lib_exc.BadRequest,
self.alt_client.create_server, 'test',
self.image['id'], self.flavor_ref)
@test.idempotent_id('f03d1ded-7fd4-4d29-bc13-e2391f29c625')
def test_create_keypair_in_analt_user_tenant(self):
# A create keypair request should fail if the tenant id does not match
# the current user
# POST keypair with other user tenant
k_name = data_utils.rand_name('keypair')
try:
# Change the base URL to impersonate another user
self.alt_keypairs_client.auth_provider.set_alt_auth_data(
request_part='url',
auth_data=self.keypairs_client.auth_provider.auth_data
)
resp = {}
resp['status'] = None
self.assertRaises(lib_exc.BadRequest,
self.alt_keypairs_client.create_keypair,
name=k_name)
finally:
# Next request the base_url is back to normal
if (resp['status'] is not None):
self.alt_keypairs_client.delete_keypair(k_name)
LOG.error("Create keypair request should not happen "
"if the tenant id does not match the current user")
@test.idempotent_id('85bcdd8f-56b4-4868-ae56-63fbf6f7e405')
def test_get_keypair_of_alt_account_fails(self):
# A GET request for another user's keypair should fail
self.assertRaises(lib_exc.NotFound,
self.alt_keypairs_client.show_keypair,
self.keypairname)
@test.idempotent_id('6d841683-a8e0-43da-a1b8-b339f7692b61')
def test_delete_keypair_of_alt_account_fails(self):
# A DELETE request for another user's keypair should fail
self.assertRaises(lib_exc.NotFound,
self.alt_keypairs_client.delete_keypair,
self.keypairname)
@test.idempotent_id('fcb2e144-36e3-4dfb-9f9f-e72fcdec5656')
def test_get_image_for_alt_account_fails(self):
# A GET request for an image on another user's account should fail
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.show_image, self.image['id'])
@test.idempotent_id('9facb962-f043-4a9d-b9ee-166a32dea098')
def test_delete_image_for_alt_account_fails(self):
# A DELETE request for another user's image should fail
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.delete_image,
self.image['id'])
@test.idempotent_id('752c917e-83be-499d-a422-3559127f7d3c')
def test_create_security_group_in_analt_user_tenant(self):
# A create security group request should fail if the tenant id does not
# match the current user
# POST security group with other user tenant
s_name = data_utils.rand_name('security')
s_description = data_utils.rand_name('security')
try:
# Change the base URL to impersonate another user
self.alt_security_client.auth_provider.set_alt_auth_data(
request_part='url',
auth_data=self.security_client.auth_provider.auth_data
)
resp = {}
resp['status'] = None
self.assertRaises(lib_exc.BadRequest,
self.alt_security_client.create_security_group,
name=s_name, description=s_description)
finally:
# Next request the base_url is back to normal
if resp['status'] is not None:
self.alt_security_client.delete_security_group(resp['id'])
                LOG.error("Create Security Group request should not happen if"
                          " the tenant id does not match the current user")
@test.idempotent_id('9db3590f-4d15-4e5f-985e-b28514919a6f')
def test_get_security_group_of_alt_account_fails(self):
# A GET request for another user's security group should fail
self.assertRaises(lib_exc.NotFound,
self.alt_security_client.show_security_group,
self.security_group['id'])
@test.idempotent_id('155387a5-2bbc-4acf-ab06-698dae537ea5')
def test_delete_security_group_of_alt_account_fails(self):
# A DELETE request for another user's security group should fail
self.assertRaises(lib_exc.NotFound,
self.alt_security_client.delete_security_group,
self.security_group['id'])
@test.idempotent_id('b2b76de0-210a-4089-b921-591c9ec552f6')
def test_create_security_group_rule_in_analt_user_tenant(self):
# A create security group rule request should fail if the tenant id
# does not match the current user
# POST security group rule with other user tenant
parent_group_id = self.security_group['id']
ip_protocol = 'icmp'
from_port = -1
to_port = -1
try:
# Change the base URL to impersonate another user
self.alt_rule_client.auth_provider.set_alt_auth_data(
request_part='url',
auth_data=self.rule_client.auth_provider.auth_data
)
resp = {}
resp['status'] = None
self.assertRaises(lib_exc.BadRequest,
self.alt_rule_client.
create_security_group_rule,
parent_group_id=parent_group_id,
ip_protocol=ip_protocol,
from_port=from_port, to_port=to_port)
finally:
# Next request the base_url is back to normal
if resp['status'] is not None:
self.alt_rule_client.delete_security_group_rule(resp['id'])
LOG.error("Create security group rule request should not "
"happen if the tenant id does not match the"
" current user")
@test.idempotent_id('c6044177-37ef-4ce4-b12c-270ddf26d7da')
def test_delete_security_group_rule_of_alt_account_fails(self):
# A DELETE request for another user's security group rule
# should fail
self.assertRaises(lib_exc.NotFound,
self.alt_rule_client.delete_security_group_rule,
self.rule['id'])
@test.idempotent_id('c5f52351-53d9-4fc9-83e5-917f7f5e3d71')
def test_set_metadata_of_alt_account_server_fails(self):
# A set metadata for another user's server should fail
req_metadata = {'meta1': 'data1', 'meta2': 'data2'}
self.assertRaises(lib_exc.NotFound,
self.alt_client.set_server_metadata,
self.server['id'],
req_metadata)
@test.idempotent_id('fb6f51e9-df15-4939-898d-1aca38c258f0')
def test_set_metadata_of_alt_account_image_fails(self):
# A set metadata for another user's image should fail
req_metadata = {'meta1': 'value1', 'meta2': 'value2'}
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.set_image_metadata,
self.image['id'], req_metadata)
@test.idempotent_id('dea1936a-473d-49f2-92ad-97bb7aded22e')
def test_get_metadata_of_alt_account_server_fails(self):
# A get metadata for another user's server should fail
req_metadata = {'meta1': 'data1'}
self.client.set_server_metadata(self.server['id'], req_metadata)
self.addCleanup(self.client.delete_server_metadata_item,
self.server['id'], 'meta1')
self.assertRaises(lib_exc.NotFound,
self.alt_client.get_server_metadata_item,
self.server['id'], 'meta1')
@test.idempotent_id('16b2d724-0d3b-4216-a9fa-97bd4d9cf670')
def test_get_metadata_of_alt_account_image_fails(self):
# A get metadata for another user's image should fail
req_metadata = {'meta1': 'value1'}
self.addCleanup(self.images_client.delete_image_metadata_item,
self.image['id'], 'meta1')
self.images_client.set_image_metadata(self.image['id'],
req_metadata)
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.show_image_metadata_item,
self.image['id'], 'meta1')
@test.idempotent_id('79531e2e-e721-493c-8b30-a35db36fdaa6')
def test_delete_metadata_of_alt_account_server_fails(self):
# A delete metadata for another user's server should fail
req_metadata = {'meta1': 'data1'}
self.addCleanup(self.client.delete_server_metadata_item,
self.server['id'], 'meta1')
self.client.set_server_metadata(self.server['id'], req_metadata)
self.assertRaises(lib_exc.NotFound,
self.alt_client.delete_server_metadata_item,
self.server['id'], 'meta1')
@test.idempotent_id('a5175dcf-cef8-43d6-9b77-3cb707d62e94')
def test_delete_metadata_of_alt_account_image_fails(self):
# A delete metadata for another user's image should fail
req_metadata = {'meta1': 'data1'}
self.addCleanup(self.images_client.delete_image_metadata_item,
self.image['id'], 'meta1')
self.images_client.set_image_metadata(self.image['id'],
req_metadata)
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.delete_image_metadata_item,
self.image['id'], 'meta1')
@test.idempotent_id('b0c1e7a0-8853-40fd-8384-01f93d116cae')
def test_get_console_output_of_alt_account_server_fails(self):
# A Get Console Output for another user's server should fail
self.assertRaises(lib_exc.NotFound,
self.alt_client.get_console_output,
self.server['id'], 10)
| apache-2.0 | -3,149,787,669,873,692,700 | 46.709596 | 79 | 0.614513 | false |
shiquanwang/pylearn2 | pylearn2/scripts/tutorials/softmax_regression/tests/test_softmaxreg.py | 1 | 1450 | """
Test for softmax_regression.ipynb
"""
import os
from pylearn2.testing.skip import skip_if_no_data
from pylearn2.config import yaml_parse
from theano import config
def test():
skip_if_no_data()
dirname = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
with open(os.path.join(dirname, 'sr_dataset.yaml'), 'r') as f:
dataset = f.read()
if config.mode == "DEBUG_MODE":
hyper_params = {'train_stop': 10}
else:
hyper_params = {'train_stop': 50}
dataset = dataset % (hyper_params)
with open(os.path.join(dirname, 'sr_model.yaml'), 'r') as f:
model = f.read()
with open(os.path.join(dirname, 'sr_algorithm.yaml'), 'r') as f:
algorithm = f.read()
if config.mode == "DEBUG_MODE":
hyper_params = {'batch_size': 10,
'valid_stop': 50010}
else:
hyper_params = {'batch_size': 10,
'valid_stop': 50050}
algorithm = algorithm % (hyper_params)
with open(os.path.join(dirname, 'sr_train.yaml'), 'r') as f:
train = f.read()
save_path = os.path.dirname(os.path.realpath(__file__))
train = train % locals()
train = yaml_parse.load(train)
train.main_loop()
try:
os.remove("{}/softmax_regression.pkl".format(save_path))
os.remove("{}/softmax_regression_best.pkl".format(save_path))
except:
pass
if __name__ == '__main__':
test()
| bsd-3-clause | -464,289,127,205,744,400 | 24.892857 | 76 | 0.577241 | false |
CCharlieLi/StaffManagmentSystem | Website/admin.py | 1 | 1775 | from django.contrib import admin
from Website.models import *
from django import forms
from django.utils.translation import ugettext_lazy
from Skyrover.widgets import KindEditor
# Register your models here.
class kindeditorNewsForm(forms.ModelForm):
Content = forms.CharField(label=ugettext_lazy(u"Content"), widget=KindEditor(attrs={'rows':15, 'cols':100}),required=True)
class Meta:
model = News
fields = "__all__"
class NewsAdmin(admin.ModelAdmin):
list_display = ('Title','Publsh_Date')
form = kindeditorNewsForm
class kindeditorAnnounceForm(forms.ModelForm):
Content = forms.CharField(label=ugettext_lazy(u"Content"), widget=KindEditor(attrs={'rows':15, 'cols':100}),required=True)
class Meta:
model = Announce
fields = "__all__"
class AnnounceAdmin(admin.ModelAdmin):
list_display = ('Title','Publsh_Date')
form = kindeditorAnnounceForm
class PolicyAdmin(admin.ModelAdmin):
list_display = ('Name','Publsh_Date','keyword')
class MagazineAdmin(admin.ModelAdmin):
list_display = ('Name','Publsh_Date','keyword')
class PartnershipAdmin(admin.ModelAdmin):
list_display = ('Name','PeopleType')
class kindeditorPeopleForm(forms.ModelForm):
Content = forms.CharField(label=ugettext_lazy(u"Content"), widget=KindEditor(attrs={'rows':15, 'cols':100}),required=True)
class Meta:
model = People
fields = "__all__"
class PeopleAdmin(admin.ModelAdmin):
list_display = ('Name','PeopleType')
form = kindeditorPeopleForm
admin.site.register(News,NewsAdmin)
admin.site.register(Announce,AnnounceAdmin)
admin.site.register(Policy,PolicyAdmin)
admin.site.register(Magazine,MagazineAdmin)
admin.site.register(PeopleType)
admin.site.register(Partnership,PartnershipAdmin)
admin.site.register(People,PeopleAdmin)
admin.site.register(Group)
| gpl-2.0 | -58,941,322,863,837,096 | 31.272727 | 123 | 0.761127 | false |
uclmr/inferbeddings | scripts/fb15k/UCL_FB15K_adv_v3.1.py | 1 | 3991 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import itertools
import os.path
import sys
import argparse
import logging
def cartesian_product(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
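# Illustrative sketch (hypothetical values, not part of the original script):
# list(cartesian_product({'lr': [0.1, 0.01], 'margin': [1, 2]})) produces one
# dict per combination of hyperparameter values, i.e.
# [{'lr': 0.1, 'margin': 1}, {'lr': 0.1, 'margin': 2},
#  {'lr': 0.01, 'margin': 1}, {'lr': 0.01, 'margin': 2}].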
def summary(configuration):
kvs = sorted([(k, v) for k, v in configuration.items()], key=lambda e: e[0])
return '_'.join([('%s=%s' % (k, v)) for (k, v) in kvs])
def to_cmd(c, _path=None):
if _path is None:
_path = '/home/pminervi/workspace/inferbeddings/'
command = 'python3 {}/bin/kbp-cli.py' \
' --train {}/data/fb15k/freebase_mtr100_mte100-train.txt' \
' --valid {}/data/fb15k/freebase_mtr100_mte100-valid.txt' \
' --test {}/data/fb15k/freebase_mtr100_mte100-test.txt' \
' --clauses {}/data/fb15k/clauses/clauses_0.999.pl' \
' --nb-epochs {}' \
' --lr {}' \
' --nb-batches {}' \
' --model {}' \
' --similarity {}' \
' --margin {}' \
' --embedding-size {}' \
' --adv-lr {} --adv-init-ground --adversary-epochs {}' \
' --discriminator-epochs {} --adv-weight {} --adv-batch-size {}' \
' --predicate-norm 1'.format(_path, _path, _path, _path, _path,
c['epochs'], c['lr'], c['batches'],
c['model'], c['similarity'],
c['margin'], c['embedding_size'],
c['adv_lr'], c['adv_epochs'],
c['disc_epochs'], c['adv_weight'], c['adv_batch_size'])
return command
def to_logfile(c, path):
outfile = "%s/ucl_fb15k_adv_v3.1.%s.log" % (path, summary(c))
return outfile
def main(argv):
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=100, width=200)
argparser = argparse.ArgumentParser('Generating experiments for the UCL cluster', formatter_class=formatter)
argparser.add_argument('--debug', '-D', action='store_true', help='Debug flag')
argparser.add_argument('--path', '-p', action='store', type=str, default=None, help='Path')
args = argparser.parse_args(argv)
hyperparameters_space = dict(
epochs=[100],
optimizer=['adagrad'],
lr=[.1],
batches=[10],
model=['ComplEx'],
similarity=['dot'],
margin=[2, 5, 10],
embedding_size=[20, 50, 100, 150, 200],
adv_lr=[.1],
adv_epochs=[0, 1, 10],
disc_epochs=[1, 10],
adv_weight=[0, 1, 10, 100, 1000, 10000],
adv_batch_size=[1, 10, 100]
)
configurations = cartesian_product(hyperparameters_space)
path = '/home/pminervi/workspace/inferbeddings/logs/ucl_fb15k_adv_v3.1/'
if not os.path.exists(path):
os.makedirs(path)
for job_id, cfg in enumerate(configurations):
logfile = to_logfile(cfg, path)
completed = False
if os.path.isfile(logfile):
with open(logfile, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read()
completed = '### MICRO (test filtered)' in content
if not completed:
line = '{} >> {} 2>&1'.format(to_cmd(cfg, _path=args.path), logfile)
if args.debug:
print(line)
else:
file_name = 'ucl_fb15k_adv_v3.1_{}.job'.format(job_id)
alias = ''
job_script = '#$ -S /bin/bash\n' \
'#$ -wd /tmp/\n' \
'#$ -l h_vmem=12G,tmem=12G\n' \
'#$ -l h_rt=96:00:00\n' \
'{}\n{}\n'.format(alias, line)
with open(file_name, 'w') as f:
f.write(job_script)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(sys.argv[1:])
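# Illustrative follow-up (an assumption, not part of the original script): each
# generated ucl_fb15k_adv_v3.1_<job_id>.job file contains SGE directives and is
# meant to be submitted to the cluster scheduler, e.g.
#   qsub ucl_fb15k_adv_v3.1_0.job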
| mit | -8,338,077,182,654,248,000 | 34.633929 | 112 | 0.496617 | false |
TomTranter/OpenPNM | tests/unit/models/geometry/PoreSeedTest.py | 1 | 1690 | import numpy as np
import scipy as sp
import openpnm as op
import openpnm.models.geometry.pore_seed as mods
class PoreSeedTest:
def setup_class(self):
self.net = op.network.Cubic(shape=[5, 5, 5])
self.geo = op.geometry.GenericGeometry(network=self.net,
pores=self.net.Ps,
throats=self.net.Ts)
def test_random(self):
f = mods.random
self.geo.add_model(propname='pore.seed',
model=f,
seed=0,
num_range=[0.1, 2])
assert np.amax(self.geo['pore.seed']) > 1
assert np.amin(self.geo['pore.seed']) < 1
def test_spatially_correlated(self):
f = mods.spatially_correlated
self.geo.add_model(propname='pore.seed',
model=f,
weights=[2, 2, 2],
regen_mode='normal')
assert np.amin(self.geo['pore.seed'] > 0)
assert np.amax(self.geo['pore.seed'] < 1)
def test_spatially_correlated_zero_weights(self):
f = mods.spatially_correlated
self.geo.add_model(propname='pore.seed',
model=f,
weights=[0, 0, 0],
regen_mode='normal')
assert np.amin(self.geo['pore.seed'] > 0)
assert np.amax(self.geo['pore.seed'] < 1)
if __name__ == '__main__':
t = PoreSeedTest()
self = t
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
| mit | 6,806,431,745,261,210,000 | 32.8 | 67 | 0.489941 | false |
cmoutard/mne-python | mne/io/brainvision/brainvision.py | 1 | 20471 | # -*- coding: utf-8 -*-
"""Conversion tool from Brain Vision EEG to FIF"""
# Authors: Teon Brooks <[email protected]>
# Christian Brodbeck <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os
import time
import re
import warnings
import numpy as np
from ...utils import verbose, logger
from ..constants import FIFF
from ..meas_info import _empty_info
from ..base import _BaseRaw, _check_update_montage
from ..utils import _read_segments_file
from ...externals.six import StringIO
from ...externals.six.moves import configparser
class RawBrainVision(_BaseRaw):
"""Raw object from Brain Vision EEG file
Parameters
----------
vhdr_fname : str
Path to the EEG header file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the vhdr file.
Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes
in the vhdr file. Default is ``()``.
scale : float
The scaling factor for EEG data. Units are in volts. Default scale
factor is 1. For microvolts, the scale factor would be 1e-6. This is
used when the header file does not specify the scale factor.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
response_trig_shift : int | None
An integer that will be added to all response triggers when reading
events (stimulus triggers will be unaffected). If None, response
triggers will be ignored. Default is 0 for backwards compatibility, but
typically another value or None will be necessary.
event_id : dict | None
The id of the event to consider. If None (default),
only stimulus events are added to the stimulus channel. If dict,
the keys will be mapped to trigger values on the stimulus channel
in addition to the stimulus events. Keys are case-sensitive.
Example: {'SyncStatus': 1; 'Pulse Artifact': 3}.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, vhdr_fname, montage=None,
eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=(),
scale=1., preload=False, response_trig_shift=0,
event_id=None, verbose=None):
# Channel info and events
logger.info('Extracting parameters from %s...' % vhdr_fname)
vhdr_fname = os.path.abspath(vhdr_fname)
info, fmt, self._order, mrk_fname, montage = _get_vhdr_info(
vhdr_fname, eog, misc, scale, montage)
events = _read_vmrk_events(mrk_fname, event_id, response_trig_shift)
_check_update_montage(info, montage)
with open(info['filename'], 'rb') as f:
f.seek(0, os.SEEK_END)
n_samples = f.tell()
dtype_bytes = _fmt_byte_dict[fmt]
self.preload = False # so the event-setting works
last_samps = [(n_samples // (dtype_bytes * (info['nchan'] - 1))) - 1]
self._create_event_ch(events, last_samps[0] + 1)
super(RawBrainVision, self).__init__(
info, last_samps=last_samps, filenames=[info['filename']],
orig_format=fmt, preload=preload, verbose=verbose)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data"""
# read data
dtype = _fmt_dtype_dict[self.orig_format]
n_data_ch = len(self.ch_names) - 1
_read_segments_file(self, data, idx, fi, start, stop, cals, mult,
dtype=dtype, n_channels=n_data_ch,
trigger_ch=self._event_ch)
def get_brainvision_events(self):
"""Retrieve the events associated with the Brain Vision Raw object
Returns
-------
events : array, shape (n_events, 3)
Events, each row consisting of an (onset, duration, trigger)
sequence.
"""
return self._events.copy()
def set_brainvision_events(self, events):
"""Set the events and update the synthesized stim channel
Parameters
----------
events : array, shape (n_events, 3)
Events, each row consisting of an (onset, duration, trigger)
sequence.
"""
self._create_event_ch(events)
def _create_event_ch(self, events, n_samp=None):
"""Create the event channel"""
if n_samp is None:
n_samp = self.last_samp - self.first_samp + 1
events = np.array(events, int)
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError("[n_events x 3] shaped array required")
# update events
self._event_ch = _synthesize_stim_channel(events, n_samp)
self._events = events
if self.preload:
self._data[-1] = self._event_ch
def _read_vmrk_events(fname, event_id=None, response_trig_shift=0):
"""Read events from a vmrk file
Parameters
----------
fname : str
vmrk file to be read.
event_id : dict | None
The id of the event to consider. If dict, the keys will be mapped to
trigger values on the stimulus channel. Example:
{'SyncStatus': 1; 'Pulse Artifact': 3}. If empty dict (default),
only stimulus events are added to the stimulus channel.
response_trig_shift : int | None
Integer to shift response triggers by. None ignores response triggers.
Returns
-------
events : array, shape (n_events, 3)
An array containing the whole recording's events, each row representing
an event as (onset, duration, trigger) sequence.
"""
if event_id is None:
event_id = dict()
# read vmrk file
with open(fname, 'rb') as fid:
txt = fid.read().decode('utf-8')
header = txt.split('\n')[0].strip()
_check_mrk_version(header)
if (response_trig_shift is not None and
not isinstance(response_trig_shift, int)):
raise TypeError("response_trig_shift must be an integer or None")
# extract Marker Infos block
m = re.search("\[Marker Infos\]", txt)
if not m:
return np.zeros(0)
mk_txt = txt[m.end():]
m = re.search("\[.*\]", mk_txt)
if m:
mk_txt = mk_txt[:m.start()]
# extract event information
items = re.findall("^Mk\d+=(.*)", mk_txt, re.MULTILINE)
events = []
for info in items:
mtype, mdesc, onset, duration = info.split(',')[:4]
onset = int(onset)
duration = (int(duration) if duration.isdigit() else 1)
try:
trigger = int(re.findall('[A-Za-z]*\s*?(\d+)', mdesc)[0])
except IndexError:
trigger = None
if mtype.lower().startswith('response'):
if response_trig_shift is not None:
trigger += response_trig_shift
else:
trigger = None
if mdesc in event_id:
trigger = event_id[mdesc]
if trigger:
events.append((onset, duration, trigger))
events = np.array(events).reshape(-1, 3)
return events
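# Illustrative note (not part of the original module): with the default
# arguments, a marker line such as "Mk2=Stimulus,S 12,4876,1,0" is parsed
# into the event row (4876, 1, 12), i.e. (onset sample, duration, trigger).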
def _synthesize_stim_channel(events, n_samp):
"""Synthesize a stim channel from events read from a vmrk file
Parameters
----------
events : array, shape (n_events, 3)
Each row representing an event as (onset, duration, trigger) sequence
(the format returned by _read_vmrk_events).
n_samp : int
The number of samples.
Returns
-------
stim_channel : array, shape (n_samples,)
An array containing the whole recording's event marking
"""
# select events overlapping buffer
onset = events[:, 0]
# create output buffer
stim_channel = np.zeros(n_samp, int)
for onset, duration, trigger in events:
stim_channel[onset:onset + duration] = trigger
return stim_channel
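# Minimal sketch of the mapping above (hypothetical values):
# _synthesize_stim_channel(np.array([[2, 3, 5]]), 10) returns
# array([0, 0, 5, 5, 5, 0, 0, 0, 0, 0]) -- trigger 5 written at samples 2..4
# (onset 2, duration 3) of an otherwise zero stim channel.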
def _check_hdr_version(header):
tags = ['Brain Vision Data Exchange Header File Version 1.0',
'Brain Vision Data Exchange Header File Version 2.0']
if header not in tags:
raise ValueError("Currently only support %r, not %r"
"Contact MNE-Developers for support."
% (str(tags), header))
def _check_mrk_version(header):
tags = ['Brain Vision Data Exchange Marker File, Version 1.0',
'Brain Vision Data Exchange Marker File, Version 2.0']
if header not in tags:
raise ValueError("Currently only support %r, not %r"
"Contact MNE-Developers for support."
% (str(tags), header))
_orientation_dict = dict(MULTIPLEXED='F', VECTORIZED='C')
_fmt_dict = dict(INT_16='short', INT_32='int', IEEE_FLOAT_32='single')
_fmt_byte_dict = dict(short=2, int=4, single=4)
_fmt_dtype_dict = dict(short='<i2', int='<i4', single='<f4')
_unit_dict = {'V': 1., u'µV': 1e-6}
def _get_vhdr_info(vhdr_fname, eog, misc, scale, montage):
"""Extracts all the information from the header file.
Parameters
----------
vhdr_fname : str
Raw EEG header to be read.
eog : list of str
Names of channels that should be designated EOG channels. Names should
correspond to the vhdr file.
misc : list of str
Names of channels that should be designated MISC channels. Names
should correspond to the electrodes in the vhdr file.
scale : float
The scaling factor for EEG data. Units are in volts. Default scale
factor is 1.. For microvolts, the scale factor would be 1e-6. This is
used when the header file does not specify the scale factor.
montage : str | True | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
Returns
-------
info : Info
The measurement info.
fmt : str
The data format in the file.
edf_info : dict
A dict containing Brain Vision specific parameters.
events : array, shape (n_events, 3)
Events from the corresponding vmrk file.
"""
scale = float(scale)
ext = os.path.splitext(vhdr_fname)[-1]
if ext != '.vhdr':
raise IOError("The header file must be given to read the data, "
"not the '%s' file." % ext)
with open(vhdr_fname, 'rb') as f:
# extract the first section to resemble a cfg
header = f.readline().decode('utf-8').strip()
_check_hdr_version(header)
settings = f.read().decode('utf-8')
if settings.find('[Comment]') != -1:
params, settings = settings.split('[Comment]')
else:
params, settings = settings, ''
cfg = configparser.ConfigParser()
if hasattr(cfg, 'read_file'): # newer API
cfg.read_file(StringIO(params))
else:
cfg.readfp(StringIO(params))
# get sampling info
# Sampling interval is given in microsec
sfreq = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
info = _empty_info(sfreq)
# check binary format
assert cfg.get('Common Infos', 'DataFormat') == 'BINARY'
order = cfg.get('Common Infos', 'DataOrientation')
if order not in _orientation_dict:
raise NotImplementedError('Data Orientation %s is not supported'
% order)
order = _orientation_dict[order]
fmt = cfg.get('Binary Infos', 'BinaryFormat')
if fmt not in _fmt_dict:
raise NotImplementedError('Datatype %s is not supported' % fmt)
fmt = _fmt_dict[fmt]
# load channel labels
info['nchan'] = cfg.getint('Common Infos', 'NumberOfChannels') + 1
ch_names = [''] * info['nchan']
cals = np.empty(info['nchan'])
ranges = np.empty(info['nchan'])
cals.fill(np.nan)
ch_dict = dict()
for chan, props in cfg.items('Channel Infos'):
n = int(re.findall(r'ch(\d+)', chan)[0]) - 1
props = props.split(',')
if len(props) < 4:
props += ('V',)
name, _, resolution, unit = props[:4]
ch_dict[chan] = name
ch_names[n] = name
if resolution == "":
if not(unit): # For truncated vhdrs (e.g. EEGLAB export)
resolution = 0.000001
else:
resolution = 1. # for files with units specified, but not res
unit = unit.replace(u'\xc2', u'') # Remove unwanted control characters
cals[n] = float(resolution)
ranges[n] = _unit_dict.get(unit, unit) * scale
# create montage
if montage is True:
from ...transforms import _sphere_to_cartesian
from ...channels.montage import Montage
montage_pos = list()
montage_names = list()
for ch in cfg.items('Coordinates'):
montage_names.append(ch_dict[ch[0]])
radius, theta, phi = map(float, ch[1].split(','))
# 1: radius, 2: theta, 3: phi
pos = _sphere_to_cartesian(r=radius, theta=theta, phi=phi)
montage_pos.append(pos)
montage_sel = np.arange(len(montage_pos))
montage = Montage(montage_pos, montage_names, 'Brainvision',
montage_sel)
ch_names[-1] = 'STI 014'
cals[-1] = 1.
ranges[-1] = 1.
if np.isnan(cals).any():
raise RuntimeError('Missing channel units')
# Attempts to extract filtering info from header. If not found, both are
# set to zero.
settings = settings.splitlines()
idx = None
if 'Channels' in settings:
idx = settings.index('Channels')
settings = settings[idx + 1:]
for idx, setting in enumerate(settings):
if re.match('#\s+Name', setting):
break
else:
idx = None
if idx:
lowpass = []
highpass = []
for i, ch in enumerate(ch_names[:-1], 1):
line = settings[idx + i].split()
assert ch in line
highpass.append(line[5])
lowpass.append(line[6])
if len(highpass) == 0:
pass
elif all(highpass):
if highpass[0] == 'NaN':
pass # Placeholder for future use. Highpass set in _empty_info
elif highpass[0] == 'DC':
info['highpass'] = 0.
else:
info['highpass'] = float(highpass[0])
else:
info['highpass'] = np.min(np.array(highpass, dtype=np.float))
warnings.warn('%s' % ('Channels contain different highpass '
'filters. Highest filter setting will '
'be stored.'))
if len(lowpass) == 0:
pass
elif all(lowpass):
if lowpass[0] == 'NaN':
pass # Placeholder for future use. Lowpass set in _empty_info
else:
info['lowpass'] = float(lowpass[0])
else:
info['lowpass'] = np.min(np.array(lowpass, dtype=np.float))
warnings.warn('%s' % ('Channels contain different lowpass filters.'
' Lowest filter setting will be stored.'))
# Post process highpass and lowpass to take into account units
header = settings[idx].split(' ')
header = [h for h in header if len(h)]
if '[s]' in header[4] and (info['highpass'] > 0):
info['highpass'] = 1. / info['highpass']
if '[s]' in header[5]:
info['lowpass'] = 1. / info['lowpass']
# locate EEG and marker files
path = os.path.dirname(vhdr_fname)
info['filename'] = os.path.join(path, cfg.get('Common Infos', 'DataFile'))
info['meas_date'] = int(time.time())
info['buffer_size_sec'] = 1. # reasonable default
# Creates a list of dicts of eeg channels for raw.info
logger.info('Setting channel info structure...')
info['chs'] = []
info['ch_names'] = ch_names
for idx, ch_name in enumerate(ch_names):
if ch_name in eog or idx in eog or idx - info['nchan'] in eog:
kind = FIFF.FIFFV_EOG_CH
coil_type = FIFF.FIFFV_COIL_NONE
unit = FIFF.FIFF_UNIT_V
elif ch_name in misc or idx in misc or idx - info['nchan'] in misc:
kind = FIFF.FIFFV_MISC_CH
coil_type = FIFF.FIFFV_COIL_NONE
unit = FIFF.FIFF_UNIT_V
elif ch_name == 'STI 014':
kind = FIFF.FIFFV_STIM_CH
coil_type = FIFF.FIFFV_COIL_NONE
unit = FIFF.FIFF_UNIT_NONE
else:
kind = FIFF.FIFFV_EEG_CH
coil_type = FIFF.FIFFV_COIL_EEG
unit = FIFF.FIFF_UNIT_V
info['chs'].append(dict(
ch_name=ch_name, coil_type=coil_type, kind=kind, logno=idx + 1,
scanno=idx + 1, cal=cals[idx], range=ranges[idx], loc=np.zeros(12),
unit=unit, unit_mul=0., # always zero- mne manual pg. 273
coord_frame=FIFF.FIFFV_COORD_HEAD))
# for stim channel
mrk_fname = os.path.join(path, cfg.get('Common Infos', 'MarkerFile'))
info._check_consistency()
return info, fmt, order, mrk_fname, montage
def read_raw_brainvision(vhdr_fname, montage=None,
eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=(),
scale=1., preload=False, response_trig_shift=0,
event_id=None, verbose=None):
"""Reader for Brain Vision EEG file
Parameters
----------
vhdr_fname : str
Path to the EEG header file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list or tuple of str
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the vhdr file
Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.
misc : list or tuple of str
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes
in the vhdr file. Default is ``()``.
scale : float
The scaling factor for EEG data. Units are in volts. Default scale
factor is 1. For microvolts, the scale factor would be 1e-6. This is
used when the header file does not specify the scale factor.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
response_trig_shift : int | None
An integer that will be added to all response triggers when reading
events (stimulus triggers will be unaffected). If None, response
triggers will be ignored. Default is 0 for backwards compatibility, but
typically another value or None will be necessary.
event_id : dict | None
The id of the event to consider. If None (default),
only stimulus events are added to the stimulus channel. If dict,
the keys will be mapped to trigger values on the stimulus channel
in addition to the stimulus events. Keys are case-sensitive.
Example: {'SyncStatus': 1; 'Pulse Artifact': 3}.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : instance of RawBrainVision
A Raw object containing BrainVision data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
raw = RawBrainVision(vhdr_fname=vhdr_fname, montage=montage, eog=eog,
misc=misc, scale=scale,
preload=preload, verbose=verbose, event_id=event_id,
response_trig_shift=response_trig_shift)
return raw
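# Hedged usage sketch (the file name is hypothetical, not part of this module):
#
#     raw = read_raw_brainvision('subject01.vhdr', preload=True)
#     events = raw.get_brainvision_events()  # rows of (onset, duration, trigger)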
| bsd-3-clause | -8,863,967,679,030,398,000 | 37.91635 | 79 | 0.593796 | false |
cstlee/kafkamark | scripts/kafkamark_plot.py | 1 | 1296 | # ISC License
#
# Copyright (c) 2017, Stanford University
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
'''
usage:
kafkamark plot [options] <datafile>
options:
-h, --help
'''
import matplotlib.pyplot as plt
import numpy as np
from docopt import docopt
def plot(argv):
args = docopt(__doc__, argv=argv)
x = []
y = []
with open(args['<datafile>'], 'r') as f:
for line in f.readlines():
if line[0] == '#':
continue
data = line.split()
x.append(float(data[0]))
y.append(float(data[1]))
plt.semilogx(x, y)
plt.show()
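# Expected datafile layout (illustrative values, not from the repository):
# lines starting with '#' are skipped and the first two whitespace-separated
# columns of each remaining line are plotted as x and y, e.g.
#   # x y
#   0.001 50.0
#   0.010 99.0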
| isc | 2,795,149,768,533,533,000 | 29.139535 | 79 | 0.677469 | false |
weedge/doubanFmSpeackerPi | plugin/fm/baseFM.py | 1 | 3321 | # -*- coding: utf-8-*-
import os
import logging
import pipes
import tempfile
import subprocess
import psutil
import signal
import lib.appPath
from lib.baseClass import AbstractClass
class AbstractFM(AbstractClass):
"""
Generic parent class for FM class
"""
@classmethod
def is_available(cls):
return (super(cls, cls).is_available() and
lib.diagnose.check_executable('mplayer'))
def mplay(self, url):
cmd = ['mplayer', str(url)]
cmd_str = ' '.join([pipes.quote(arg) for arg in cmd])
self._logger.debug('Executing %s', cmd_str)
with tempfile.TemporaryFile() as f:
self._mplay_process = subprocess.Popen(cmd,stdout=f,stderr=f,preexec_fn=os.setsid)
self._logger.debug("mplayer pid: '%d'", self._mplay_process.pid)
            # while playing, save the mplayer pid (this pid is the process group id)
pid_file = os.path.join(lib.appPath.DATA_PATH,self.__class__.__name__+"_mplay.pid")
with open(pid_file, 'w') as pid_fp:
pid_fp.write(str(self._mplay_process.pid))
pid_fp.close()
self._mplay_process.wait()
            # remove the mplayer pid file once playback has finished
if os.path.exists(pid_file):
os.remove(pid_file)
f.seek(0)
output = f.read()
if output:
self._logger.debug("Output was: '%s'", output)
def kill_mplay_procsss(self):
'''
        kill the currently playing mplayer process (the pid is read from the pid file)
'''
pid_file = os.path.join(lib.appPath.DATA_PATH,self.__class__.__name__+"_mplay.pid")
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
pid = int(f.read())
f.close()
if pid:
self._logger.debug("pgkill mplay pid: %d",pid)
os.killpg(pid,signal.SIGKILL)
def suspend_mplay_process(self):
'''
        suspend the currently playing mplayer process (the pid is read from the pid file)
'''
res = None
pid_file = os.path.join(lib.appPath.DATA_PATH,self.__class__.__name__+"_mplay.pid")
with open(pid_file, 'r') as f:
pid = int(f.read())
f.close()
if pid:
self._logger.debug("suspend mplay pid: %d",pid)
res = psutil.Process(pid).suspend()
return res
def resume_mplay_process(self):
'''
        resume the currently playing mplayer process (the pid is read from the pid file)
'''
        res = None
        pid_file = os.path.join(lib.appPath.DATA_PATH,self.__class__.__name__+"_mplay.pid")
with open(pid_file, 'r') as f:
pid = int(f.read())
f.close()
if pid:
self._logger.debug("resume mplay pid: %d",pid)
res = psutil.Process(pid).resume()
return res
def login(self):
pass
def getAccessToken(self):
pass
def getSong(self):
pass
def setLikeSong(self):
pass
def setUnLikeSong(self):
pass
def setHateSong(self):
pass
def downloadSong(self):
pass
def next(self):
pass
def stop(self):
pass
def play(self):
pass
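# Hedged sketch of a concrete subclass (class name and URL are hypothetical,
# not part of this file); a subclass only needs to fill in the hooks above:
#
# class DemoFM(AbstractFM):
#     def getSong(self):
#         return 'http://example.com/demo.mp3'
#
#     def play(self):
#         self.mplay(self.getSong())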
| apache-2.0 | 4,241,096,129,657,073,000 | 25.822034 | 95 | 0.52575 | false |
daniel-severo/dask-ml | tests/test_metrics.py | 1 | 2139 | import pytest
import dask.array as da
import numpy as np
import numpy.testing as npt
from dask.array.utils import assert_eq
import sklearn.metrics as sm
import dask_ml.metrics as dm
def test_pairwise_distances(X_blobs):
centers = X_blobs[::100].compute()
result = dm.pairwise_distances(X_blobs, centers)
expected = sm.pairwise_distances(X_blobs.compute(), centers)
assert_eq(result, expected, atol=1e-4)
def test_pairwise_distances_argmin_min(X_blobs):
centers = X_blobs[::100].compute()
a_, b_ = sm.pairwise_distances_argmin_min(X_blobs.compute(), centers)
a, b = dm.pairwise_distances_argmin_min(X_blobs, centers)
npt.assert_array_equal(a.compute(), a_)
npt.assert_array_equal(b.compute(), b_)
def test_euclidean_distances():
X = da.random.uniform(size=(100, 4), chunks=50)
Y = da.random.uniform(size=(100, 4), chunks=50)
a = dm.euclidean_distances(X, Y)
b = sm.euclidean_distances(X, Y)
assert_eq(a, b)
x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
a = dm.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
b = sm.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
assert_eq(a, b)
y_norm_squared = (Y ** 2).sum(axis=1).compute()[np.newaxis, :]
a = dm.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
b = sm.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
assert_eq(a, b)
def test_euclidean_distances_same():
X = da.random.uniform(size=(100, 4), chunks=50)
a = dm.euclidean_distances(X, X)
b = sm.euclidean_distances(X, X)
assert_eq(a, b, atol=1e-4)
x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
assert_eq(X, X, Y_norm_squared=x_norm_squared, atol=1e-4)
@pytest.mark.parametrize('kernel', [
'linear',
'polynomial',
'rbf',
'sigmoid',
])
def test_pairwise_kernels(kernel):
X = da.random.uniform(size=(100, 4), chunks=(50, 4))
a = dm.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]
b = sm.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]
r1 = a(X)
r2 = b(X.compute())
assert isinstance(X, da.Array)
assert_eq(r1, r2)
| bsd-3-clause | 7,806,270,662,457,897,000 | 30.455882 | 73 | 0.655914 | false |
balloob/pychromecast | examples/yleareena_example.py | 1 | 2570 | """
Example on how to use the Yle Areena Controller
"""
import argparse
import logging
import sys
from time import sleep
import pychromecast
from pychromecast.controllers.yleareena import YleAreenaController
import zeroconf
logger = logging.getLogger(__name__)
# Change to the name of your Chromecast
CAST_NAME = "My Chromecast"
parser = argparse.ArgumentParser(
description="Example on how to use the Yle Areena Controller.")
parser.add_argument('--show-debug', help='Enable debug log',
action='store_true')
parser.add_argument('--cast',
help='Name of cast device (default: "%(default)s")',
default=CAST_NAME)
parser.add_argument('--program', help='Areena Program ID',
default="1-50097921")
parser.add_argument('--audio_language', help='audio_language',
default="")
parser.add_argument('--text_language', help='text_language',
default="off")
parser.add_argument('--show-zeroconf-debug', help='Enable zeroconf debug log',
                    action='store_true')
args = parser.parse_args()
if args.show_debug:
logging.basicConfig(level=logging.DEBUG)
if args.show_zeroconf_debug:
print("Zeroconf version: " + zeroconf.__version__)
logging.getLogger("zeroconf").setLevel(logging.DEBUG)
def get_kaltura_id(program_id):
"""
Dive into the yledl internals and fetch the kaltura player id.
This can be used with Chromecast
"""
from yledl.streamfilters import StreamFilters
from yledl.http import HttpClient
from yledl.localization import TranslationChooser
from yledl.extractors import extractor_factory
from yledl.titleformatter import TitleFormatter
title_formatter = TitleFormatter()
language_chooser = TranslationChooser('fin')
httpclient = HttpClient(None)
stream_filters = StreamFilters()
url = 'https://areena.yle.fi/{}'.format(program_id)
extractor = extractor_factory(url, stream_filters, language_chooser, httpclient)
pid = extractor.program_id_from_url(url)
info = extractor.program_info_for_pid(pid, url, title_formatter, None)
return info.media_id.split('-')[-1]
chromecasts, browser = pychromecast.get_listed_chromecasts(friendly_names=[args.cast])
if not chromecasts:
print('No chromecast with name "{}" discovered'.format(args.cast))
sys.exit(1)
cast = chromecasts[0]
# Start socket client's worker thread and wait for initial status update
cast.wait()
yt = YleAreenaController()
cast.register_handler(yt)
yt.play_areena_media(entry_id=get_kaltura_id(args.program), audio_language=args.audio_language, text_language=args.text_language)
sleep(10)
| mit | -3,048,973,992,877,498,000 | 31.125 | 129 | 0.706226 | false |
ecodiv/code-snippets | cross-validation/cross_fold_validation.py | 1 | 8776 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
DESCRIPTION: Code to run an n-fold cross validation on the results of the
             GRASS GIS v.surf.bspline and v.surf.idw functions. This code is
             used in a tutorial about carrying out n-fold cross validation in
             GRASS GIS (https://tutorials.ecodiv.earth/toc/cross_validation.html).
NOTE: Code should work on GRASS GIS 7.2 + and should be run from
within a GRASS session.
@author: pvbreugel add ecodiv dot earth (2016)
"""
# Modules
# -----------------------------------------------------------------------------
import os
import sys
import numpy as np
import uuid
import tempfile
import string
from grass.pygrass.modules import Module
import grass.script as gs
from subprocess import PIPE
# Functions
# -----------------------------------------------------------------------------
def tmpname(prefix):
"""Generate a tmp name which contains prefix
    The uuid-based suffix makes the name unique; dashes are replaced by
    underscores so the name can be used for GRASS raster and vector maps.
"""
tmpf = prefix + str(uuid.uuid4())
tmpf = string.replace(tmpf, '-', '_')
return tmpf
def bspline_param(vectormap, depvar):
"""Get output bspline parameter estimates"""
stfact = Module("v.surf.bspline", flags="ec", input=vectormap,
column=depvar, memory=1024, stdout_=PIPE).outputs.stdout
stepdist = float(stfact.split(':')[-1].strip())
stfact = Module("v.surf.bspline", flags="c", input=vectormap,
ew_step=stepdist, ns_step=stepdist, column=depvar,
memory=1024, stdout_=PIPE).outputs.stdout
stfact = stfact.replace(" ", "")
stfact = stfact.split("\n")[1:-1]
stfact = [z.split("|") for z in stfact]
stfact = [[float(x) for x in y if x != ''] for y in stfact]
minlambda = min(stfact, key=lambda x: abs(x[1]))[0]
return(stepdist, minlambda)
def bspline_validation(vector, column, lambda_i, ew_step, ns_step, keep,
npartitions=4, method='bilinear', solver='cholesky',
maxit=10000, memory=2048):
"""Compute validation statistics (rsme) for bspline extrapolation"""
# Temporary map
tmpmap = tmpname("cvbase_")
Module("g.copy", vector=[vector, tmpmap])
# Compute rsme over model with all callibration points
tmpout = tmpname("cvtmp4_")
Module("v.surf.bspline", input=tmpmap, column=column, ew_step=ew_step,
ns_step=ns_step, method=method, lambda_i=lambda_i,
solver=solver, maxit=maxit, memory=memory, raster_output=tmpout)
Module("v.what.rast", map=tmpmap, raster=tmpout, column="bspline")
stats = Module("db.select", flags="c", sql="SELECT {},bspline FROM {}".
format(column, tmpmap), stdout_=PIPE).outputs.stdout
stats = stats.replace("\n", "|")[:-1].split("|")
stats = (np.asarray([float(x) for x in stats], dtype="float").
reshape(len(stats)/2, 2))
rsme_all = np.sqrt(np.mean(np.diff(stats, axis=1)**2))
if keep:
Module("g.rename", raster=[tmpout, keep])
else:
Module("g.remove", type="raster", name=tmpout, flags="f")
# Run n-fold crossvalidation
if npartitions > 0:
Module("v.kcv", map=tmpmap, npartitions=npartitions)
rsme = []
for i in range(1, npartitions+1):
tmp_cal = tmpname("cvtmp_calibrate_")
tmp_val = tmpname("cvtmp_validate_")
tmpout1 = tmpname("cvtmp_output_1_")
tmpout2 = tmpname("cvtmp_output_2_")
tmpout3 = tmpname("cvtmp_output_3_")
Module("v.extract", flags="r", input=tmpmap, output=tmp_cal,
where="part={}".format(i))
Module("v.extract", input=tmpmap, where="part={}".format(i),
output=tmp_val)
Module("v.surf.bspline", input=tmp_cal, column=column,
ew_step=ew_step, ns_step=ns_step, method=method,
lambda_i=lambda_i, solver=solver, maxit=maxit,
memory=memory, output=tmpout1, sparse_input=tmp_val)
Module("v.category", input=tmpout1, output=tmpout2,
option="del", cat=-1)
Module("v.category", input=tmpout2, output=tmpout3, option="add")
Module("v.db.addtable", map=tmpout3)
Module("v.db.addcolumn", map=tmpout3,
columns=("x double precision, y double precision, "
"z double precision"))
Module("v.to.db", map=tmpout3, option="coor", columns="x,y,z")
# TODO: need to find out how to use the from_ with Module
gs.run_command("v.distance", from_=tmpout3, to=tmp_val,
upload="to_attr", column="x", to_column=column)
stats = Module("db.select", flags="c", sql="SELECT x, z FROM {}".
format(tmpout3), stdout_=PIPE).outputs.stdout
stats = stats.replace("\n", "|")[:-1].split("|")
stats = (np.asarray([float(x) for x in stats], dtype="float").
reshape(len(stats)/2, 2))
rsme.append(np.sqrt(np.mean(np.diff(stats, axis=1)**2)))
Module("g.remove", type="vector", pattern="cvtmp_*", flags="f")
Module("g.remove", type="vector", pattern="cvbase_*", flags="f")
return {'rsme_full': rsme_all, 'rsme_cv_mean': np.asarray(rsme).mean(),
'rsme_cv_std': np.asarray(rsme).std(), 'rsme_cv': rsme}
else:
return {'rsme_full': rsme_all}
def idw_validation(vector, column, keep, npoints=12, power=2, npartitions=10,
memory=2048):
"""Compute validation statistics (rsme) for idw extrapolation"""
# Temporary map
tmpmap = tmpname("cvbase_")
Module("g.copy", vector=[vector, tmpmap])
# Compute rsme over model with all callibration points
tmpout = tmpname("cvtmp4_")
Module("v.surf.idw", input=tmpmap, column=column, npoints=npoints,
power=power, output=tmpout)
Module("v.what.rast", map=tmpmap, raster=tmpout, column="idw")
stats = Module("db.select", flags="c", sql="SELECT {},idw FROM {}".
format(column, tmpmap), stdout_=PIPE).outputs.stdout
stats = stats.replace("\n", "|")[:-1].split("|")
stats = (np.asarray([float(x) for x in stats], dtype="float").
reshape(len(stats)/2, 2))
rsme_all = np.sqrt(np.mean(np.diff(stats, axis=1)**2))
if keep:
Module("g.rename", raster=[tmpout, keep])
else:
Module("g.remove", type="raster", name=tmpout, flags="f")
# Run n-fold crossvalidation
if npartitions > 0:
Module("v.kcv", map=tmpmap, npartitions=npartitions)
rsme = []
for i in range(1, npartitions+1):
tmppnt = tmpname("cvtmp2_")
tmpspa = tmpname("cvtmp3_")
tmpout = tmpname("cvtmp4_")
Module("v.extract", flags="r", input=tmpmap, output=tmppnt,
where="part={}".format(i))
Module("v.extract", input=tmpmap, where="part={}".format(i),
output=tmpspa)
Module("v.surf.idw", input=tmppnt, column=column, npoints=npoints,
power=power, output=tmpout)
Module("v.what.rast", map=tmpspa, raster=tmpout, column="idw")
stats = Module("db.select", flags="c",
sql="SELECT {},idw FROM {}".
format(column, tmpspa), stdout_=PIPE).outputs.stdout
stats = stats.replace("\n", "|")[:-1].split("|")
stats = (np.asarray([float(x) for x in stats], dtype="float").
reshape(len(stats)/2, 2))
rsme.append(np.sqrt(np.mean(np.diff(stats, axis=1)**2)))
Module("g.remove", type="all", pattern="cvtmp*", flags="f")
Module("g.remove", type="vector", pattern="cvbase_*", flags="f")
# Return output
return {'rsme_full': rsme_all, 'rsme_cv_mean': np.asarray(rsme).mean(),
'rsme_cv_std': np.asarray(rsme).std(), 'rsme_cv': rsme}
# Example
# -----------------------------------------------------------------------------
# Determine parameters
stepdist, minlambda = bspline_param(vectormap="households2", depvar="lv")
# Compute evaluation statistics
bspline_stats = bspline_validation(vector="households2", column="lv",
keep="lv2_bspline", lambda_i=minlambda,
ew_step=stepdist, ns_step=stepdist,
npartitions=10, method='bilinear',
solver='cholesky', maxit=10000, memory=2048)
idw_validation(vector="households2", column="lv", npoints=12, power=2,
npartitions=10, keep="lv2_idw")
| gpl-3.0 | 8,661,852,449,883,056,000 | 38.178571 | 79 | 0.559594 | false |
arider/riderml | riderml/regression/gradient_descent.py | 1 | 6474 | import numpy
ETA_PLUS = 1.2
ETA_MINUS = 0.5
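# Rprop/iRprop- step-size multipliers used by the optimizers below: while a
# partial derivative keeps its sign the per-weight step is grown by ETA_PLUS;
# when the sign flips the step is shrunk by ETA_MINUS and that weight's update
# is skipped for the iteration.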
def stochastic_gradient_descent(function,
derivative,
x, y,
theta=None,
iterations=100,
learning_rate=0.000001,
shuffle=True,
batch_size=.2):
"""
    Gradient descent with mini batches. batch_size is either a float giving
    the proportion of the data per batch or an int giving the batch size;
    1 means standard stochastic gradient descent.
args:
function - a function taking two arrays and returning one
derivative - derivative of the function
x - a numpy array; instances in rows
y - a numpy array
theta - initial coefficients.
iterations - number of iterations to do
learning_rate - initial learning rate
shuffle - whether or not to shuffle the data before each iteration
batch_size - proportion or integer size of batches.
"""
if theta is None:
theta = numpy.random.rand(x.shape[1], y.shape[1])
assert x.shape[1] == theta.shape[0]
# translate float into batch size int
batch_number = float(batch_size)
if batch_size < 1 and batch_size > 0:
batch_number = int(batch_size * x.shape[0])
if batch_number < 1:
batch_number = 1
# initialize feature specific learning rates
delta = numpy.zeros(theta.shape)
delta += learning_rate
previous_gradient = numpy.zeros([x.shape[1], theta.shape[1]])
current_theta = numpy.array(theta)
for iteration in range(iterations):
# shuffle data
if shuffle:
inds = numpy.random.permutation(range(x.shape[0]))
x = x[inds]
y = y[inds]
# process batches
batch_index = 0
for i in range(int(x.shape[0] / batch_number)):
if i == int(x.shape[0] / batch_number) - 1:
batch_inds = range(int(batch_index * batch_number), x.shape[0])
else:
batch_inds = range(int(batch_index * batch_number),
int((batch_index + 1) * batch_number))
batch_x = x[batch_inds]
batch_y = y[batch_inds]
loss = function(batch_x, current_theta) - batch_y
# avg gradient per example
            gradient = (derivative(batch_x, current_theta).T.dot(loss) /
                        batch_x.shape[0])
# update the learning rate
sign = numpy.sign(gradient * previous_gradient)
for ci in range(sign.shape[1]):
for f in range(sign.shape[0]):
if sign[f, ci] < 0.:
delta[f, ci] = ETA_MINUS * delta[f, ci]
gradient[f, ci] = 0.
elif sign[f, ci] > 0.:
delta[f, ci] = ETA_PLUS * delta[f, ci]
current_theta -= numpy.sign(gradient) * delta
previous_gradient = gradient
batch_index += 1
return current_theta
def gradient_descent(function,
derivative,
x, y,
theta=None,
iterations=100,
learning_rate=0.000001,
shuffle=True):
"""
Gradient descent -- use irprop- algorithm to adjust learning rate on a
per-feature basis
arguments:
function - the function to learn parameters of (takes (x, theta))
ex: logistic, linear, etc....
derivative - the derivative of the function
x - the input data in a matrix at least (1, 1)
y - the response variable(s)
theta - coefficients array
iterations - number of iterations
learning_rate - the learning rate, float
shuffle - permute the data at each iteration
"""
if theta is None:
theta = numpy.random.rand(x.shape[1], y.shape[1])
# parameters for rprop
previous_gradient = numpy.zeros([x.shape[1], theta.shape[1]])
delta = numpy.zeros(theta.shape)
delta += learning_rate
for i in range(0, int(iterations)):
if shuffle:
inds = numpy.random.permutation(range(x.shape[0]))
x = x[inds]
y = y[inds]
# avg gradient per example
loss = function(x, theta) - y
gradient = derivative(x, theta).T.dot(loss) / x.shape[0]
# update the learning rate
sign = gradient * previous_gradient
for ci in range(sign.shape[1]):
for f in range(sign.shape[0]):
if sign[f, ci] < 0.:
delta[f, ci] = ETA_MINUS * delta[f, ci]
gradient[f, ci] = 0.
elif sign[f, ci] > 0.:
delta[f, ci] = ETA_PLUS * delta[f, ci]
theta -= numpy.sign(gradient) * delta
previous_gradient = gradient
return theta
def adagrad(function, d_function, x, y, theta, iterations,
learning_rate=0.01, shuffle=True, smoothing=.5):
"""
    Adagrad -- adapts the learning rate on a per-feature basis using the
    accumulated (squared) gradient history
    arguments:
        function - the function to learn parameters of (takes x, theta)
            ex: logistic, linear, etc....
        d_function - the derivative of the function (takes x)
x - the input data in a matrix at least (1, 1)
y - the response variable(s)
theta - coefficients array
iterations - number of iterations
learning_rate - the learning rate, float
shuffle - permute the data at each iteration
smoothing - exponential smoothing in case adagrad is too
aggressive in step size
"""
running_gradient = numpy.zeros(theta.shape)
for iteration in range(iterations):
loss = function(x, theta) - y
gradient = loss.T.dot(d_function(x)) / x.shape[0]
# the step size is too aggressive with 'canonical' adagrad on
# non-sparse problems, so we use exponential smoothing instead of
# running_gradient += gradient ** 2
if smoothing:
running_gradient = (smoothing * running_gradient +
(1 - smoothing) * (gradient ** 2).T)
else:
            running_gradient += (gradient ** 2).T
lr = numpy.multiply(1. / (numpy.sqrt(running_gradient)), gradient.T)
theta -= learning_rate * lr
return theta
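# Example
# -----------------------------------------------------------------------------
# A minimal sketch of how these optimizers can be called. The linear model and
# the synthetic data below are illustrative assumptions, not part of the
# library; any differentiable function/derivative pair with the same
# signatures can be used instead.
if __name__ == '__main__':
    numpy.random.seed(0)
    # 100 examples, one feature plus a bias column
    x = numpy.hstack([numpy.random.rand(100, 1), numpy.ones((100, 1))])
    true_theta = numpy.array([[2.0], [-1.0]])
    y = x.dot(true_theta) + 0.01 * numpy.random.randn(100, 1)

    def linear(data, theta):
        return data.dot(theta)

    def d_linear(data, theta=None):
        # derivative of data.dot(theta) with respect to theta
        return data

    fitted = gradient_descent(linear, d_linear, x, y,
                              iterations=200, learning_rate=0.01)
    print("gradient_descent estimate:")
    print(fitted)

    fitted_sgd = stochastic_gradient_descent(linear, d_linear, x, y,
                                             iterations=200,
                                             learning_rate=0.01,
                                             batch_size=0.2)
    print("stochastic_gradient_descent estimate:")
    print(fitted_sgd)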
| mit | -6,365,893,648,222,809,000 | 34.571429 | 79 | 0.545103 | false |
dstanek/keystone | keystone/common/ldap/core.py | 1 | 76540 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap.filter
import ldappool
from oslo_log import log
import six
from six.moves import map, zip
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
LOG = log.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
'sub': ldap.SCOPE_SUBTREE}
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
'default': None,
'finding': ldap.DEREF_FINDING,
'never': ldap.DEREF_NEVER,
'searching': ldap.DEREF_SEARCHING}
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
'demand': ldap.OPT_X_TLS_DEMAND,
'allow': ldap.OPT_X_TLS_ALLOW}
# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
# indicate that no attributes should be returned besides the DN.
DN_ONLY = ['1.1']
_utf8_encoder = codecs.getencoder('utf-8')
def utf8_encode(value):
"""Encode a basestring to UTF-8.
    If the string is unicode, encode it to UTF-8; if the string is
    str, assume it's already encoded. Otherwise raise a TypeError.
:param value: A basestring
:returns: UTF-8 encoded version of value
:raises: TypeError if value is not basestring
"""
if isinstance(value, six.text_type):
return _utf8_encoder(value)[0]
elif isinstance(value, six.binary_type):
return value
else:
raise TypeError("value must be basestring, "
"not %s" % value.__class__.__name__)
_utf8_decoder = codecs.getdecoder('utf-8')
def utf8_decode(value):
"""Decode a from UTF-8 into unicode.
If the value is a binary string assume it's UTF-8 encoded and decode
it into a unicode string. Otherwise convert the value from its
type into a unicode string.
:param value: value to be returned as unicode
:returns: value as unicode
:raises: UnicodeDecodeError for invalid UTF-8 encoding
"""
if isinstance(value, six.binary_type):
return _utf8_decoder(value)[0]
return six.text_type(value)
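# A small, illustrative sketch of the expected round trip (Python 2 style
# reprs, values are made up):
#
#   >>> utf8_encode(u'caf\xe9')
#   'caf\xc3\xa9'
#   >>> utf8_decode('caf\xc3\xa9')
#   u'caf\xe9'
#   >>> utf8_decode(12)    # non-string values are coerced to unicode
#   u'12'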
def py2ldap(val):
"""Type convert a Python value to a type accepted by LDAP (unicode).
The LDAP API only accepts strings for values therefore convert
the value's type to a unicode string. A subsequent type conversion
will encode the unicode as UTF-8 as required by the python-ldap API,
but for now we just want a string representation of the value.
:param val: The value to convert to a LDAP string representation
:returns: unicode string representation of value.
"""
if isinstance(val, bool):
return u'TRUE' if val else u'FALSE'
else:
return six.text_type(val)
def enabled2py(val):
"""Similar to ldap2py, only useful for the enabled attribute."""
try:
return LDAP_VALUES[val]
except KeyError:
pass
try:
return int(val)
except ValueError:
pass
return utf8_decode(val)
def ldap2py(val):
"""Convert an LDAP formatted value to Python type used by OpenStack.
Virtually all LDAP values are stored as UTF-8 encoded strings.
OpenStack prefers values which are unicode friendly.
:param val: LDAP formatted value
:returns: val converted to preferred Python type
"""
return utf8_decode(val)
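# Illustrative sketch of the value conversions above (Python 2 style reprs):
#
#   >>> py2ldap(True), py2ldap(42), py2ldap(u'name')
#   (u'TRUE', u'42', u'name')
#   >>> enabled2py('TRUE'), enabled2py('512'), enabled2py('on')
#   (True, 512, u'on')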
def convert_ldap_result(ldap_result):
"""Convert LDAP search result to Python types used by OpenStack.
Each result tuple is of the form (dn, attrs), where dn is a string
containing the DN (distinguished name) of the entry, and attrs is
a dictionary containing the attributes associated with the
entry. The keys of attrs are strings, and the associated values
are lists of strings.
OpenStack wants to use Python types of its choosing. Strings will
be unicode, truth values boolean, whole numbers int's, etc. DN's will
also be decoded from UTF-8 to unicode.
:param ldap_result: LDAP search result
:returns: list of 2-tuples containing (dn, attrs) where dn is unicode
and attrs is a dict whose values are type converted to
OpenStack preferred types.
"""
py_result = []
at_least_one_referral = False
for dn, attrs in ldap_result:
ldap_attrs = {}
if dn is None:
# this is a Referral object, rather than an Entry object
at_least_one_referral = True
continue
for kind, values in attrs.items():
try:
val2py = enabled2py if kind == 'enabled' else ldap2py
ldap_attrs[kind] = [val2py(x) for x in values]
except UnicodeDecodeError:
LOG.debug('Unable to decode value for attribute %s', kind)
py_result.append((utf8_decode(dn), ldap_attrs))
if at_least_one_referral:
LOG.debug(('Referrals were returned and ignored. Enable referral '
'chasing in keystone.conf via [ldap] chase_referrals'))
return py_result
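# A sketch of the conversion performed above, using a made-up search result
# (the DN and attributes are illustrative only):
#
#   ldap_result = [('cn=alice,ou=users,dc=example,dc=org',
#                   {'cn': ['alice'], 'enabled': ['TRUE']}),
#                  (None, ['ldap://referral.example.org/??sub'])]
#   convert_ldap_result(ldap_result)
#   # => [(u'cn=alice,ou=users,dc=example,dc=org',
#   #      {'cn': [u'alice'], 'enabled': [True]})]
#
# The second tuple is a referral (dn is None) and is therefore dropped.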
def safe_iter(attrs):
if attrs is None:
return
elif isinstance(attrs, list):
for e in attrs:
yield e
else:
yield attrs
def parse_deref(opt):
try:
return LDAP_DEREF[opt]
except KeyError:
raise ValueError(_('Invalid LDAP deref option: %(option)s. '
'Choose one of: %(options)s') %
{'option': opt,
'options': ', '.join(LDAP_DEREF.keys()), })
def parse_tls_cert(opt):
try:
return LDAP_TLS_CERTS[opt]
except KeyError:
raise ValueError(_(
'Invalid LDAP TLS certs option: %(option)s. '
'Choose one of: %(options)s') % {
'option': opt,
'options': ', '.join(LDAP_TLS_CERTS.keys())})
def ldap_scope(scope):
try:
return LDAP_SCOPES[scope]
except KeyError:
raise ValueError(
_('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
'scope': scope,
'options': ', '.join(LDAP_SCOPES.keys())})
def prep_case_insensitive(value):
"""Prepare a string for case-insensitive comparison.
This is defined in RFC4518. For simplicity, all this function does is
lowercase all the characters, strip leading and trailing whitespace,
and compress sequences of spaces to a single space.
"""
value = re.sub(r'\s+', ' ', value.strip().lower())
return value
def is_ava_value_equal(attribute_type, val1, val2):
"""Returns True if and only if the AVAs are equal.
When comparing AVAs, the equality matching rule for the attribute type
should be taken into consideration. For simplicity, this implementation
does a case-insensitive comparison.
    Note that this function uses prep_case_insensitive so the limitations of
that function apply here.
"""
return prep_case_insensitive(val1) == prep_case_insensitive(val2)
def is_rdn_equal(rdn1, rdn2):
"""Returns True if and only if the RDNs are equal.
* RDNs must have the same number of AVAs.
    * Each AVA of the RDNs must be equal for the same attribute type. The
order isn't significant. Note that an attribute type will only be in one
AVA in an RDN, otherwise the DN wouldn't be valid.
    * Attribute types aren't case sensitive. Note that attribute type
      comparison is more complicated than implemented here. This function only
      compares case-insensitively. The code should handle multiple names for
      an attribute type (e.g., cn, commonName, and 2.5.4.3 are the same).
Note that this function uses is_ava_value_equal to compare AVAs so the
limitations of that function apply here.
"""
if len(rdn1) != len(rdn2):
return False
for attr_type_1, val1, dummy in rdn1:
found = False
for attr_type_2, val2, dummy in rdn2:
if attr_type_1.lower() != attr_type_2.lower():
continue
found = True
if not is_ava_value_equal(attr_type_1, val1, val2):
return False
break
if not found:
return False
return True
def is_dn_equal(dn1, dn2):
"""Returns True if and only if the DNs are equal.
Two DNs are equal if they've got the same number of RDNs and if the RDNs
are the same at each position. See RFC4517.
Note that this function uses is_rdn_equal to compare RDNs so the
limitations of that function apply here.
:param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(dn1, list):
dn1 = ldap.dn.str2dn(utf8_encode(dn1))
if not isinstance(dn2, list):
dn2 = ldap.dn.str2dn(utf8_encode(dn2))
if len(dn1) != len(dn2):
return False
for rdn1, rdn2 in zip(dn1, dn2):
if not is_rdn_equal(rdn1, rdn2):
return False
return True
def dn_startswith(descendant_dn, dn):
"""Returns True if and only if the descendant_dn is under the dn.
:param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(descendant_dn, list):
descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
if not isinstance(dn, list):
dn = ldap.dn.str2dn(utf8_encode(dn))
if len(descendant_dn) <= len(dn):
return False
# Use the last len(dn) RDNs.
return is_dn_equal(descendant_dn[-len(dn):], dn)
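# Sketch of the DN helpers above, with hypothetical DNs:
#
#   >>> is_dn_equal('cn=Alice,ou=Users,dc=example,dc=org',
#   ...             'CN=alice,OU=users,DC=example,DC=org')
#   True
#   >>> dn_startswith('cn=alice,ou=users,dc=example,dc=org',
#   ...               'ou=users,dc=example,dc=org')
#   True
#   >>> dn_startswith('ou=users,dc=example,dc=org',
#   ...               'ou=users,dc=example,dc=org')
#   False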
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
'''Abstract class which defines methods for a LDAP API provider.
Native Keystone values cannot be passed directly into and from the
python-ldap API. Type conversion must occur at the LDAP API
    boundary, examples of type conversions are:
* booleans map to the strings 'TRUE' and 'FALSE'
* integer values map to their string representation.
* unicode strings are encoded in UTF-8
In addition to handling type conversions at the API boundary we
have the requirement to support more than one LDAP API
provider. Currently we have:
* python-ldap, this is the standard LDAP API for Python, it
requires access to a live LDAP server.
* Fake LDAP which emulates python-ldap. This is used for
testing without requiring a live LDAP server.
To support these requirements we need a layer that performs type
conversions and then calls another LDAP API which is configurable
(e.g. either python-ldap or the fake emulation).
We have an additional constraint at the time of this writing due to
limitations in the logging module. The logging module is not
capable of accepting UTF-8 encoded strings, it will throw an
encoding exception. Therefore all logging MUST be performed prior
to UTF-8 conversion. This means no logging can be performed in the
ldap APIs that implement the python-ldap API because those APIs
are defined to accept only UTF-8 strings. Thus the layer which
performs type conversions must also do the logging. We do the type
conversions in two steps, once to convert all Python types to
unicode strings, then log, then convert the unicode strings to
UTF-8.
There are a variety of ways one could accomplish this, we elect to
use a chaining technique whereby instances of this class simply
call the next member in the chain via the "conn" attribute. The
chain is constructed by passing in an existing instance of this
class as the conn attribute when the class is instantiated.
Here is a brief explanation of why other possible approaches were
not used:
subclassing
To perform the wrapping operations in the correct order
      the type conversion class would have to subclass each of
the API providers. This is awkward, doubles the number of
classes, and does not scale well. It requires the type
conversion class to be aware of all possible API
providers.
decorators
Decorators provide an elegant solution to wrap methods and
would be an ideal way to perform type conversions before
calling the wrapped function and then converting the
values returned from the wrapped function. However
      decorators need to be aware of the method signature; they
      have to know what input parameters need conversion and how
to convert the result. For an API like python-ldap which
has a large number of different method signatures it would
require a large number of specialized
decorators. Experience has shown it's very easy to apply
the wrong decorator due to the inherent complexity and
tendency to cut-n-paste code. Another option is to
parameterize the decorator to make it "smart". Experience
has shown such decorators become insanely complicated and
difficult to understand and debug. Also decorators tend to
      hide what's really going on when a method is called; the
      operations being performed are not visible when looking at
      the implementation of a decorated method, and experience
      has shown this too leads to mistakes.
    Chaining simplifies both wrapping to perform type conversion as
    well as the substitution of alternative API providers. One simply
    creates a new instance of the API interface and inserts it at the
    front of the chain (a short usage sketch appears later in this
    module, just before BaseLdap). Type conversions are explicit and
    obvious.
If a new method needs to be added to the API interface one adds it
to the abstract class definition. Should one miss adding the new
method to any derivations of the abstract class the code will fail
to load and run making it impossible to forget updating all the
derived classes.
'''
@abc.abstractmethod
def __init__(self, conn=None):
self.conn = conn
@abc.abstractmethod
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def set_option(self, option, invalue):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_option(self, option):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def unbind_s(self):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def add_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def modify_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_s(self, dn):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
class PythonLDAPHandler(LDAPHandler):
'''Implementation of the LDAPHandler interface which calls the
python-ldap API.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
def __init__(self, conn=None):
super(PythonLDAPHandler, self).__init__(conn=conn)
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.conn = ldap.initialize(url)
self.conn.protocol_version = ldap.VERSION3
if alias_dereferencing is not None:
self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
self.page_size = page_size
if use_tls:
self.conn.start_tls_s()
if chase_referrals is not None:
self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
def unbind_s(self):
return self.conn.unbind_s()
def add_s(self, dn, modlist):
return self.conn.add_s(dn, modlist)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return self.conn.search_s(base, scope, filterstr,
attrlist, attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
# The resp_ctrl_classes parameter is a recent addition to the
# API. It defaults to None. We do not anticipate using it.
# To run with older versions of python-ldap we do not pass it.
return self.conn.result3(msgid, all, timeout)
def modify_s(self, dn, modlist):
return self.conn.modify_s(dn, modlist)
def delete_s(self, dn):
return self.conn.delete_s(dn)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
tls_cacertdir=None, tls_req_cert=None,
debug_level=None):
'''Method for common ldap initialization between PythonLDAPHandler and
PooledLDAPHandler.
'''
LOG.debug("LDAP init: url=%s", url)
LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
'tls_req_cert=%s tls_avail=%s',
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, ldap.TLS_AVAIL)
if debug_level is not None:
ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
using_ldaps = url.lower().startswith("ldaps")
if use_tls and using_ldaps:
raise AssertionError(_('Invalid TLS / LDAPS combination'))
# The certificate trust options apply for both LDAPS and TLS.
if use_tls or using_ldaps:
if not ldap.TLS_AVAIL:
raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
'not available') % ldap.TLS_AVAIL)
if tls_cacertfile:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isfile(tls_cacertfile):
raise IOError(_("tls_cacertfile %s not found "
"or is not a file") %
tls_cacertfile)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
elif tls_cacertdir:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isdir(tls_cacertdir):
raise IOError(_("tls_cacertdir %s not found "
"or is not a directory") %
tls_cacertdir)
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
if tls_req_cert in list(LDAP_TLS_CERTS.values()):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
tls_req_cert)
class MsgId(list):
'''Wrapper class to hold connection and msgid.'''
pass
def use_conn_pool(func):
'''Use this only for connection pool specific ldap API.
    This adds the connection object to the decorated API as the next
    argument after self.
'''
def wrapper(self, *args, **kwargs):
# assert isinstance(self, PooledLDAPHandler)
with self._get_pool_connection() as conn:
self._apply_options(conn)
return func(self, conn, *args, **kwargs)
return wrapper
class PooledLDAPHandler(LDAPHandler):
    '''Implementation of the LDAPHandler interface which uses a pooled
    connection manager.
Pool specific configuration is defined in [ldap] section.
All other LDAP configuration is still used from [ldap] section
    Keystone LDAP authentication logic authenticates an end user using its DN
    and password via LDAP bind to establish that the supplied password is
    correct.
    This can fill up the pool quickly (as the pool re-uses an existing
    connection based on its bind data) and would not leave space in the pool
    for connection re-use for other LDAP operations.
    Now a separate pool can be established for those requests when the related
    flag 'use_auth_pool' is enabled. That pool can have its own size and
connection lifetime. Other pool attributes are shared between those pools.
If 'use_pool' is disabled, then 'use_auth_pool' does not matter.
If 'use_auth_pool' is not enabled, then connection pooling is not used for
those LDAP operations.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
# Added here to allow override for testing
Connector = ldappool.StateConnector
auth_pool_prefix = 'auth_pool_'
connection_pools = {} # static connector pool dict
def __init__(self, conn=None, use_auth_pool=False):
super(PooledLDAPHandler, self).__init__(conn=conn)
self.who = ''
self.cred = ''
self.conn_options = {} # connection specific options
self.page_size = None
self.use_auth_pool = use_auth_pool
self.conn_pool = None
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.page_size = page_size
# Following two options are not added in common initialization as they
# need to follow a sequence in PythonLDAPHandler code.
if alias_dereferencing is not None:
self.set_option(ldap.OPT_DEREF, alias_dereferencing)
if chase_referrals is not None:
self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
if self.use_auth_pool: # separate pool when use_auth_pool enabled
pool_url = self.auth_pool_prefix + url
else:
pool_url = url
try:
self.conn_pool = self.connection_pools[pool_url]
except KeyError:
self.conn_pool = ldappool.ConnectionManager(
url,
size=pool_size,
retry_max=pool_retry_max,
retry_delay=pool_retry_delay,
timeout=pool_conn_timeout,
connector_cls=self.Connector,
use_tls=use_tls,
max_lifetime=pool_conn_lifetime)
self.connection_pools[pool_url] = self.conn_pool
def set_option(self, option, invalue):
self.conn_options[option] = invalue
def get_option(self, option):
value = self.conn_options.get(option)
# if option was not specified explicitly, then use connection default
# value for that option if there.
if value is None:
with self._get_pool_connection() as conn:
value = conn.get_option(option)
return value
def _apply_options(self, conn):
# if connection has a lifetime, then it already has options specified
if conn.get_lifetime() > 30:
return
for option, invalue in self.conn_options.items():
conn.set_option(option, invalue)
def _get_pool_connection(self):
return self.conn_pool.connection(self.who, self.cred)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
'''Not using use_conn_pool decorator here as this API takes cred as
input.
'''
self.who = who
self.cred = cred
with self._get_pool_connection() as conn:
self._apply_options(conn)
def unbind_s(self):
        # After the connection generator's `with` statement execution block is
        # done, the connection is always released via the finally block in
        # ldappool. So this unbind is a no op.
pass
@use_conn_pool
def add_s(self, conn, dn, modlist):
return conn.add_s(dn, modlist)
@use_conn_pool
def search_s(self, conn, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return conn.search_s(base, scope, filterstr, attrlist,
attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
        '''This API is an asynchronous API which returns a MsgId instance to
        be used in the result3 call.
        To work with the result3 API in a predictable manner, the same LDAP
        connection which provided the msgid is needed. So the connection used
        and the msgid are wrapped in a MsgId instance. The connection
        associated with search_ext is released once the last hard reference to
        the MsgId object is freed. This will happen when the caller is done
        with the returned MsgId.
'''
conn_ctxt = self._get_pool_connection()
conn = conn_ctxt.__enter__()
try:
msgid = conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
except Exception:
conn_ctxt.__exit__(*sys.exc_info())
raise
res = MsgId((conn, msgid))
weakref.ref(res, functools.partial(conn_ctxt.__exit__,
None, None, None))
return res
def result3(self, msgid, all=1, timeout=None,
resp_ctrl_classes=None):
        '''This method is used to wait for and return the result of an
        operation previously initiated by one of the LDAP asynchronous
        operation routines (e.g. search_ext()), which returned an invocation
        identifier (a message id) upon successful initiation of the
        operation.
        The input msgid is expected to be an instance of class MsgId, which
        holds the LDAP session/connection used to execute search_ext and the
        message identifier.
        The connection associated with search_ext is released once the last
        hard reference to the MsgId object is freed. This will happen when
        the function which requested the msgid and used it in result3 exits.
'''
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
@use_conn_pool
def modify_s(self, conn, dn, modlist):
return conn.modify_s(dn, modlist)
@use_conn_pool
def delete_s(self, conn, dn):
return conn.delete_s(dn)
@use_conn_pool
def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
return conn.delete_ext_s(dn, serverctrls, clientctrls)
class KeystoneLDAPHandler(LDAPHandler):
'''Convert data types and perform logging.
    This LDAP interface wraps the python-ldap based interfaces. The
python-ldap interfaces require string values encoded in UTF-8. The
OpenStack logging framework at the time of this writing is not
capable of accepting strings encoded in UTF-8, the log functions
will throw decoding errors if a non-ascii character appears in a
string.
Prior to the call Python data types are converted to a string
representation as required by the LDAP APIs.
Then logging is performed so we can track what is being
sent/received from LDAP. Also the logging filters security
sensitive items (i.e. passwords).
Then the string values are encoded into UTF-8.
Then the LDAP API entry point is invoked.
Data returned from the LDAP call is converted back from UTF-8
encoded strings into the Python data type used internally in
OpenStack.
'''
def __init__(self, conn=None):
super(KeystoneLDAPHandler, self).__init__(conn=conn)
self.page_size = 0
def __enter__(self):
return self
def _disable_paging(self):
# Disable the pagination from now on
self.page_size = 0
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None,
pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
self.page_size = page_size
return self.conn.connect(url, page_size, alias_dereferencing,
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, chase_referrals,
debug_level=debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=pool_retry_max,
pool_retry_delay=pool_retry_delay,
pool_conn_timeout=pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime)
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
LOG.debug("LDAP bind: who=%s", who)
who_utf8 = utf8_encode(who)
cred_utf8 = utf8_encode(cred)
return self.conn.simple_bind_s(who_utf8, cred_utf8,
serverctrls=serverctrls,
clientctrls=clientctrls)
def unbind_s(self):
LOG.debug("LDAP unbind")
return self.conn.unbind_s()
def add_s(self, dn, modlist):
ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
for kind, values in modlist]
logging_attrs = [(kind, values
if kind != 'userPassword'
else ['****'])
for kind, values in ldap_attrs]
LOG.debug('LDAP add: dn=%s attrs=%s',
dn, logging_attrs)
dn_utf8 = utf8_encode(dn)
ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
for kind, values in ldap_attrs]
return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
# NOTE(morganfainberg): Remove "None" singletons from this list, which
# allows us to set mapped attributes to "None" as defaults in config.
# Without this filtering, the ldap query would raise a TypeError since
# attrlist is expected to be an iterable of strings.
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s',
base, scope, filterstr, attrlist, attrsonly)
if self.page_size:
ldap_result = self._paged_search_s(base, scope,
filterstr, attrlist)
else:
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist_utf8 = list(map(utf8_encode, attrlist))
ldap_result = self.conn.search_s(base_utf8, scope,
filterstr_utf8,
attrlist_utf8, attrsonly)
py_result = convert_ldap_result(ldap_result)
return py_result
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s'
'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def _paged_search_s(self, base, scope, filterstr, attrlist=None):
res = []
use_old_paging_api = False
# The API for the simple paged results control changed between
# python-ldap 2.3 and 2.4. We need to detect the capabilities
# of the python-ldap version we are using.
if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
use_old_paging_api = True
lc = ldap.controls.SimplePagedResultsControl(
controlType=ldap.LDAP_CONTROL_PAGE_OID,
criticality=True,
controlValue=(self.page_size, ''))
page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
else:
lc = ldap.controls.libldap.SimplePagedResultsControl(
criticality=True,
size=self.page_size,
cookie='')
page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist = [attr for attr in attrlist if attr is not None]
attrlist_utf8 = list(map(utf8_encode, attrlist))
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
        # Loop requesting pages from the ldap server until no data is left
while True:
# Request to the ldap server a page with 'page_size' entries
rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
# Receive the data
res.extend(rdata)
pctrls = [c for c in serverctrls
if c.controlType == page_ctrl_oid]
if pctrls:
# LDAP server supports pagination
if use_old_paging_api:
est, cookie = pctrls[0].controlValue
lc.controlValue = (self.page_size, cookie)
else:
cookie = lc.cookie = pctrls[0].cookie
if cookie:
# There is more data still on the server
# so we request another page
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
else:
# Exit condition no more data on server
break
else:
LOG.warning(_LW('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.'))
self._disable_paging()
break
return res
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
'resp_ctrl_classes=%s ldap_result=%s',
msgid, all, timeout, resp_ctrl_classes, ldap_result)
py_result = convert_ldap_result(ldap_result)
return py_result
def modify_s(self, dn, modlist):
ldap_modlist = [
(op, kind, (None if values is None
else [py2ldap(x) for x in safe_iter(values)]))
for op, kind, values in modlist]
logging_modlist = [(op, kind, (values if kind != 'userPassword'
else ['****']))
for op, kind, values in ldap_modlist]
LOG.debug('LDAP modify: dn=%s modlist=%s',
dn, logging_modlist)
dn_utf8 = utf8_encode(dn)
ldap_modlist_utf8 = [
(op, kind, (None if values is None
else [utf8_encode(x) for x in safe_iter(values)]))
for op, kind, values in ldap_modlist]
return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
def delete_s(self, dn):
LOG.debug("LDAP delete: dn=%s", dn)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_s(dn_utf8)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
dn, serverctrls, clientctrls)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
def __exit__(self, exc_type, exc_val, exc_tb):
self.unbind_s()
_HANDLERS = {}
def register_handler(prefix, handler):
_HANDLERS[prefix] = handler
def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
for prefix, handler in _HANDLERS.items():
if conn_url.startswith(prefix):
return handler()
if use_pool:
return PooledLDAPHandler(use_auth_pool=use_auth_pool)
else:
return PythonLDAPHandler()
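# A hedged sketch of how an alternative LDAP backend can be plugged in through
# register_handler(). "FakeLdap" is a stand-in name for any LDAPHandler
# implementation (keystone's tests register an in-memory fake this way):
#
#   register_handler('fake://', FakeLdap)
#   conn = _get_connection('fake://memory')      # -> FakeLdap()
#   conn = _get_connection('ldap://host')        # -> PythonLDAPHandler()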
def filter_entity(entity_ref):
"""Filter out private items in an entity dict.
:param entity_ref: the entity dictionary. The 'dn' field will be removed.
'dn' is used in LDAP, but should not be returned to the user. This
value may be modified.
:returns: entity_ref
"""
if entity_ref:
entity_ref.pop('dn', None)
return entity_ref
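# Sketch of the handler chaining described in the LDAPHandler docstring: the
# type-converting/logging layer simply wraps whichever raw handler is chosen
# (BaseLdap.get_connection below builds exactly this chain; the URL, bind DN
# and filter are illustrative):
#
#   raw = PythonLDAPHandler()                  # or PooledLDAPHandler(), etc.
#   conn = KeystoneLDAPHandler(conn=raw)
#   conn.connect('ldap://ldap.example.org')
#   conn.simple_bind_s('cn=admin,dc=example,dc=org', 'secret')
#   users = conn.search_s('ou=users,dc=example,dc=org', ldap.SCOPE_SUBTREE,
#                         '(objectClass=inetOrgPerson)')
#   conn.unbind_s()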
class BaseLdap(object):
DEFAULT_OU = None
DEFAULT_STRUCTURAL_CLASSES = None
DEFAULT_ID_ATTR = 'cn'
DEFAULT_OBJECTCLASS = None
DEFAULT_FILTER = None
DEFAULT_EXTRA_ATTR_MAPPING = []
DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
NotFound = None
notfound_arg = None
options_name = None
model = None
attribute_options_names = {}
immutable_attrs = []
attribute_ignore = []
tree_dn = None
def __init__(self, conf):
self.LDAP_URL = conf.ldap.url
self.LDAP_USER = conf.ldap.user
self.LDAP_PASSWORD = conf.ldap.password
self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
self.page_size = conf.ldap.page_size
self.use_tls = conf.ldap.use_tls
self.tls_cacertfile = conf.ldap.tls_cacertfile
self.tls_cacertdir = conf.ldap.tls_cacertdir
self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
self.attribute_mapping = {}
self.chase_referrals = conf.ldap.chase_referrals
self.debug_level = conf.ldap.debug_level
# LDAP Pool specific attribute
self.use_pool = conf.ldap.use_pool
self.pool_size = conf.ldap.pool_size
self.pool_retry_max = conf.ldap.pool_retry_max
self.pool_retry_delay = conf.ldap.pool_retry_delay
self.pool_conn_timeout = conf.ldap.pool_connection_timeout
self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
# End user authentication pool specific config attributes
self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
self.auth_pool_size = conf.ldap.auth_pool_size
self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
if self.options_name is not None:
self.suffix = conf.ldap.suffix
dn = '%s_tree_dn' % self.options_name
self.tree_dn = (getattr(conf.ldap, dn)
or '%s,%s' % (self.DEFAULT_OU, self.suffix))
idatt = '%s_id_attribute' % self.options_name
self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
objclass = '%s_objectclass' % self.options_name
self.object_class = (getattr(conf.ldap, objclass)
or self.DEFAULT_OBJECTCLASS)
for k, v in self.attribute_options_names.items():
v = '%s_%s_attribute' % (self.options_name, v)
self.attribute_mapping[k] = getattr(conf.ldap, v)
attr_mapping_opt = ('%s_additional_attribute_mapping' %
self.options_name)
attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
or self.DEFAULT_EXTRA_ATTR_MAPPING)
self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
ldap_filter = '%s_filter' % self.options_name
self.ldap_filter = getattr(conf.ldap,
ldap_filter) or self.DEFAULT_FILTER
allow_create = '%s_allow_create' % self.options_name
self.allow_create = getattr(conf.ldap, allow_create)
allow_update = '%s_allow_update' % self.options_name
self.allow_update = getattr(conf.ldap, allow_update)
allow_delete = '%s_allow_delete' % self.options_name
self.allow_delete = getattr(conf.ldap, allow_delete)
member_attribute = '%s_member_attribute' % self.options_name
self.member_attribute = getattr(conf.ldap, member_attribute, None)
self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
if self.notfound_arg is None:
self.notfound_arg = self.options_name + '_id'
attribute_ignore = '%s_attribute_ignore' % self.options_name
self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
self.use_dumb_member = conf.ldap.use_dumb_member
self.dumb_member = (conf.ldap.dumb_member or
self.DUMB_MEMBER_DN)
self.subtree_delete_enabled = conf.ldap.allow_subtree_delete
def _not_found(self, object_id):
if self.NotFound is None:
return exception.NotFound(target=object_id)
else:
return self.NotFound(**{self.notfound_arg: object_id})
def _parse_extra_attrs(self, option_list):
mapping = {}
for item in option_list:
try:
ldap_attr, attr_map = item.split(':')
except Exception:
LOG.warn(_LW(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>'),
item)
continue
mapping[ldap_attr] = attr_map
return mapping
def _is_dumb_member(self, member_dn):
"""Checks that member is a dumb member.
:param member_dn: DN of member to be checked.
"""
return (self.use_dumb_member
and is_dn_equal(member_dn, self.dumb_member))
def get_connection(self, user=None, password=None, end_user_auth=False):
use_pool = self.use_pool
pool_size = self.pool_size
pool_conn_lifetime = self.pool_conn_lifetime
if end_user_auth:
if not self.use_auth_pool:
use_pool = False
else:
pool_size = self.auth_pool_size
pool_conn_lifetime = self.auth_pool_conn_lifetime
conn = _get_connection(self.LDAP_URL, use_pool,
use_auth_pool=end_user_auth)
conn = KeystoneLDAPHandler(conn=conn)
conn.connect(self.LDAP_URL,
page_size=self.page_size,
alias_dereferencing=self.alias_dereferencing,
use_tls=self.use_tls,
tls_cacertfile=self.tls_cacertfile,
tls_cacertdir=self.tls_cacertdir,
tls_req_cert=self.tls_req_cert,
chase_referrals=self.chase_referrals,
debug_level=self.debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=self.pool_retry_max,
pool_retry_delay=self.pool_retry_delay,
pool_conn_timeout=self.pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime
)
if user is None:
user = self.LDAP_USER
if password is None:
password = self.LDAP_PASSWORD
# not all LDAP servers require authentication, so we don't bind
# if we don't have any user/pass
if user and password:
conn.simple_bind_s(user, password)
return conn
def _id_to_dn_string(self, object_id):
return u'%s=%s,%s' % (self.id_attr,
ldap.dn.escape_dn_chars(
six.text_type(object_id)),
self.tree_dn)
def _id_to_dn(self, object_id):
if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
return self._id_to_dn_string(object_id)
with self.get_connection() as conn:
search_result = conn.search_s(
self.tree_dn, self.LDAP_SCOPE,
u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
{'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'objclass': self.object_class},
attrlist=DN_ONLY)
if search_result:
dn, attrs = search_result[0]
return dn
else:
return self._id_to_dn_string(object_id)
@staticmethod
def _dn_to_id(dn):
return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
def _ldap_res_to_model(self, res):
# LDAP attribute names may be returned in a different case than
# they are defined in the mapping, so we need to check for keys
# in a case-insensitive way. We use the case specified in the
# mapping for the model to ensure we have a predictable way of
# retrieving values later.
lower_res = {k.lower(): v for k, v in res[1].items()}
id_attrs = lower_res.get(self.id_attr.lower())
if not id_attrs:
message = _('ID attribute %(id_attr)s not found in LDAP '
'object %(dn)s') % ({'id_attr': self.id_attr,
'dn': res[0]})
raise exception.NotFound(message=message)
if len(id_attrs) > 1:
# FIXME(gyee): if this is a multi-value attribute and it has
# multiple values, we can't use it as ID. Retain the dn_to_id
# logic here so it does not potentially break existing
# deployments. We need to fix our read-write LDAP logic so
# it does not get the ID from DN.
message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
'has multiple values and therefore cannot be used '
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr,
'dn': res[0]})
LOG.warn(message)
id_val = self._dn_to_id(res[0])
else:
id_val = id_attrs[0]
obj = self.model(id=id_val)
for k in obj.known_keys:
if k in self.attribute_ignore:
continue
try:
map_attr = self.attribute_mapping.get(k, k)
if map_attr is None:
# Ignore attributes that are mapped to None.
continue
v = lower_res[map_attr.lower()]
except KeyError:
pass
else:
try:
obj[k] = v[0]
except IndexError:
obj[k] = None
return obj
def check_allow_create(self):
if not self.allow_create:
action = _('LDAP %s create') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_update(self):
if not self.allow_update:
action = _('LDAP %s update') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_delete(self):
if not self.allow_delete:
action = _('LDAP %s delete') % self.options_name
raise exception.ForbiddenAction(action=action)
def affirm_unique(self, values):
if values.get('name') is not None:
try:
self.get_by_name(values['name'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate name, %s.') %
values['name'])
if values.get('id') is not None:
try:
self.get(values['id'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate ID, %s.') %
values['id'])
def create(self, values):
self.affirm_unique(values)
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
for k, v in values.items():
if k in self.attribute_ignore:
continue
if k == 'id':
# no need to check if v is None as 'id' will always have
# a value
attrs.append((self.id_attr, [v]))
elif v is not None:
attr_type = self.attribute_mapping.get(k, k)
if attr_type is not None:
attrs.append((attr_type, [v]))
extra_attrs = [attr for attr, name
in self.extra_attr_mapping.items()
if name == k]
for attr in extra_attrs:
attrs.append((attr, [v]))
if 'groupOfNames' in object_classes and self.use_dumb_member:
attrs.append(('member', [self.dumb_member]))
with self.get_connection() as conn:
conn.add_s(self._id_to_dn(values['id']), attrs)
return values
def _ldap_get(self, object_id, ldap_filter=None):
query = (u'(&(%(id_attr)s=%(id)s)'
u'%(filter)s'
u'(objectClass=%(object_class)s))'
% {'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'filter': (ldap_filter or self.ldap_filter or ''),
'object_class': self.object_class})
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
list(self.attribute_mapping.values()) +
list(self.extra_attr_mapping.keys()))))
res = conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return None
try:
return res[0]
except IndexError:
return None
def _ldap_get_all(self, ldap_filter=None):
query = u'(&%s(objectClass=%s))' % (ldap_filter or
self.ldap_filter or
'', self.object_class)
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
list(self.attribute_mapping.values()) +
list(self.extra_attr_mapping.keys()))))
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return []
def _ldap_get_list(self, search_base, scope, query_params=None,
attrlist=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
def calc_filter(attrname, value):
val_esc = ldap.filter.escape_filter_chars(value)
return '(%s=%s)' % (attrname, val_esc)
query = (u'(&%s%s)' %
(query, ''.join([calc_filter(k, v) for k, v in
query_params.items()])))
with self.get_connection() as conn:
return conn.search_s(search_base, scope, query, attrlist)
def get(self, object_id, ldap_filter=None):
res = self._ldap_get(object_id, ldap_filter)
if res is None:
raise self._not_found(object_id)
else:
return self._ldap_res_to_model(res)
def get_by_name(self, name, ldap_filter=None):
query = (u'(%s=%s)' % (self.attribute_mapping['name'],
ldap.filter.escape_filter_chars(
six.text_type(name))))
res = self.get_all(query)
try:
return res[0]
except IndexError:
raise self._not_found(name)
def get_all(self, ldap_filter=None):
return [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)]
def update(self, object_id, values, old_obj=None):
if old_obj is None:
old_obj = self.get(object_id)
modlist = []
for k, v in values.items():
if k == 'id':
# id can't be modified.
continue
if k in self.attribute_ignore:
# Handle 'enabled' specially since can't disable if ignored.
if k == 'enabled' and (not v):
action = _("Disabling an entity where the 'enable' "
"attribute is ignored by configuration.")
raise exception.ForbiddenAction(action=action)
continue
# attribute value has not changed
if k in old_obj and old_obj[k] == v:
continue
if k in self.immutable_attrs:
msg = (_("Cannot change %(option_name)s %(attr)s") %
{'option_name': self.options_name, 'attr': k})
raise exception.ValidationError(msg)
if v is None:
if old_obj.get(k) is not None:
modlist.append((ldap.MOD_DELETE,
self.attribute_mapping.get(k, k),
None))
continue
current_value = old_obj.get(k)
if current_value is None:
op = ldap.MOD_ADD
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
elif current_value != v:
op = ldap.MOD_REPLACE
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
if modlist:
with self.get_connection() as conn:
try:
conn.modify_s(self._id_to_dn(object_id), modlist)
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
return self.get(object_id)
def delete(self, object_id):
with self.get_connection() as conn:
try:
conn.delete_s(self._id_to_dn(object_id))
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
def deleteTree(self, object_id):
tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
0,
None)
with self.get_connection() as conn:
try:
conn.delete_ext_s(self._id_to_dn(object_id),
serverctrls=[tree_delete_control])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
except ldap.NOT_ALLOWED_ON_NONLEAF:
# Most LDAP servers do not support the tree_delete_control.
# In these servers, the usual idiom is to first perform a
# search to get the entries to delete, then delete them in
# in order of child to parent, since LDAP forbids the
# deletion of a parent entry before deleting the children
# of that parent. The simplest way to do that is to delete
# the entries in order of the length of the DN, from longest
# to shortest DN.
dn = self._id_to_dn(object_id)
scope = ldap.SCOPE_SUBTREE
# With some directory servers, an entry with objectclass
# ldapsubentry will not be returned unless it is explicitly
# requested, by specifying the objectclass in the search
# filter. We must specify this, with objectclass=*, in an
# LDAP filter OR clause, in order to return all entries
filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
# We only need the DNs of the entries. Since no attributes
# will be returned, we do not have to specify attrsonly=1.
entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
if entries:
for dn in sorted((e[0] for e in entries),
key=len, reverse=True):
conn.delete_s(dn)
else:
LOG.debug('No entries in LDAP subtree %s', dn)
def add_member(self, member_dn, member_list_dn):
"""Add member to the member list.
:param member_dn: DN of member to be added.
:param member_list_dn: DN of group to which the
member will be added.
:raises: exception.Conflict: If the user was already a member.
self.NotFound: If the group entry didn't exist.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.TYPE_OR_VALUE_EXISTS:
raise exception.Conflict(_('Member %(member)s '
'is already a member'
' of group %(group)s') % {
'member': member_dn,
'group': member_list_dn})
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def remove_member(self, member_dn, member_list_dn):
"""Remove member from the member list.
:param member_dn: DN of member to be removed.
:param member_list_dn: DN of group from which the
member will be removed.
:raises: self.NotFound: If the group entry didn't exist.
ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def _delete_tree_nodes(self, search_base, scope, query_params=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
query = (u'(&%s%s)' %
(query, ''.join(['(%s=%s)'
% (k, ldap.filter.escape_filter_chars(v))
for k, v in
query_params.items()])))
not_deleted_nodes = []
with self.get_connection() as conn:
try:
nodes = conn.search_s(search_base, scope, query,
attrlist=DN_ONLY)
except ldap.NO_SUCH_OBJECT:
LOG.debug('Could not find entry with dn=%s', search_base)
raise self._not_found(self._dn_to_id(search_base))
else:
for node_dn, _t in nodes:
try:
conn.delete_s(node_dn)
except ldap.NO_SUCH_OBJECT:
not_deleted_nodes.append(node_dn)
if not_deleted_nodes:
LOG.warn(_LW("When deleting entries for %(search_base)s, could not"
" delete nonexistent entries %(entries)s%(dots)s"),
{'search_base': search_base,
'entries': not_deleted_nodes[:3],
'dots': '...' if len(not_deleted_nodes) > 3 else ''})
def filter_query(self, hints, query=None):
"""Applies filtering to a query.
:param hints: contains the list of filters, which may be None,
indicating that there are no filters to be applied.
If it's not None, then any filters satisfied here will be
removed so that the caller will know if any filters
remain to be applied.
:param query: LDAP query into which to include filters
:returns query: LDAP query, updated with any filters satisfied
"""
def build_filter(filter_, hints):
"""Build a filter for the query.
:param filter_: the dict that describes this filter
:param hints: contains the list of filters yet to be satisfied.
:returns query: LDAP query term to be added
"""
ldap_attr = self.attribute_mapping[filter_['name']]
val_esc = ldap.filter.escape_filter_chars(filter_['value'])
if filter_['case_sensitive']:
# NOTE(henry-nash): Although dependent on the schema being
# used, most LDAP attributes are configured with case
# insensitive matching rules, so we'll leave this to the
# controller to filter.
return
if filter_['name'] == 'enabled':
# NOTE(henry-nash): Due to the different options for storing
# the enabled attribute (e,g, emulated or not), for now we
# don't try and filter this at the driver level - we simply
# leave the filter to be handled by the controller. It seems
                # unlikely that this will cause a significant performance
# issue.
return
# TODO(henry-nash): Currently there are no booleans (other than
# 'enabled' that is handled above) on which you can filter. If
# there were, we would need to add special handling here to
# convert the booleans values to 'TRUE' and 'FALSE'. To do that
# we would also need to know which filter keys were actually
# booleans (this is related to bug #1411478).
if filter_['comparator'] == 'equals':
query_term = (u'(%(attr)s=%(val)s)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'contains':
query_term = (u'(%(attr)s=*%(val)s*)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'startswith':
query_term = (u'(%(attr)s=%(val)s*)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'endswith':
query_term = (u'(%(attr)s=*%(val)s)'
% {'attr': ldap_attr, 'val': val_esc})
else:
# It's a filter we don't understand, so let the caller
# work out if they need to do something with it.
return
return query_term
if query is None:
# make sure query is a string so the ldap filter is properly
# constructed from filter_list later
query = ''
if hints is None:
return query
filter_list = []
satisfied_filters = []
for filter_ in hints.filters:
if filter_['name'] not in self.attribute_mapping:
continue
new_filter = build_filter(filter_, hints)
if new_filter is not None:
filter_list.append(new_filter)
satisfied_filters.append(filter_)
if filter_list:
query = u'(&%s%s)' % (query, ''.join(filter_list))
# Remove satisfied filters, then the caller will know remaining filters
for filter_ in satisfied_filters:
hints.filters.remove(filter_)
return query
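# --- Editorial sketch (not part of the original module) -----------------------
# Minimal, self-contained illustration of how the per-field terms produced by
# build_filter() above are folded into a single LDAP search filter. Attribute
# names and values are hypothetical; real ones come from attribute_mapping and
# the hints object.
def _example_combine_filter_terms():
    query = u'(objectClass=inetOrgPerson)'
    terms = [u'(ou=*dev*)', u'(cn=alice)']  # e.g. a 'contains' and an 'equals' term
    combined = u'(&%s%s)' % (query, u''.join(terms))
    return combined  # '(&(objectClass=inetOrgPerson)(ou=*dev*)(cn=alice))'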
class EnabledEmuMixIn(BaseLdap):
"""Emulates boolean 'enabled' attribute if turned on.
Creates groupOfNames holding all enabled objects of this class, all missing
objects are considered disabled.
Options:
* $name_enabled_emulation - boolean, on/off
* $name_enabled_emulation_dn - DN of that groupOfNames, default is
cn=enabled_${name}s,${tree_dn}
Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
${tree_dn} is self.tree_dn.
"""
def __init__(self, conf):
super(EnabledEmuMixIn, self).__init__(conf)
enabled_emulation = '%s_enabled_emulation' % self.options_name
self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
if not self.enabled_emulation_dn:
naming_attr_name = 'cn'
naming_attr_value = 'enabled_%ss' % self.options_name
sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
naming_attr = (naming_attr_name, [naming_attr_value])
else:
# Extract the attribute name and value from the configured DN.
naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
naming_rdn = naming_dn[0][0]
naming_attr = (utf8_decode(naming_rdn[0]),
utf8_decode(naming_rdn[1]))
self.enabled_emulation_naming_attr = naming_attr
def _get_enabled(self, object_id, conn):
dn = self._id_to_dn(object_id)
query = '(member=%s)' % dn
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
query, ['cn'])
except ldap.NO_SUCH_OBJECT:
return False
else:
return bool(enabled_value)
def _add_enabled(self, object_id):
with self.get_connection() as conn:
if not self._get_enabled(object_id, conn):
modlist = [(ldap.MOD_ADD,
'member',
[self._id_to_dn(object_id)])]
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', ['groupOfNames']),
('member', [self._id_to_dn(object_id)]),
self.enabled_emulation_naming_attr]
if self.use_dumb_member:
attr_list[1][1].append(self.dumb_member)
conn.add_s(self.enabled_emulation_dn, attr_list)
def _remove_enabled(self, object_id):
modlist = [(ldap.MOD_DELETE,
'member',
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
pass
def create(self, values):
if self.enabled_emulation:
enabled_value = values.pop('enabled', True)
ref = super(EnabledEmuMixIn, self).create(values)
if 'enabled' not in self.attribute_ignore:
if enabled_value:
self._add_enabled(ref['id'])
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, ldap_filter=None):
with self.get_connection() as conn:
ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
if ('enabled' not in self.attribute_ignore and
self.enabled_emulation):
ref['enabled'] = self._get_enabled(object_id, conn)
return ref
def get_all(self, ldap_filter=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
# had to copy BaseLdap.get_all here to ldap_filter by DN
tenant_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)
if x[0] != self.enabled_emulation_dn]
with self.get_connection() as conn:
for tenant_ref in tenant_list:
tenant_ref['enabled'] = self._get_enabled(
tenant_ref['id'], conn)
return tenant_list
else:
return super(EnabledEmuMixIn, self).get_all(ldap_filter)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
data = values.copy()
enabled_value = data.pop('enabled', None)
ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
if enabled_value is not None:
if enabled_value:
self._add_enabled(object_id)
else:
self._remove_enabled(object_id)
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).update(
object_id, values, old_obj)
def delete(self, object_id):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
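# --- Editorial sketch (not part of the original module) -----------------------
# Shape of the default DN used by EnabledEmuMixIn when no
# *_enabled_emulation_dn option is configured: cn=enabled_<name>s,<tree_dn>.
# The option name and tree DN below are hypothetical.
def _example_default_emulation_dn(options_name='user',
                                  tree_dn='ou=Users,dc=example,dc=com'):
    naming_attr_value = 'enabled_%ss' % options_name
    return '%s=%s,%s' % ('cn', naming_attr_value, tree_dn)
    # -> 'cn=enabled_users,ou=Users,dc=example,dc=com'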
class ProjectLdapStructureMixin(object):
"""Project LDAP Structure shared between LDAP backends.
This is shared between the resource and assignment LDAP backends.
"""
DEFAULT_OU = 'ou=Groups'
DEFAULT_STRUCTURAL_CLASSES = []
DEFAULT_OBJECTCLASS = 'groupOfNames'
DEFAULT_ID_ATTR = 'cn'
NotFound = exception.ProjectNotFound
notfound_arg = 'project_id' # NOTE(yorik-sar): while options_name = tenant
options_name = 'project'
attribute_options_names = {'name': 'name',
'description': 'desc',
'enabled': 'enabled',
'domain_id': 'domain_id'}
immutable_attrs = ['name']
| apache-2.0 | 6,774,204,902,072,156,000 | 38.926969 | 79 | 0.568709 | false |
olivetree123/redash-x | redash/handlers/base.py | 1 | 1202 | from flask_restful import Resource, abort
from flask_login import current_user, login_required
from peewee import DoesNotExist
from redash.authentication.org_resolving import current_org
from redash.tasks import record_event
class BaseResource(Resource):
decorators = [login_required]
def __init__(self, *args, **kwargs):
super(BaseResource, self).__init__(*args, **kwargs)
self._user = None
def dispatch_request(self, *args, **kwargs):
kwargs.pop('org_slug', None)
return super(BaseResource, self).dispatch_request(*args, **kwargs)
@property
def current_user(self):
return current_user._get_current_object()
@property
def current_org(self):
return current_org._get_current_object()
def record_event(self, options):
options.update({
'user_id': self.current_user.id,
'org_id': self.current_org.id
})
record_event.delay(options)
def require_fields(req, fields):
for f in fields:
if f not in req:
abort(400)
def get_object_or_404(fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except DoesNotExist:
abort(404)
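# --- Editorial example (not part of the original module) ----------------------
# A minimal sketch of how the two helpers above are meant to be used inside a
# handler. The `lookup` callable stands in for a peewee model's .get(); any
# callable that raises DoesNotExist is translated into an HTTP 404.
def _example_usage(payload, lookup, object_id):
    require_fields(payload, ('name', 'query'))    # -> abort(400) on a missing key
    return get_object_or_404(lookup, object_id)   # -> abort(404) on DoesNotExist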
| bsd-2-clause | -1,565,178,672,739,305,200 | 24.574468 | 74 | 0.635607 | false |
mfiers/Moa | moa/plugin/job/openLavaActor.py | 1 | 9361 | # Copyright 2009-2011 Mark Fiers
# The New Zealand Institute for Plant & Food Research
#
# This file is part of Moa - http://github.com/mfiers/Moa
#
# Licensed under the GPL license (see 'COPYING')
#
"""
**openLavaActor** - Run jobs through OpenLava
-----------------------------------------------------------
"""
import os
import stat
import subprocess as sp
import sys
import tempfile
import jinja2
import moa.logger
import moa.ui
from moa.sysConf import sysConf
l = moa.logger.getLogger(__name__)
#l.setLevel(moa.logger.DEBUG)
def hook_defineCommandOptions(job, parser):
parser.add_argument('--ol', action='store_const', const='openlava',
dest='actorId', help='Use OpenLava as actor')
parser.add_argument('--olq', default='normal', dest='openlavaQueue',
help='The Openlava queue to submit this job to')
parser.add_argument('--olx', default='', dest='openlavaExtra',
help='Extra arguments for bsub')
parser.add_argument('--oln', default=1, type=int, dest='openlavaProcs',
help='The number of processors the jobs requires')
parser.add_argument('--oldummy', default=False, dest='openlavaDummy',
action='store_true',
help='Do not execute - just create a script to run')
parser.add_argument('--olm', default="", dest='openlavaHost',
help='The host to use for openlava')
def _writeOlTmpFile(wd, _script):
#save the file
tmpdir = os.path.join(wd, '.moa', 'tmp')
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
tf = tempfile.NamedTemporaryFile(dir=tmpdir, prefix='openlava.',
delete=False, suffix='.sh')
if isinstance(_script, list):
tf.write("\n".join(_script))
else:
tf.write(str(_script))
tf.close()
os.chmod(tf.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
return tf.name
@moa.actor.async
def openlavaRunner(wd, cl, conf={}, **kwargs):
"""
Run the job using OPENLAVA
    What does this function do?
    - export the job configuration into the environment of the submit script
- Execute the commandline (in cl)
- store stdout & stderr in log files
- return the rc
"""
#see if we can get a command
command = kwargs.get('command', 'unknown')
if command == 'unknown':
l.critical("runner should be called with a command")
sys.exit(-1)
l.debug("starting openlava actor for %s" % command)
# this is a trick to get the real path of the log dir - but not of
# any underlying directory - in case paths are mounted differently
# on different hosts
outDir = os.path.abspath(os.path.join(wd, '.moa', 'log.latest'))
outDir = outDir.rsplit('.moa', 1)[0] + '.moa' + \
os.path.realpath(outDir).rsplit('.moa', 1)[1]
sysConf.job.data.openlava.outDir = outDir
if not os.path.exists(outDir):
try:
os.makedirs(outDir)
except OSError:
pass
#expect the cl to be nothing more than a single script to execute
outfile = os.path.join(outDir, 'stdout')
errfile = os.path.join(outDir, 'stderr')
sysConf.job.data.openlava.outfile = outfile
sysConf.job.data.openlava.errfile = errfile
bsub_cl = ['bsub']
sc = []
def s(*cl):
sc.append(" ".join(map(str, cl)))
s("#!/bin/bash")
s("#BSUB -o %s" % outfile)
s("#BSUB -e %s" % errfile)
s("#BSUB -q %s" % sysConf.args.openlavaQueue)
if '--oln' in sys.argv:
procs = sysConf.args.openlavaProcs
else:
procs = sysConf.job.conf.get('threads', sysConf.args.openlavaProcs)
s("#BSUB -C %d" % procs)
if sysConf.args.openlavaExtra.strip():
s("#BSUB %s" % sysConf.args.openlavaExtra)
if '--olm' in sys.argv:
s("#BSUB -m %s" % sysConf.args.openlavaHost)
#bsub_cl.extend(["-m", sysConf.args.openlavaHost])
if command == 'run':
prep_jids = sysConf.job.data.openlava.jids.get('prepare', [])
#hold until the 'prepare' jobs are done
#l.critical("Prepare jids - wait for these! %s" % prep_jids)
for j in prep_jids:
s("#BSUB -w 'done(%d)'" % j)
#bsub_cl.extend(["-w", "'done(%d)'" % j])
elif command == 'finish':
run_jids = sysConf.job.data.openlava.jids.get('run', [])
#hold until the 'prepare' jobs are done
for j in run_jids:
s("#BSUB -w 'done(%d)'" % j)
#bsub_cl.extend(["-w", "'done(%d)'" % j])
#give it a reasonable name
jobname = ("%s_%s" % (wd.split('/')[-1], command[0]))
bsub_cl.extend(['-J', jobname])
s("#BSUB -J '%s'" % jobname)
#dump the configuration in the environment
s("")
s("## ensure we're in the correct directory")
s("cd", wd)
s("")
s("## Defining moa specific environment variables")
s("")
confkeys = sorted(conf.keys())
for k in confkeys:
# to prevent collusion, prepend all env variables
# with 'moa_'
if k[0] == '_' or k[:3] == 'moa':
outk = k
else:
outk = 'moa_' + k
v = conf[k]
#this should not happen:
if ' ' in outk:
continue
if isinstance(v, list):
s("%s='%s'" % (outk, " ".join(v)))
elif isinstance(v, dict):
continue
else:
s("%s='%s'" % (outk, v))
s("")
s("## Run the command")
s("")
s(*cl)
if sysConf.args.openlavaDummy:
# Dummy mode - do not execute - just write the script.
ii = 0
while True:
outFile = os.path.join(wd, 'openlava.%s.%d.bash' % (command, ii))
if not os.path.exists(outFile):
break
ii += 1
with open(outFile, 'w') as F:
F.write("\n".join(sc))
moa.ui.message("Created openlava submit script: %s" %
outFile.rsplit('/', 1)[1])
moa.ui.message("now run:")
moa.ui.message(" %s < %s" % ((" ".join(map(str, bsub_cl))),
outFile.rsplit('/', 1)[1]))
return 0
tmpfile = _writeOlTmpFile(wd, sc)
moa.ui.message("Running %s:" % " ".join(map(str, bsub_cl)))
moa.ui.message("(copy of) the bsub script: %s" % tmpfile)
p = sp.Popen(map(str, bsub_cl), cwd=wd, stdout=sp.PIPE, stdin=sp.PIPE)
o, e = p.communicate("\n".join(sc))
jid = int(o.split("<")[1].split(">")[0])
moa.ui.message("Submitted a job to openlava with id %d" % jid)
if not sysConf.job.data.openlava.jids.get(command):
sysConf.job.data.openlava.jids[command] = []
#moa.ui.message("submitted job with openlava job id %s " % jid)
#store the job id submitted
if not sysConf.job.data.openlava.jids.get(command):
sysConf.job.data.openlava.jids[command] = []
if not sysConf.job.data.openlava.get('alljids'):
sysConf.job.data.openlava.alljids = []
sysConf.job.data.openlava.jids[command].append(jid)
sysConf.job.data.openlava.alljids.append(jid)
l.debug("jids stored %s" % str(sysConf.job.data.openlava.jids))
return p.returncode
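# --- Editorial sketch (not part of the original module) -----------------------
# Stand-alone illustration of how openlavaRunner() above exports job
# configuration into the generated bsub script: every key is prefixed with
# 'moa_' unless it already starts with '_' or 'moa'. Dict values are skipped
# and list values are joined with spaces (simplified; values are hypothetical).
def _example_env_lines(conf):
    lines = []
    for k in sorted(conf):
        outk = k if (k.startswith('_') or k.startswith('moa')) else 'moa_' + k
        v = conf[k]
        if isinstance(v, dict):
            continue
        if isinstance(v, list):
            lines.append("%s='%s'" % (outk, " ".join(v)))
        else:
            lines.append("%s='%s'" % (outk, v))
    return lines
# _example_env_lines({'title': 'demo', 'threads': 4})
# -> ["moa_threads='4'", "moa_title='demo'"]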
OnSuccessScript = """#!/bin/bash
#BSUB -o {{ job.data.openlava.outfile }}
#BSUB -e {{ job.data.openlava.errfile }}
#BSUB -q {{ args.openlavaQueue }}
#BSUB -J "{{ job.data.openlava.uid }}_Ok"
{% if args.openlavaHost -%}
#BSUB -m {{ args.openlavaHost }}
{%- endif %}
#BSUB -w '({%- for j in job.data.openlava.alljids -%}
{%- if loop.index0 > 0 %}&&{% endif -%}
done({{j}})
{%- endfor -%})'
cd {{ job.wd }}
echo "Openlava OnSuccess Start"
echo "Killing the OnError job"
bkill -J "{{ job.data.openlava.uid }}_Err"
moasetstatus success
"""
OnErrorScript = """#!/bin/bash
## only run this job if there is a single job
#BSUB -o {{ job.data.openlava.outfile }}
#BSUB -e {{ job.data.openlava.errfile }}
#BSUB -q {{ args.openlavaQueue }}
#BSUB -J "{{ job.data.openlava.uid }}_Err"
{% if args.openlavaHost -%}
#BSUB -m {{ args.openlavaHost }}
{%- endif %}
#BSUB -w '({%- for j in job.data.openlava.alljids -%}
{%- if loop.index0 > 0 %}||{% endif -%}
exit({{j}},!=0)
{%- endfor -%}
)'
cd {{ job.wd }}
echo "Openlava OnError Start"
echo "Killing the all other jobs"
#killing all jobs
{% for j in job.data.openlava.alljids %}
bkill -s 9 {{ j }}
{% endfor %}
bkill -J "{{ job.data.openlava.uid }}_Ok"
moasetstatus error
"""
def hook_async_exit(job):
"""
Need to exit here, and reconvene once all jobs have executed
"""
#make sure that this is the correct actor
actor = moa.actor.getActor()
if actor.__name__ != 'openlavaRunner':
return
jidlist = sysConf.job.data.openlava.get('alljids', [])
if len(jidlist) == 0:
return
uid = "%s.%s" % (job.wd.split('/')[-1],max(jidlist))
sysConf.job.data.openlava.uid = uid
onsuccess = jinja2.Template(OnSuccessScript).render(sysConf)
onerror = jinja2.Template(OnErrorScript).render(sysConf)
with open('succ', 'w') as F:
F.write(onsuccess)
with open('onerr', 'w') as F:
F.write(onerror)
P = sp.Popen('bsub', stdin=sp.PIPE)
P.communicate(onsuccess)
P = sp.Popen('bsub', stdin=sp.PIPE)
P.communicate(onerror)
#register this actor globally
sysConf.actor.actors['openlava'] = openlavaRunner
sysConf.actor.openlava.jids = []
| gpl-3.0 | -2,962,970,957,543,276,500 | 28.253125 | 77 | 0.577075 | false |
stefanseefeld/numba | numba/tests/test_complex.py | 1 | 11311 | from __future__ import print_function, absolute_import, division
import cmath
import itertools
import math
import sys
from numba import unittest_support as unittest
from numba.compiler import compile_isolated, Flags, utils
from numba import types
from .support import TestCase, tag
from .complex_usecases import *
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
class BaseComplexTest(object):
def basic_values(self):
reals = [-0.0, +0.0, 1, -1, +1.5, -3.5,
float('-inf'), float('+inf')]
if sys.platform != 'win32':
reals += [float('nan')]
return [complex(x, y) for x, y in itertools.product(reals, reals)]
def more_values(self):
reals = [-0.0, +0.0, 1, -1, -math.pi, +math.pi,
float('-inf'), float('+inf')]
if sys.platform != 'win32':
reals += [float('nan')]
return [complex(x, y) for x, y in itertools.product(reals, reals)]
def non_nan_values(self):
reals = [-0.0, +0.0, 1, -1, -math.pi, +math.pi,
float('inf'), float('-inf')]
return [complex(x, y) for x, y in itertools.product(reals, reals)]
def run_unary(self, pyfunc, x_types, x_values, ulps=1,
flags=enable_pyobj_flags):
for tx in x_types:
cr = compile_isolated(pyfunc, [tx], flags=flags)
cfunc = cr.entry_point
prec = 'single' if tx in (types.float32, types.complex64) else 'double'
for vx in x_values:
try:
expected = pyfunc(vx)
except ValueError as e:
self.assertIn("math domain error", str(e))
continue
got = cfunc(vx)
msg = 'for input %r with prec %r' % (vx, prec)
self.assertPreciseEqual(got, expected, prec=prec,
ulps=ulps, msg=msg)
def run_binary(self, pyfunc, value_types, values, ulps=1,
flags=enable_pyobj_flags):
for tx, ty in value_types:
cr = compile_isolated(pyfunc, [tx, ty], flags=flags)
cfunc = cr.entry_point
prec = ('single'
if set([tx, ty]) & set([types.float32, types.complex64])
else 'double')
for vx, vy in values:
try:
expected = pyfunc(vx, vy)
except ValueError as e:
self.assertIn("math domain error", str(e))
continue
except ZeroDivisionError:
continue
got = cfunc(vx, vy)
msg = 'for input %r with prec %r' % ((vx, vy), prec)
self.assertPreciseEqual(got, expected, prec=prec,
ulps=ulps, msg=msg)
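# --- Editorial sketch (not part of the original test module) ------------------
# Rough illustration of the "ulps" tolerance that assertPreciseEqual applies in
# the helpers above: two values match if they are at most N representable
# floating-point steps apart. This is a simplified stand-in, not the helper
# numba's test support actually uses.
def _example_within_ulps(a, b, ulps=1):
    import numpy as np
    spacing = np.spacing(max(abs(a), abs(b)))  # size of one ulp near the operands
    return abs(a - b) <= ulps * spacing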
class TestComplex(BaseComplexTest, TestCase):
def test_real(self, flags=enable_pyobj_flags):
self.run_unary(real_usecase, [types.complex64, types.complex128],
self.basic_values(), flags=flags)
self.run_unary(real_usecase, [types.int8, types.int64],
[1, 0, -3], flags=flags)
self.run_unary(real_usecase, [types.float32, types.float64],
[1.5, -0.5], flags=flags)
def test_real_npm(self):
self.test_real(flags=no_pyobj_flags)
def test_imag(self, flags=enable_pyobj_flags):
self.run_unary(imag_usecase, [types.complex64, types.complex128],
self.basic_values(), flags=flags)
self.run_unary(imag_usecase, [types.int8, types.int64],
[1, 0, -3], flags=flags)
self.run_unary(imag_usecase, [types.float32, types.float64],
[1.5, -0.5], flags=flags)
def test_imag_npm(self):
self.test_imag(flags=no_pyobj_flags)
def test_conjugate(self, flags=enable_pyobj_flags):
self.run_unary(conjugate_usecase, [types.complex64, types.complex128],
self.basic_values(), flags=flags)
self.run_unary(conjugate_usecase, [types.int8, types.int64],
[1, 0, -3], flags=flags)
self.run_unary(conjugate_usecase, [types.float32, types.float64],
[1.5, -0.5], flags=flags)
def test_conjugate_npm(self):
self.test_conjugate(flags=no_pyobj_flags)
def test_div(self, flags=enable_pyobj_flags):
"""
Test complex.__div__ implementation with non-trivial values.
"""
# XXX Fold into test_operator?
values = list(itertools.product(self.more_values(), self.more_values()))
value_types = [(types.complex128, types.complex128),
(types.complex64, types.complex64)]
self.run_binary(div_usecase, value_types, values, flags=flags)
@tag('important')
def test_div_npm(self):
self.test_div(flags=no_pyobj_flags)
class TestCMath(BaseComplexTest, TestCase):
"""
Tests for cmath module support.
"""
def check_predicate_func(self, pyfunc, flags):
self.run_unary(pyfunc, [types.complex128, types.complex64],
self.basic_values(), flags=flags)
def check_unary_func(self, pyfunc, flags, ulps=1, values=None):
self.run_unary(pyfunc, [types.complex128],
values or self.more_values(), flags=flags, ulps=ulps)
# Avoid discontinuities around pi when in single precision.
self.run_unary(pyfunc, [types.complex64],
values or self.basic_values(), flags=flags, ulps=ulps)
# Conversions
def test_phase(self):
self.check_unary_func(phase_usecase, enable_pyobj_flags)
def test_phase_npm(self):
self.check_unary_func(phase_usecase, no_pyobj_flags)
def test_polar(self):
self.check_unary_func(polar_usecase, enable_pyobj_flags)
def test_polar_npm(self):
self.check_unary_func(polar_usecase, no_pyobj_flags)
def test_rect(self, flags=enable_pyobj_flags):
def do_test(tp, seed_values):
values = [(z.real, z.imag) for z in seed_values
if not math.isinf(z.imag) or z.real == 0]
self.run_binary(rect_usecase, [(tp, tp)], values, flags=flags)
do_test(types.float64, self.more_values())
# Avoid discontinuities around pi when in single precision.
do_test(types.float32, self.basic_values())
def test_rect_npm(self):
self.test_rect(flags=no_pyobj_flags)
# Classification
def test_isnan(self, flags=enable_pyobj_flags):
self.check_predicate_func(isnan_usecase, enable_pyobj_flags)
@tag('important')
def test_isnan_npm(self):
self.check_predicate_func(isnan_usecase, no_pyobj_flags)
def test_isinf(self, flags=enable_pyobj_flags):
self.check_predicate_func(isinf_usecase, enable_pyobj_flags)
@tag('important')
def test_isinf_npm(self):
self.check_predicate_func(isinf_usecase, no_pyobj_flags)
@unittest.skipIf(utils.PYVERSION < (3, 2), "needs Python 3.2+")
def test_isfinite(self, flags=enable_pyobj_flags):
self.check_predicate_func(isfinite_usecase, enable_pyobj_flags)
@unittest.skipIf(utils.PYVERSION < (3, 2), "needs Python 3.2+")
def test_isfinite_npm(self):
self.check_predicate_func(isfinite_usecase, no_pyobj_flags)
# Power and logarithms
def test_exp(self):
self.check_unary_func(exp_usecase, enable_pyobj_flags, ulps=2)
def test_exp_npm(self):
# Aggressive optimization fixes the following subnormal float problem.
## The two tests are failing due to subnormal float problems.
## We are seeing (6.9532198665326e-310+2.1221202807e-314j) != 0j
self.check_unary_func(exp_usecase, no_pyobj_flags, ulps=2)
def test_log(self):
self.check_unary_func(log_usecase, enable_pyobj_flags)
def test_log_npm(self):
self.check_unary_func(log_usecase, no_pyobj_flags)
def test_log_base(self, flags=enable_pyobj_flags):
values = list(itertools.product(self.more_values(), self.more_values()))
value_types = [(types.complex128, types.complex128),
(types.complex64, types.complex64)]
self.run_binary(log_base_usecase, value_types, values, flags=flags,
ulps=3)
def test_log_base_npm(self):
self.test_log_base(flags=no_pyobj_flags)
def test_log10(self):
self.check_unary_func(log10_usecase, enable_pyobj_flags)
def test_log10_npm(self):
self.check_unary_func(log10_usecase, no_pyobj_flags)
def test_sqrt(self):
self.check_unary_func(sqrt_usecase, enable_pyobj_flags)
def test_sqrt_npm(self):
self.check_unary_func(sqrt_usecase, no_pyobj_flags)
# Trigonometric functions
def test_acos(self):
self.check_unary_func(acos_usecase, enable_pyobj_flags, ulps=2)
def test_acos_npm(self):
self.check_unary_func(acos_usecase, no_pyobj_flags, ulps=2)
def test_asin(self):
self.check_unary_func(asin_usecase, enable_pyobj_flags, ulps=2)
def test_asin_npm(self):
self.check_unary_func(asin_usecase, no_pyobj_flags, ulps=2)
def test_atan(self):
self.check_unary_func(atan_usecase, enable_pyobj_flags, ulps=2,)
def test_atan_npm(self):
self.check_unary_func(atan_usecase, no_pyobj_flags, ulps=2,)
def test_cos(self):
self.check_unary_func(cos_usecase, enable_pyobj_flags, ulps=2)
def test_cos_npm(self):
self.check_unary_func(cos_usecase, no_pyobj_flags, ulps=2)
def test_sin(self):
# See test_sinh.
self.check_unary_func(sin_usecase, enable_pyobj_flags)
def test_sin_npm(self):
self.check_unary_func(sin_usecase, no_pyobj_flags)
def test_tan(self):
self.check_unary_func(tan_usecase, enable_pyobj_flags, ulps=2)
    def test_tan_npm(self):
        self.check_unary_func(tan_usecase, no_pyobj_flags, ulps=2)
# Hyperbolic functions
def test_acosh(self):
self.check_unary_func(acosh_usecase, enable_pyobj_flags)
def test_acosh_npm(self):
self.check_unary_func(acosh_usecase, no_pyobj_flags)
def test_asinh(self):
self.check_unary_func(asinh_usecase, enable_pyobj_flags, ulps=2)
def test_asinh_npm(self):
self.check_unary_func(asinh_usecase, no_pyobj_flags, ulps=2)
def test_atanh(self):
self.check_unary_func(atanh_usecase, enable_pyobj_flags, ulps=2)
def test_atanh_npm(self):
self.check_unary_func(atanh_usecase, no_pyobj_flags, ulps=2)
def test_cosh(self):
self.check_unary_func(cosh_usecase, enable_pyobj_flags, ulps=2)
def test_cosh_npm(self):
self.check_unary_func(cosh_usecase, no_pyobj_flags, ulps=2)
def test_sinh(self):
self.check_unary_func(sinh_usecase, enable_pyobj_flags)
def test_sinh_npm(self):
self.check_unary_func(sinh_usecase, no_pyobj_flags)
def test_tanh(self):
self.check_unary_func(tanh_usecase, enable_pyobj_flags, ulps=2)
    def test_tanh_npm(self):
        self.check_unary_func(tanh_usecase, no_pyobj_flags, ulps=2)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -6,641,714,589,291,190,000 | 35.022293 | 83 | 0.604721 | false |
hackerbot/DjangoDev | django/forms/models.py | 2 | 55275 | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory',
'BaseInlineFormSet', 'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
opts = instance._meta
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# Wrap up the saving of m2m data as a function.
def save_m2m():
cleaned_data = form.cleaned_data
# Note that for historical reasons we want to include also
# virtual_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.virtual_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# If we are committing, save the instance and the m2m data immediately.
instance.save()
save_m2m()
else:
# We're not committing. Add a method to the form to allow deferred
# saving of m2m data.
form.save_m2m = save_m2m
return instance
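# --- Editorial example (not part of Django's source) --------------------------
# Typical use of the deferred m2m saving wired up above: with commit=False the
# caller receives an unsaved instance plus a form.save_m2m() callback. The
# form and the extra 'owner' attribute are hypothetical.
def _example_save_with_m2m(form, owner):
    article = form.save(commit=False)
    article.owner = owner        # fill in data the form does not carry
    article.save()
    form.save_m2m()              # persist the deferred many-to-many data
    return article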
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.virtual_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
# If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
qs = f.value_from_object(instance)
if qs._result_cache is not None:
data[f.name] = [item.pk for item in qs]
else:
data[f.name] = list(qs.values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(instance)
return data
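# --- Editorial example (not part of Django's source) --------------------------
# model_to_dict() is what seeds a ModelForm's ``initial`` data from an existing
# instance; field names below are hypothetical.
def _example_initial_from_instance(instance):
    initial = model_to_dict(instance, fields=['title', 'status'])
    # e.g. {'title': 'Hello', 'status': 'draft'} -- m2m fields become pk lists
    return initial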
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Returns a ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_virtual_fields = [f for f in opts.virtual_fields
if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_virtual_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs['form_class'] = field_classes[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
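# --- Editorial example (not part of Django's source) --------------------------
# Sketch of calling fields_for_model() directly: it returns an OrderedDict of
# form fields, honouring the fields/widgets/labels overrides. The model and
# its field names are hypothetical.
def _example_fields_for_model(model_cls):
    from django.forms.widgets import Textarea
    return fields_for_model(
        model_cls,
        fields=['title', 'body'],
        widgets={'body': Textarea(attrs={'rows': 4})},
        labels={'title': 'Headline'},
    )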
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
self.field_classes = getattr(options, 'field_classes', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(opts.model, opts.fields, opts.exclude,
opts.widgets, formfield_callback,
opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages,
opts.field_classes)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = (set(none_model_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
# Apply ``limit_choices_to`` to each field.
for field_name in self.fields:
formfield = self.fields[field_name]
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field, None)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
for field, messages in errors.error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# a subset of `exclude` which won't have the InlineForeignKeyField
# if we're adding a new object since that value doesn't exist
# until after the new instance is saved to the database.
construct_instance_exclude = list(exclude)
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
if self.cleaned_data.get(name) is not None and self.cleaned_data[name]._state.adding:
construct_instance_exclude.append(name)
exclude.append(name)
# Update the model instance with self.cleaned_data.
self.instance = construct_instance(self, self.instance, opts.fields, construct_instance_exclude)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
return save_instance(self, self.instance, self._meta.fields,
fail_message, commit, self._meta.exclude,
construct=False)
save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
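# --- Editorial example (not part of Django's source) --------------------------
# Canonical use of the machinery above: subclass ModelForm and declare the
# model plus an explicit ``fields`` (or ``exclude``) on Meta -- omitting both
# raises the ImproperlyConfigured error enforced by ModelFormMetaclass. The
# model and its fields are hypothetical, so the class is built inside a helper.
def _example_article_form(article_model):
    class ArticleForm(ModelForm):
        class Meta:
            model = article_model
            fields = ['title', 'body', 'tags']
    return ArticleForm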
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
if field_classes is not None:
attrs['field_classes'] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
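# --- Editorial example (not part of Django's source) --------------------------
# The same form class built without writing a subclass by hand; as checked
# above, 'fields' or 'exclude' must be passed explicitly. Names hypothetical.
def _example_modelform_factory(article_model):
    return modelform_factory(article_model, fields=['title', 'body'])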
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.remote_field is not None:
field = field.remote_field.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# get data for each field of each of unique_check
row_data = (form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, "
"which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
if form in forms_to_delete:
# If the pk is None, it means that the object can't be
# deleted again. Possible reason for this is that the
# object was already deleted from the DB. Refs #14877.
if obj.pk is None:
continue
self.deleted_objects.append(obj)
if commit:
obj.delete()
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be false, but for some
# reason, auto_created pk fields and AutoField's editable attribute is
# True, so check for that as well.
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.remote_field and pk.remote_field.parent_link and pk_is_not_editable(pk.remote_field.model._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.remote_field.model._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns a FormSet class for the given Django model class.
"""
meta = getattr(form, 'Meta', None)
if meta is None:
meta = type(str('Meta'), (object,), {})
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts,
error_messages=error_messages, field_classes=field_classes)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
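# --- Editorial example (not part of Django's source) --------------------------
# Building and binding a formset that edits several rows of one model at once;
# extra=1 appends one blank form. The model and POST data are hypothetical.
def _example_author_formset(author_model, post_data=None):
    AuthorFormSet = modelformset_factory(author_model, fields=['name'], extra=1)
    return AuthorFormSet(data=post_data, queryset=author_model._default_manager.all())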
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
# Remove the primary key from the form's data, we are only
# creating new instances
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
fk_value = getattr(self.instance, self.fk.remote_field.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.remote_field.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
kwargs['to_field'] = self.fk.remote_field.field_name
# If we're adding a new object, ignore a parent's auto-generated pk
# as it will be regenerated on the save request.
if self.instance._state.adding and form._meta.model._meta.pk.has_default():
self.instance.pk = None
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.remote_field.model != parent_model and
fk.remote_field.model not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label)
)
elif len(fks_to_parent) == 0:
raise ValueError(
"'%s' has no field named '%s'." % (model._meta.label, fk_name)
)
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey)
and (f.remote_field.model == parent_model
or f.remote_field.model in parent_model._meta.get_parent_list())
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise ValueError(
"'%s' has no ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
else:
raise ValueError(
"'%s' has more than one ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
'field_classes': field_classes,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
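# Hypothetical usage sketch (Author/Book are illustrative models where Book has a ForeignKey to Author):
#     BookFormSet = inlineformset_factory(Author, Book, fields=('title',))
#     formset = BookFormSet(instance=some_author)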
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# if there is no value act as we did before.
return self.parent_instance
        # ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
for obj in self.queryset.iterator():
yield self.choice(obj)
def __len__(self):
return (len(self.queryset) +
(1 if self.field.empty_label is not None else 0))
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
def __init__(self, queryset, empty_label="---------",
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
*args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Returns ``limit_choices_to`` for this form field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
# this method will be used to create object labels by the QuerySetIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_text(obj)
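    # Subclasses may override label_from_instance to customize labels, e.g. (hypothetical):
    #     class AuthorChoiceField(ModelChoiceField):
    #         def label_from_instance(self, obj):
    #             return obj.get_full_name()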
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(queryset, None,
required, widget, label, initial, help_text, *args, **kwargs)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
        # deduplicate the given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
| bsd-3-clause | 4,096,164,790,503,905,300 | 41.00228 | 124 | 0.589326 | false |
rleigh-dundee/openmicroscopy | components/tools/OmeroWeb/omeroweb/settings.py | 1 | 22862 | #!/usr/bin/env python
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # Django settings for OMERO.web project. # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
# Copyright (c) 2008 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import os.path
import sys
import datetime
import logging
import omero
import omero.config
import omero.clients
import tempfile
import exceptions
import re
from django.utils import simplejson as json
from portalocker import LockException
logger = logging.getLogger(__name__)
# LOGS
# NEVER DEPLOY a site into production with DEBUG turned on.
# Debuging mode.
# A boolean that turns on/off debug mode.
# handler404 and handler500 works only when False
if os.environ.has_key('OMERO_HOME'):
OMERO_HOME =os.environ.get('OMERO_HOME')
else:
OMERO_HOME = os.path.join(os.path.dirname(__file__), '..', '..', '..')
OMERO_HOME = os.path.normpath(OMERO_HOME)
INSIGHT_JARS = os.path.join(OMERO_HOME, "lib", "insight").replace('\\','/')
WEBSTART = False
if os.path.isdir(INSIGHT_JARS):
WEBSTART = True
# Logging
LOGDIR = os.path.join(OMERO_HOME, 'var', 'log').replace('\\','/')
if not os.path.isdir(LOGDIR):
try:
os.makedirs(LOGDIR)
except Exception, x:
exctype, value = sys.exc_info()[:2]
raise exctype, value
# DEBUG: Never deploy a site into production with DEBUG turned on.
# Logging levels: logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL
# FORMAT: 2010-01-01 00:00:00,000 INFO [omeroweb.webadmin.webadmin_utils ] (proc.1308 ) getGuestConnection:20 Open connection is not available
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)5.5s [%(name)40.40s] (proc.%(process)5.5d) %(funcName)s:%(lineno)d %(message)s'
},
},
'handlers': {
'default': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGDIR, 'OMEROweb.log').replace('\\','/'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'request_handler': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGDIR, 'OMEROweb_request.log').replace('\\','/'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django.request': { # Stop SQL debug from logging to main logger
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': False
},
'django': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': True
},
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
}
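# The handlers above rotate OMEROweb.log and OMEROweb_request.log (5 MB x 5 backups) under
# LOGDIR; when DEBUG is disabled, the logger levels are raised to INFO further below.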
# Load custom settings from etc/grid/config.xml
# Tue 2 Nov 2010 11:03:18 GMT -- ticket:3228
from omero.util.concurrency import get_event
CONFIG_XML = os.path.join(OMERO_HOME, 'etc', 'grid', 'config.xml')
count = 10
event = get_event("websettings")
while True:
try:
CONFIG_XML = omero.config.ConfigXml(CONFIG_XML)
CUSTOM_SETTINGS = CONFIG_XML.as_map()
CONFIG_XML.close()
break
except LockException:
#logger.error("Exception while loading configuration retrying...", exc_info=True)
exctype, value = sys.exc_info()[:2]
count -= 1
if not count:
raise exctype, value
else:
event.wait(1) # Wait a total of 10 seconds
except:
#logger.error("Exception while loading configuration...", exc_info=True)
exctype, value = sys.exc_info()[:2]
raise exctype, value
del event
del count
del get_event
FASTCGI = "fastcgi"
FASTCGITCP = "fastcgi-tcp"
FASTCGI_TYPES = (FASTCGI, FASTCGITCP)
DEVELOPMENT = "development"
DEFAULT_SERVER_TYPE = FASTCGITCP
ALL_SERVER_TYPES = (FASTCGITCP, FASTCGI, DEVELOPMENT)
DEFAULT_SESSION_ENGINE = 'django.contrib.sessions.backends.file'
SESSION_ENGINE_VALUES = ('django.contrib.sessions.backends.db',
'django.contrib.sessions.backends.file',
'django.contrib.sessions.backends.cache',
'django.contrib.sessions.backends.cached_db')
def parse_boolean(s):
s = s.strip().lower()
if s in ('true', '1', 't'):
return True
return False
def parse_paths(s):
return [os.path.normpath(path) for path in json.loads(s)]
def check_server_type(s):
if s not in ALL_SERVER_TYPES:
raise ValueError("Unknown server type: %s. Valid values are: %s" % (s, ALL_SERVER_TYPES))
return s
def check_session_engine(s):
if s not in SESSION_ENGINE_VALUES:
raise ValueError("Unknown session engine: %s. Valid values are: %s" % (s, SESSION_ENGINE_VALUES))
return s
def identity(x):
return x
def remove_slash(s):
if s is not None and len(s) > 0:
if s.endswith("/"):
s = s[:-1]
return s
class LeaveUnset(exceptions.Exception):
pass
def leave_none_unset(s):
if s is None:
raise LeaveUnset()
return s
CUSTOM_SETTINGS_MAPPINGS = {
"omero.web.apps": ["ADDITIONAL_APPS", '[]', json.loads],
"omero.web.public.enabled": ["PUBLIC_ENABLED", "false", parse_boolean],
"omero.web.public.url_filter": ["PUBLIC_URL_FILTER", r'^/(?!webadmin)', re.compile],
"omero.web.public.server_id": ["PUBLIC_SERVER_ID", 1, int],
"omero.web.public.user": ["PUBLIC_USER", None, leave_none_unset],
"omero.web.public.password": ["PUBLIC_PASSWORD", None, leave_none_unset],
"omero.web.public.cache.enabled": ["PUBLIC_CACHE_ENABLED", "false", parse_boolean],
"omero.web.public.cache.key": ["PUBLIC_CACHE_KEY", "omero.web.public.cache.key", str],
"omero.web.public.cache.timeout": ["PUBLIC_CACHE_TIMEOUT", 60 * 60 * 24, int],
"omero.web.databases": ["DATABASES", '{}', json.loads],
"omero.web.admins": ["ADMINS", '[]', json.loads],
"omero.web.application_server": ["APPLICATION_SERVER", DEFAULT_SERVER_TYPE, check_server_type],
"omero.web.application_server.host": ["APPLICATION_SERVER_HOST", "0.0.0.0", str],
"omero.web.application_server.port": ["APPLICATION_SERVER_PORT", "4080", str],
"omero.web.application_server.max_requests": ["APPLICATION_SERVER_MAX_REQUESTS", 400, int],
"omero.web.ping_interval": ["PING_INTERVAL", 60000, int],
"omero.web.static_url": ["STATIC_URL", "/static/", str],
"omero.web.staticfile_dirs": ["STATICFILES_DIRS", '[]', json.loads],
"omero.web.index_template": ["INDEX_TEMPLATE", None, identity],
"omero.web.caches": ["CACHES", '{}', json.loads],
"omero.web.webgateway_cache": ["WEBGATEWAY_CACHE", None, leave_none_unset],
"omero.web.session_engine": ["SESSION_ENGINE", DEFAULT_SESSION_ENGINE, check_session_engine],
"omero.web.debug": ["DEBUG", "false", parse_boolean],
"omero.web.email_host": ["EMAIL_HOST", None, identity],
"omero.web.email_host_password": ["EMAIL_HOST_PASSWORD", None, identity],
"omero.web.email_host_user": ["EMAIL_HOST_USER", None, identity],
"omero.web.email_port": ["EMAIL_PORT", None, identity],
"omero.web.email_subject_prefix": ["EMAIL_SUBJECT_PREFIX", "[OMERO.web] ", str],
"omero.web.email_use_tls": ["EMAIL_USE_TLS", "false", parse_boolean],
"omero.web.logdir": ["LOGDIR", LOGDIR, str],
"omero.web.login_view": ["LOGIN_VIEW", "weblogin", str],
"omero.web.send_broken_link_emails": ["SEND_BROKEN_LINK_EMAILS", "true", parse_boolean],
"omero.web.server_email": ["SERVER_EMAIL", None, identity],
"omero.web.server_list": ["SERVER_LIST", '[["localhost", 4064, "omero"]]', json.loads],
# Configuration options for the viewer
"omero.web.viewer.initial_zoom_level": ["VIEWER_INITIAL_ZOOM_LEVEL", -1, int],
# the following parameters configure when to show/hide the 'Volume viewer' icon in the Image metadata panel
"omero.web.open_astex_max_side": ["OPEN_ASTEX_MAX_SIDE", 400, int],
"omero.web.open_astex_min_side": ["OPEN_ASTEX_MIN_SIDE", 20, int],
"omero.web.open_astex_max_voxels": ["OPEN_ASTEX_MAX_VOXELS", 27000000, int], # 300 x 300 x 300
"omero.web.scripts_to_ignore": ["SCRIPTS_TO_IGNORE", '["/omero/figure_scripts/Movie_Figure.py", "/omero/figure_scripts/Split_View_Figure.py", "/omero/figure_scripts/Thumbnail_Figure.py", "/omero/figure_scripts/ROI_Split_Figure.py", "/omero/export_scripts/Make_Movie.py"]', parse_paths],
# Add links to the top header: links are ['Link Text', 'link'], where the url is reverse("link") OR simply 'link' (for external urls)
"omero.web.ui.top_links": ["TOP_LINKS", '[]', json.loads], # E.g. '[["Webtest", "webtest_index"]]'
# Add plugins to the right-hand & center panels: plugins are ['Label', 'include.js', 'div_id']. The javascript loads data into $('#div_id').
"omero.web.ui.right_plugins": ["RIGHT_PLUGINS", '[["Acquisition", "webclient/data/includes/right_plugin.acquisition.js.html", "metadata_tab"],'\
#'["ROIs", "webtest/webclient_plugins/right_plugin.rois.js.html", "image_roi_tab"],'\
'["Preview", "webclient/data/includes/right_plugin.preview.js.html", "preview_tab"]]', json.loads],
# E.g. Center plugin: ["Channel overlay", "webtest/webclient_plugins/center_plugin.overlay.js.html", "channel_overlay_panel"]
"omero.web.ui.center_plugins": ["CENTER_PLUGINS", '['\
#'["Split View", "webclient/data/includes/center_plugin.splitview.js.html", "split_view_panel"],'\
'["Table", "webclient/data/includes/center_plugin.table.js.html", "image_table"]]', json.loads],
# sharing no longer uses this variable; it was replaced by request.build_absolute_uri.
# After testing, the commented-out line below should be removed.
# "omero.web.application_host": ["APPLICATION_HOST", None, remove_slash],
# WEBSTART
"omero.web.webstart_jar": ["WEBSTART_JAR", "omero.insight.jar", str],
"omero.web.webstart_icon": ["WEBSTART_ICON", "webstart/img/icon-omero-insight.png", str],
"omero.web.webstart_heap": ["WEBSTART_HEAP", "1024m", str],
"omero.web.webstart_host": ["WEBSTART_HOST", "localhost", str],
"omero.web.webstart_port": ["WEBSTART_PORT", "4064", str],
"omero.web.webstart_class": ["WEBSTART_CLASS", "org.openmicroscopy.shoola.Main", str],
"omero.web.webstart_title": ["WEBSTART_TITLE", "OMERO.insight", str],
"omero.web.webstart_vendor": ["WEBSTART_VENDOR", "The Open Microscopy Environment", str],
"omero.web.webstart_homepage": ["WEBSTART_HOMEPAGE", "http://www.openmicroscopy.org", str],
}
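# Each entry above maps an "omero.web.*" config key to [Django setting name, default value,
# parser]; the loop below reads the value from CUSTOM_SETTINGS (falling back to the default)
# and publishes the parsed result into this module's globals, e.g. "omero.web.debug" -> DEBUG.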
for key, values in CUSTOM_SETTINGS_MAPPINGS.items():
global_name, default_value, mapping = values
try:
global_value = CUSTOM_SETTINGS[key]
values.append(False)
except KeyError:
global_value = default_value
values.append(True)
try:
globals()[global_name] = mapping(global_value)
except ValueError:
raise ValueError("Invalid %s JSON: %r" % (global_name, global_value))
except LeaveUnset:
pass
if not DEBUG:
LOGGING['loggers']['django.request']['level'] = 'INFO'
LOGGING['loggers']['django']['level'] = 'INFO'
LOGGING['loggers']['']['level'] = 'INFO'
# TEMPLATE_DEBUG: A boolean that turns on/off template debug mode. If this is True, the fancy
# error page will display a detailed report for any TemplateSyntaxError. This report contains
# the relevant snippet of the template, with the appropriate line highlighted.
# Note that Django only displays fancy error pages if DEBUG is True, alternatively error
# is handled by:
# handler404 = "omeroweb.feedback.views.handler404"
# handler500 = "omeroweb.feedback.views.handler500"
TEMPLATE_DEBUG = DEBUG
from django.views.debug import cleanse_setting
for key in sorted(CUSTOM_SETTINGS_MAPPINGS):
values = CUSTOM_SETTINGS_MAPPINGS[key]
global_name, default_value, mapping, using_default = values
source = using_default and "default" or key
global_value = globals().get(global_name, None)
if global_name.isupper():
logger.debug("%s = %r (source:%s)", global_name, cleanse_setting(global_name, global_value), source)
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
FIRST_DAY_OF_WEEK = 0 # 0-Monday, ... 6-Sunday
# LANGUAGE_CODE: A string representing the language code for this installation. This should be
# in standard language format. For example, U.S. English is "en-us".
LANGUAGE_CODE = 'en-gb'
# SECRET_KEY: A secret key for this particular Django installation. Used to provide a seed
# in secret-key hashing algorithms. Set this to a random string -- the longer, the better.
# django-admin.py startproject creates one automatically.
# Make this unique, and don't share it with anybody.
SECRET_KEY = '@@k%g#7=%4b6ib7yr1tloma&g0s2nni6ljf!m0h&x9c712c7yj'
# USE_I18N: A boolean that specifies whether Django's internationalization system should be enabled.
# This provides an easy way to turn it off, for performance. If this is set to False, Django will
# make some optimizations so as not to load the internationalization machinery.
USE_I18N = True
# MIDDLEWARE_CLASSES: A tuple of middleware classes to use.
# See https://docs.djangoproject.com/en/1.3/topics/http/middleware/.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
# ROOT_URLCONF: A string representing the full Python import path to your root URLconf.
# For example: "mydjangoapps.urls". Can be overridden on a per-request basis by setting
# the attribute urlconf on the incoming HttpRequest object.
ROOT_URLCONF = 'omeroweb.urls'
# STATICFILES_FINDERS: The list of finder backends that know how to find static files
# in various locations. The default will find files stored in the STATICFILES_DIRS setting
# (using django.contrib.staticfiles.finders.FileSystemFinder) and in a static subdirectory
# of each app (using django.contrib.staticfiles.finders.AppDirectoriesFinder)
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder"
)
# STATIC_URL: URL to use when referring to static files located in STATIC_ROOT.
# Example: "/site_media/static/" or "http://static.example.com/".
# If not None, this will be used as the base path for media definitions and the staticfiles
# app. It must end in a slash if set to a non-empty value.
# This var is configurable by omero.web.static_url (default: STATIC_URL = '/static/')
# STATIC_ROOT: The absolute path to the directory where collectstatic will collect static
# files for deployment. If the staticfiles contrib app is enabled (default) the collectstatic
# management command will collect static files into this directory.
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static').replace('\\','/')
# STATICFILES_DIRS: This setting defines the additional locations the staticfiles app will
# traverse if the FileSystemFinder finder is enabled, e.g. if you use the collectstatic or
# findstatic management command or use the static file serving view.
if WEBSTART:
STATICFILES_DIRS += (("webstart/jars", INSIGHT_JARS),)
# TEMPLATE_CONTEXT_PROCESSORS: A tuple of callables that are used to populate the context
# in RequestContext. These callables take a request object as their argument and return
# a dictionary of items to be merged into the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages"
)
# TEMPLATE_LOADERS: A tuple of template loader classes, specified as strings. Each Loader class
# knows how to import templates from a particular source. Optionally, a tuple can be used
# instead of a string. The first item in the tuple should be the Loader's module, subsequent items
# are passed to the Loader during initialization.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# TEMPLATE_DIRS: List of locations of the template source files, in search order. Note that these
# paths should use Unix-style forward slashes, even on Windows.
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". Always use
# forward slashes, even on Windows. Don't forget to use absolute paths, not relative paths.
# TEMPLATE_DIRS = ()
# INSTALLED_APPS: A tuple of strings designating all applications that are enabled in this Django
# installation. Each string should be a full Python path to a Python package that contains
# a Django application, as created by django-admin.py startapp.
INSTALLED_APPS = (
'django.contrib.staticfiles',
'django.contrib.markup',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'omeroweb.feedback',
'omeroweb.webadmin',
'omeroweb.webclient',
'omeroweb.webgateway',
'omeroweb.webtest',
'omeroweb.webredirect',
'omeroweb.webstart',
)
# ADDITIONAL_APPS: We import any settings.py from apps. This allows them to modify settings.
for app in ADDITIONAL_APPS:
INSTALLED_APPS += ('omeroweb.%s' % app,)
try:
a = __import__('%s.settings' % app)
except ImportError:
logger.debug("Couldn't import settings from app: %s" % app)
# FEEDBACK_URL: Used in feedback.sendfeedback.SendFeedback class in order to submit
# error or comment messages to http://qa.openmicroscopy.org.uk.
FEEDBACK_URL = "qa.openmicroscopy.org.uk:80"
# IGNORABLE_404_STARTS:
# Default: ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
# IGNORABLE_404_ENDS:
# Default: ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# SESSION_FILE_PATH: If you're using file-based session storage, this sets the directory in which Django
# will store session data. When the default value (None) is used, Django will use the standard temporary
# directory for the system.
SESSION_FILE_PATH = tempfile.gettempdir()
# SESSION_EXPIRE_AT_BROWSER_CLOSE: Whether to expire the session when the user closes his or her browser.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True # False
# SESSION_COOKIE_AGE: The age of session cookies, in seconds. See How to use sessions.
SESSION_COOKIE_AGE = 86400 # 1 day in sec (86400)
# FILE_UPLOAD_TEMP_DIR: The directory to store data temporarily while uploading files.
FILE_UPLOAD_TEMP_DIR = tempfile.gettempdir()
# FILE_UPLOAD_MAX_MEMORY_SIZE: The maximum size (in bytes) that an upload will be before it gets streamed
# to the file system.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 #default 2621440 (i.e. 2.5 MB).
# DEFAULT_IMG: Used in webclient.webclient_gateway.OmeroWebGateway.defaultThumbnail in order to load default
# image while thumbnail can't be retrieved from the server.
DEFAULT_IMG = os.path.join(os.path.dirname(__file__), 'webgateway', 'static', 'webgateway', 'img', 'image128.png').replace('\\','/')
# DEFAULT_USER: Used in webclient.webclient_gateway.OmeroWebGateway.getExperimenterDefaultPhoto in order to load default
# avatar while experimenter photo can't be retrieved from the server.
DEFAULT_USER = os.path.join(os.path.dirname(__file__), 'webgateway', 'static', 'webgateway', 'img', 'personal32.png').replace('\\','/')
# MANAGERS: A tuple in the same format as ADMINS that specifies who should get broken-link notifications when
# SEND_BROKEN_LINK_EMAILS=True.
MANAGERS = ADMINS
# PAGE: Used in various locations where a large amount of data is retrieved from the server.
try:
PAGE
except:
PAGE = 200
EMAIL_TEMPLATES = {
'create_share': {
'html_content':'<p>Hi,</p><p>I would like to share some of my data with you.<br/>Please find it on the <a href="%s?server=%i">%s?server=%i</a>.</p><p>%s</p>',
        'text_content':'Hi, I would like to share some of my data with you. Please find it on the %s?server=%i. \n %s'
},
'add_member_to_share': {
'html_content':'<p>Hi,</p><p>I would like to share some of my data with you.<br/>Please find it on the <a href="%s?server=%i">%s?server=%i</a>.</p><p>%s</p>',
        'text_content':'Hi, I would like to share some of my data with you. Please find it on the %s?server=%i. \n %s'
},
'remove_member_from_share': {
'html_content':'<p>You were removed from the share <a href="%s?server=%i">%s?server=%i</a>. This share is no longer available for you.</p>',
'text_content':'You were removed from the share %s?server=%i. This share is no longer available for you.'
},
'add_comment_to_share': {
'html_content':'<p>New comment is available on share <a href="%s?server=%i">%s?server=%i</a>.</p>',
'text_content':'New comment is available on share %s?server=%i.'
}
}
# Load server list and freeze
from webadmin.custom_models import Server
def load_server_list():
for s in SERVER_LIST:
server = (len(s) > 2) and unicode(s[2]) or None
Server(host=unicode(s[0]), port=int(s[1]), server=server)
Server.freeze()
load_server_list()
| gpl-2.0 | -7,212,896,495,762,122,000 | 43.220503 | 290 | 0.673301 | false |
DonnchaC/onionbalance | test/functional/test_publish_master_descriptor.py | 1 | 5980 | # -*- coding: utf-8 -*-
import os
import sys
import socket
import time
import pytest
import Crypto.PublicKey.RSA
import yaml
import pexpect
import stem.control
import onionbalance.util
# Skip functional tests if Chutney environment is not running.
pytestmark = pytest.mark.skipif(
"os.environ.get('CHUTNEY_ONION_ADDRESS') is None",
reason="Skipping functional test, no Chutney environment detected")
def parse_chutney_enviroment():
"""
Read environment variables and determine chutney instance and
client addresses.
"""
tor_client = os.environ.get('CHUTNEY_CLIENT_PORT')
assert tor_client
# Calculate the address and port of clients control port
client_address, client_socks_port = tor_client.split(':')
client_ip = socket.gethostbyname(client_address)
tor_client_number = int(client_socks_port) - 9000
# Control port in the 8000-8999 range, offset by Tor client number
control_port = 8000 + tor_client_number
assert control_port
# Retrieve instance onion address exported during chutney setup
instance_address = os.environ.get('CHUTNEY_ONION_ADDRESS')
assert instance_address # Need at least 1 instance address for test
if '.onion' in instance_address:
instance_address = instance_address[:16]
return {
'client_ip': client_ip,
'control_port': control_port,
'instances': [instance_address],
}
def create_test_config_file(tmppath, private_key=None, instances=None):
"""
Setup function to create a temp directory with master key and config file.
Returns a path to the temporary config file.
.. todo:: Refactor settings.py config creation to avoid code duplication
in integration tests.
"""
if not private_key:
private_key = Crypto.PublicKey.RSA.generate(1024)
# Write private key file
key_path = tmppath.join('private_key')
key_path.write(private_key.exportKey())
assert key_path.check()
# Create YAML OnionBalance settings file for these instances
service_data = {'key': str(key_path)}
service_data['instances'] = [{'address': addr} for addr in instances]
settings_data = {
'services': [service_data],
'STATUS_SOCKET_LOCATION': str(tmppath.join('control')),
}
config_yaml = yaml.dump(settings_data, default_flow_style=False)
config_path = tmppath.join('config.yaml')
config_path.write_binary(config_yaml.encode('utf-8'))
assert config_path.check()
return str(config_path)
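# The YAML written above has roughly this shape:
#     services:
#       - key: <tmpdir>/private_key
#         instances:
#           - address: <instance onion address>
#     STATUS_SOCKET_LOCATION: <tmpdir>/control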
def test_master_descriptor_publication(tmpdir):
"""
Functional test to run OnionBalance, publish a master descriptor and
check that it can be retrieved from the DHT.
"""
chutney_config = parse_chutney_enviroment()
private_key = Crypto.PublicKey.RSA.generate(1024)
master_onion_address = onionbalance.util.calc_onion_address(private_key)
config_file_path = create_test_config_file(
tmppath=tmpdir,
private_key=private_key,
instances=chutney_config.get('instances', []),
)
assert config_file_path
# Start an OnionBalance server and monitor for correct output with pexpect
server = pexpect.spawnu("onionbalance",
args=[
'-i', chutney_config.get('client_ip'),
'-p', str(chutney_config.get('control_port')),
'-c', config_file_path,
'-v', 'debug',
], logfile=sys.stdout, timeout=15)
# Check for expected output from OnionBalance
server.expect(u"Loaded the config file")
server.expect(u"introduction point set has changed")
server.expect(u"Published a descriptor", timeout=120)
# Check Tor control port gave an uploaded event.
server.expect(u"HS_DESC UPLOADED")
# Eek, sleep to wait for descriptor upload to all replicas to finish
time.sleep(10)
# .. todo:: Also need to check and raise for any warnings or errors
# that are emitted
# Try fetch and validate the descriptor with stem
with stem.control.Controller.from_port(
address=chutney_config.get('client_ip'),
port=chutney_config.get('control_port')
) as controller:
controller.authenticate()
# get_hidden_service_descriptor() will raise exceptions if it
# cannot find the descriptors
master_descriptor = controller.get_hidden_service_descriptor(
master_onion_address)
master_ips = master_descriptor.introduction_points()
# Try retrieve a descriptor for each instance
for instance_address in chutney_config.get('instances'):
instance_descriptor = controller.get_hidden_service_descriptor(
instance_address)
instance_ips = instance_descriptor.introduction_points()
# Check if all instance IPs were included in the master descriptor
assert (set(ip.identifier for ip in instance_ips) ==
set(ip.identifier for ip in master_ips))
# Check that the control socket was created
socket_path = tmpdir.join('control')
assert socket_path.check()
# Connect to the control socket and check the output
sock_client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock_client.connect(str(socket_path))
# Read the data from the status socket
result = []
while True:
data = sock_client.recv(1024)
if not data:
break
result.append(data.decode('utf-8'))
result_data = ''.join(result)
# Check each instance is in the output
for instance_address in chutney_config.get('instances'):
assert instance_address in result_data
# Check all instances were online and all master descriptors uploaded
assert master_onion_address in result_data
assert '[offline]' not in result_data
assert '[not uploaded]' not in result_data
| gpl-3.0 | -6,428,917,183,354,752,000 | 33.367816 | 78 | 0.661371 | false |
codeman38/toggldesktop | third_party/cppclean/cpp/symbols.py | 1 | 6773 | # Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Symbol Table utility code."""
from __future__ import absolute_import
from __future__ import unicode_literals
__author__ = '[email protected] (Neal Norwitz)'
class Error(BaseException):
"""Exception raised when lookup fails."""
class Symbol(object):
"""Data container used internally."""
def __init__(self, name, parts, namespace_stack):
self.name = name
self.parts = parts
self.namespace_stack = namespace_stack
class SymbolTable(object):
"""Symbol table that can perform namespace operations."""
def __init__(self):
# None is the global namespace.
self.namespaces = {None: {}}
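        # self.namespaces is a tree of nested dicts keyed by namespace (or class) name
        # parts; the None key holds the global namespace and leaf values are the
        # (AST node, module) pairs stored by _add().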
def _lookup_namespace(self, symbol, namespace, kind):
"""Helper for lookup_symbol that only looks up variables in a
namespace.
Args:
symbol: Symbol
namespace: pointer into self.namespaces
kind: 'kind of namespace for error message'
"""
for namespace_part in symbol.parts:
namespace = namespace.get(namespace_part)
if namespace is None:
raise Error('%s not found in %snamespace at %s' %
(symbol.name, kind, namespace_part))
result = namespace
if not isinstance(namespace, dict):
# Occurs when a component is not a namespace.
break
return result
def _lookup_global(self, symbol):
"""Helper for lookup_symbol that only looks up global variables.
Args:
symbol: Symbol
"""
assert symbol.parts
namespace = self.namespaces
if len(symbol.parts) == 1:
# If there is only one part, look in globals.
namespace = self.namespaces[None]
try:
# Try to do a normal, global namespace lookup.
return self._lookup_namespace(symbol, namespace, 'global ')
except Error as orig_exc:
try:
# The normal lookup can fail if all of the parts aren't
# namespaces. This happens with OuterClass::Inner.
namespace = self.namespaces[None]
return self._lookup_namespace(symbol, namespace, 'global ')
except Error:
raise orig_exc
def _lookup_in_all_namespaces(self, symbol):
"""Helper for lookup_symbol that looks for symbols in all namespaces.
Args:
symbol: Symbol
"""
namespace = self.namespaces
# Create a stack of namespaces.
namespace_stack = []
for current in symbol.namespace_stack:
namespace = namespace.get(current)
if namespace is None or not isinstance(namespace, dict):
break
namespace_stack.append(namespace)
# Iterate through the stack in reverse order. Need to go from
# innermost namespace to outermost.
for namespace in reversed(namespace_stack):
try:
return self._lookup_namespace(symbol, namespace, '')
except Error:
pass
return None
def lookup_symbol(self, name, namespace_stack):
"""Returns AST node and module for symbol if found.
Args:
name: 'name of the symbol to lookup'
namespace_stack: None or ['namespaces', 'in', 'current', 'scope']
Returns:
(ast.Node, module (ie, any object stored with symbol)) if found
Raises:
Error if the symbol cannot be found.
"""
# TODO(nnorwitz): a convenient API for this depends on the
# representation of the name. e.g., does symbol_name contain
# ::, is symbol_name a list of colon separated names, how are
# names prefixed with :: handled. These have different lookup
# semantics (if leading ::) or change the desirable API.
# For now assume that the symbol_name contains :: and parse it.
symbol = Symbol(name, name.split('::'), namespace_stack)
assert symbol.parts
if symbol.parts[0] == '':
# Handle absolute (global) ::symbol_names.
symbol.parts = symbol.parts[1:]
elif namespace_stack is not None:
result = self._lookup_in_all_namespaces(symbol)
if result:
return result
return self._lookup_global(symbol)
def _add(self, symbol_name, namespace, node, module):
"""Helper function for adding symbols.
See add_symbol().
"""
result = symbol_name in namespace
namespace[symbol_name] = node, module
return not result
def add_symbol(self, symbol_name, namespace_stack, node, module):
"""Adds symbol_name defined in namespace_stack to the symbol table.
Args:
symbol_name: 'name of the symbol to lookup'
namespace_stack: None or ['namespaces', 'symbol', 'defined', 'in']
node: ast.Node that defines this symbol
module: module (any object) this symbol is defined in
Returns:
bool(if symbol was *not* already present)
"""
# TODO(nnorwitz): verify symbol_name doesn't contain :: ?
if namespace_stack:
# Handle non-global symbols (ie, in some namespace).
last_namespace = self.namespaces
for namespace in namespace_stack:
last_namespace = last_namespace.setdefault(namespace, {})
else:
last_namespace = self.namespaces[None]
return self._add(symbol_name, last_namespace, node, module)
def get_namespace(self, name_seq):
"""Returns the prefix of names from name_seq that are known namespaces.
Args:
name_seq: ['names', 'of', 'possible', 'namespace', 'to', 'find']
Returns:
['names', 'that', 'are', 'namespaces', 'possibly', 'empty', 'list']
"""
namespaces = self.namespaces
result = []
for name in name_seq:
namespaces = namespaces.get(name)
if not namespaces:
break
result.append(name)
return result
| bsd-3-clause | -7,632,433,835,013,923,000 | 32.696517 | 79 | 0.598996 | false |
emccode/HeliosBurn | heliosburn/django/hbproject/webui/backends.py | 1 | 1440 | from django.conf import settings
from mongoengine.django.auth import User
import requests
import json
class HeliosAuthBackend(object):
"""
Authenticate against the API.
"""
def authenticate(self, username=None, password=None):
payload = {'username': username, 'password': password}
url = '%s/auth/login/' % (settings.API_BASE_URL,)
r = requests.post(url, data=json.dumps(payload))
if r.status_code == requests.codes.ok:
token = r.headers.get('x-auth-token')
if not token:
return None
try:
user = User.objects.get(username=username)
user.password = token
user.save()
except User.DoesNotExist:
                # Create a new user. The API token is stored in the password
                # field; it is never checked as a real password because
                # authentication is delegated to the API above.
user = User(username=username, password=token)
user.is_staff = True
user.is_superuser = True
user.save()
return user
elif r.status_code >= requests.codes.internal_server_error:
raise Exception('Server error. ' + str(r.status_code))
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None | mit | -4,774,238,198,615,431,000 | 31.022222 | 72 | 0.565278 | false |
dcos/shakedown | tests/acceptance/test_dcos_command.py | 1 | 1176 | from shakedown import *
import json
def test_run_command():
exit_status, output = run_command(master_ip(), 'cat /etc/motd')
assert exit_status
def test_run_command_on_master():
exit_status, output = run_command_on_master('uname -a')
assert exit_status
assert output.startswith('Linux')
def test_run_command_on_leader():
exit_status, output = run_command_on_leader('uname -a')
assert exit_status
assert output.startswith('Linux')
def test_run_command_on_marathon_leader():
exit_status, output = run_command_on_marathon_leader('uname -a')
assert exit_status
assert output.startswith('Linux')
def test_run_command_on_agent():
"""Run 'ps' on all agents looking for jenkins."""
service_ips = get_private_agents() + get_public_agents()
for host in service_ips:
exit_status, output = run_command_on_agent(host, 'ps -eaf | grep -i docker | grep -i jenkins')
assert exit_status
assert len(output) > 0
def test_run_dcos_command():
stdout, stderr, return_code = run_dcos_command('package search jenkins --json')
result_json = json.loads(stdout)
assert result_json['packages'][0]['name'] == 'jenkins'
| apache-2.0 | -1,168,257,401,744,224,500 | 33.588235 | 102 | 0.673469 | false |
commtrack/commtrack-old-to-del | apps/reports/custom/all/domain_summary.py | 1 | 3326 | from django.template.loader import render_to_string
import settings
from xformmanager.models import FormDefModel, Metadata
from receiver.models import Submission, Attachment
def domain_summary(request, domain=None, detail_view=True):
'''Domain Admin Summary Data'''
if not domain:
domain = request.extuser.domain
summary = DomainSummary(domain)
return render_to_string("custom/all/domain_summary.html",
{"MEDIA_URL": settings.MEDIA_URL, # we pretty sneakly have to explicitly pass this
"detail_view": detail_view,
"domain": domain,
"summary": summary})
class DomainSummary(object):
def __init__(self, domain):
self.form_data = []
self.chw_data = []
self.domain = domain
domain_meta = Metadata.objects.filter(formdefmodel__domain=domain)
domain_submits = Submission.objects.filter(domain=domain)
self.name = domain.name
self.submissions = domain_submits.count()
self.attachments = Attachment.objects.filter(submission__domain=domain).count()
self.first_submission = domain_submits.order_by("submit_time")[0].submit_time
self.last_submission = domain_submits.order_by("-submit_time")[0].submit_time
self.full_count = domain_meta.count()
chws = domain_meta.values_list('username', flat=True).distinct()
forms = FormDefModel.objects.filter(domain=domain)
blacklist = domain.get_blacklist()
for form in forms:
form_metas = domain_meta.filter(formdefmodel=form)
self.form_data.append({"form": form,
"count": form_metas.count(),
"first": _get_first_object(form_metas, "timeend", True),
"last": _get_first_object(form_metas, "timeend", False)
})
self.blacklist_count = 0
self.chw_blacklist_count = 0
self.chw_count = 0
for chw in chws:
chw_forms = domain_meta.filter(username=chw)
in_blacklist = chw in blacklist
self.chw_data.append({"name": chw,
"count": chw_forms.count(),
"in_blacklist": in_blacklist,
"first": _get_first_object(chw_forms, "timeend", True),
"last": _get_first_object(chw_forms, "timeend", False)
})
if in_blacklist:
self.chw_blacklist_count += 1
self.blacklist_count += chw_forms.count()
else:
self.chw_count += 1
self.count = self.full_count - self.blacklist_count
    def chws(self):
        """Flat list of CHW's found in this domain"""
        # derived from chw_data; the original referenced an undefined chw_counts attribute
        return [chw["name"] for chw in self.chw_data]
    def form_count(self):
        """Number of unique forms (types) found in this domain."""
        return len(self.form_data)
def _get_first_object(queryset, column_name, first):
sort_str = "" if first else "-"
sorted_qs = queryset.order_by("%s%s" % (sort_str, column_name))
if sorted_qs.count() > 0:
return sorted_qs[0] | bsd-3-clause | -5,803,733,305,685,974,000 | 42.776316 | 110 | 0.553818 | false |
uaprom-summer-2015/Meowth | project/gallery.py | 1 | 1876 | import os
from werkzeug.datastructures import FileStorage
from project.models import UploadedImage
from PIL import Image
from PIL.ExifTags import TAGS
IM_EXTENSIONS = frozenset(['.jpg', '.jpeg', '.gif', '.png'])
def remove_exif_orientation(file_path):
ext = os.path.splitext(file_path)[1].lower()
if ext == '.jpg' or ext == '.jpeg':
img = Image.open(file_path)
exif = img._getexif()
if not exif:
return
orientation = 1
for (k, v) in exif.items():
if TAGS.get(k) == 'Orientation':
orientation = v
        # compare with == (not "is") and pass expand=True so 90/180 degree
        # rotations keep the whole image instead of cropping to the old canvas
        if orientation == 6:
            img = img.rotate(-90, expand=True)
        elif orientation == 8:
            img = img.rotate(90, expand=True)
        elif orientation == 3:
            img = img.rotate(180, expand=True)
        elif orientation == 2:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
        elif orientation == 5:
            img = img.rotate(-90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
        elif orientation == 7:
            img = img.rotate(90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
        elif orientation == 4:
            img = img.rotate(180, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
img.save(file_path)
def upload_file(file_path):
remove_exif_orientation(file_path)
with open(file_path, 'rb') as fp:
file = FileStorage(fp)
UploadedImage.bl.save_image(
image=file,
img_category=UploadedImage.IMG_CATEGORY.gallery,
do_sync=True,
)
def images(subdir):
for i in os.listdir(subdir):
_, extension = os.path.splitext(i)
if extension.lower() in IM_EXTENSIONS:
yield os.path.join(subdir, i)
def load_images(subdir=None):
if not subdir:
for _ in range(64):
upload_file('testdata/images/face-2.jpg')
else:
for fp in images(subdir):
upload_file(fp)
| bsd-3-clause | -2,487,392,880,310,597,000 | 28.3125 | 66 | 0.58209 | false |
JonnyJD/python-discid | discid/__init__.py | 1 | 2174 | # Copyright (C) 2013 Johannes Dewender
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Please submit bug reports to GitHub:
# https://github.com/JonnyJD/python-discid/issues
"""Python binding of Libdiscid
Libdiscid is a library to calculate MusicBrainz Disc IDs.
This module provides a python-like API for that functionality.
The user is expected to create a :class:`Disc` object
using :func:`read` or :func:`put` and extract the generated information.
Importing this module will open libdiscid at the same time
and will raise :exc:`OSError` when libdiscid is not found.
"""
from discid.disc import read, put, Disc, DiscError, TOCError
from discid.track import Track
from discid.libdiscid import get_default_device
import discid.libdiscid
import discid.disc
__version__ = "1.2.0"
# these constants are defined here so sphinx can catch the "docstrings"
LIBDISCID_VERSION_STRING = discid.libdiscid.LIBDISCID_VERSION_STRING
"""The version string of the loaded libdiscid in the form `libdiscid x.y.z`.
For old versions the string is `libdiscid < 0.4.0`.
"""
FEATURES = discid.libdiscid.FEATURES
"""The features libdiscid supports for the platform as a list of strings.
Some Functions can raise :exc:`NotImplementedError` when a feature
is not available.
Some features might not be implemented in this python module,
see :data:`FEATURES_IMPLEMENTED`.
"""
FEATURES_IMPLEMENTED = discid.disc.FEATURES_IMPLEMENTED
"""The features implemented in this python module as a list of strings.
Some might not be available for your platform, see :data:`FEATURES`.
"""
| lgpl-3.0 | 7,205,535,056,516,717,000 | 37.140351 | 77 | 0.772769 | false |
UManPychron/pychron | pychron/envisage/initialization/initialization_parser.py | 1 | 13078 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# from lxml.etree import Element
from __future__ import absolute_import
from __future__ import print_function
import inspect
# ============= standard library imports ========================
import os
import sys
from pyface.message_dialog import warning
# ============= local library imports ==========================
from pychron.core.helpers.strtools import to_bool
from pychron.core.xml.xml_parser import XMLParser
from pychron.paths import paths
lower = lambda x: x.lower() if x else None
def handle_uncaught_exception(func):
def _handle(*args, **kw):
try:
return func(*args, **kw)
except Exception as e:
import traceback
traceback.print_exc()
warning(None, 'There is a problem in your initialization file {}'.format(e))
sys.exit()
return _handle
def decorate_all(cls):
"""
adds the handle_uncaught_exception decorator to all methods of the class
"""
for name, m in inspect.getmembers(cls, inspect.ismethod):
setattr(cls, name, handle_uncaught_exception(m))
return cls
@decorate_all
class InitializationParser(XMLParser):
"""
"""
def __init__(self, *args, **kw):
ver = '_proc'
# ver = '_valve'
# ver ='_apis'
# ver = '_uv'
ver = '_exp'
# ver = '_exp_uv'
# ver= '_spec'
# ver = '_diode'
# ver = '_dash'
# ver = '_dash_client'
# ver = ''
p = os.path.join(paths.setup_dir, 'initialization{}.xml'.format(ver))
if not os.path.isfile(p):
p = os.path.join(paths.setup_dir, 'initialization.xml')
if not os.path.isfile(p):
warning(None, 'No initialization file.\n{} is not a valid file'.format(p))
sys.exit()
super(InitializationParser, self).__init__(p, *args, **kw)
def verify(self):
return self._syntax_error
def get_globals(self):
tree = self.get_root()
tree = tree.find('globals')
return tree.iter()
def set_bool_tag(self, tag, v):
tree = self.get_root()
tree = tree.find('globals')
elem = tree.find(tag)
if elem is not None:
elem.text = v
else:
tree.append(self.new_element(tag, v))
def add_plugin(self, category, name, save=True, enabled='false'):
tree = self.get_root()
tree = tree.find('plugins')
cat = tree.find(category)
if not cat:
tree.append(self.new_element(category, None))
cat = tree.find(category)
cat.append(self.new_element('plugin', name, enabled=enabled))
if save:
self.save()
def get_plugins(self, category=None, all_=False, element=False):
tree = self.get_root()
tree = tree.find('plugins')
if category:
cat = tree.find(category)
if cat is not None:
plugins = cat.findall('plugin')
else:
try:
plugins = tree.iter(tag='plugin')
except AttributeError:
plugins = tree.getiterator(tag='plugin')
if plugins:
return [p if element else p.text.strip()
for p in plugins if all_ or to_bool(p.get('enabled'))]
# def get_plugins_as_elements(self, category):
# tree = self._tree.find('plugins')
# cat = tree.find(category)
# if cat is not None:
# return cat.findall('plugin')
def get_global(self, tag):
root = self.get_root()
elem = root.find('globals')
if elem is not None:
g = elem.find(tag)
if g is not None:
return g.text.strip()
def get_plugin_groups(self, elem=False):
plugin = self.get_root().find('plugins')
return [t if elem else t.tag for t in list(plugin)]
def get_plugin_group(self, name):
return next((p for p in self.get_plugin_groups(elem=True)
if p.tag == name
), None)
def get_groups(self):
tree = self.get_root()
# root = tree.getroot()
return [t.tag for t in list(tree)]
def get_parameters(self, *args, **kw):
return self._get_parameters(all_=True, *args, **kw)
def get_parameter(self, subtree, name, all_=True, **kw):
pa = self._get_parameters(subtree, name, all_=all_, **kw)
if pa:
return pa[0]
def enable_manager(self, name, parent):
plugin = self.get_plugin(parent)
man = next((m for m in plugin.findall('manager') if m.text.strip() == name), None)
man.set('enabled', 'true')
self.save()
def disable_manager(self, name, parent):
plugin = self.get_plugin(parent)
man = next((m for m in plugin.findall('manager') if m.text.strip() == name), None)
man.set('enabled', 'false')
self.save()
def enable_device(self, name, plugin):
dev = self.get_device(plugin, name, None, element=True)
dev.set('enabled', 'true')
self.save()
def disable_device(self, name, plugin):
dev = self.get_device(plugin, name, None, element=True)
dev.set('enabled', 'false')
self.save()
def enable_plugin(self, name, category=None, save=True):
plugin = self.get_plugin(name, category)
if plugin is None:
self.add_plugin(category, name, save=save, enabled='true')
else:
plugin.set('enabled', 'true')
if save:
self.save()
def disable_plugin(self, name, category=None, save=True):
plugin = self.get_plugin(name, category)
if plugin is not None:
plugin.set('enabled', 'false')
if save:
self.save()
def get_flags(self, manager, **kw):
return self._get_parameters(manager, 'flag', **kw)
def get_timed_flags(self, manager, **kw):
return self._get_parameters(manager, 'timed_flag', **kw)
def get_valve_flags(self, manager, **kw):
return self._get_parameters(manager, 'valve_flag', **kw)
def get_rpc_params(self, manager):
if isinstance(manager, tuple):
manager = self.get_manager(*manager)
text = lambda x: x.text.strip() if x is not None else None
try:
rpc = manager.find('rpc')
mode = rpc.get('mode')
port = text(rpc.find('port'))
host = text(rpc.find('host'))
return mode, host, int(port),
except Exception as e:
pass
return None, None, None
def get_device(self, manager, devname, plugin, element=False):
if plugin:
man = self.get_plugin(plugin)
nman = next((d for d in man.findall('manager')
if d.text.strip() == manager), None)
if nman is not None:
man = nman
else:
man = self.get_plugin(manager)
# print manager, devname, plugin, man.text.strip()
# else:
# man = self.get_manager()
# if plugin is None:
# man = self.get_plugin(manager)
# else:
# man = self.get_manager(manager, plugin)
# if man is None:
# man = self.get_plugin_group(manager)
dev = next((d for d in man.findall('device')
if d.text.strip() == devname), None)
if not element and dev:
dev = dev.text.strip()
return dev
def get_devices(self, manager, **kw):
return self._get_parameters(manager, 'device', **kw)
def get_processor(self, manager, **kw):
p = self._get_parameters(manager, 'processor', **kw)
if p:
return p[0]
def get_processors(self):
# ps = []
# for p in self.get_plugins('Hardware'):
# pp = self.get_processor(p)
# if pp:
# ps.append(pp)
pl = self.get_plugin_group('hardware')
ps = [pi for pi in [self.get_processor(p)
for p in self.get_plugins('hardware', element=True)] if pi]
nps = self._get_parameters(pl, 'processor')
if nps:
ps += nps
return ps
def get_server(self, manager, **kw):
p = self._get_parameters(manager, 'server', **kw)
if p:
return p[0]
def get_servers(self):
servers = [pi for pi in [self.get_server(p)
for p in self.get_plugins('hardware', element=True)] if pi]
h = self.get_plugin_group('hardware')
if h is not None:
hs = self._get_parameters(h, 'server')
if hs:
servers += hs
return servers
def _get_parameters(self, subtree, tag, all_=False, element=False):
if subtree is None:
print(subtree)
return [d if element else d.text.strip()
for d in subtree.findall(tag)
if all_ or to_bool(d.get('enabled'))]
def get_managers(self, elem, all_=False, element=False):
return [m if element else m.text.strip()
for m in elem.findall('manager')
if all_ or to_bool(m.get('enabled'))]
def get_plugin(self, name, category=None):
if '_' in name:
if 'co2' in name:
name = name.split('_')[0].capitalize() + 'CO2'
elif 'uv' in name:
name = name.split('_')[0].capitalize() + 'UV'
else:
name = ''.join([a.capitalize() for a in name.split('_')])
else:
name = name[0].upper() + name[1:]
if not category:
category = self.get_categories()
if not isinstance(category, (list, tuple)):
category = (category, )
for cat in category:
elem = self._get_element(cat, name)
if elem is not None:
return elem
def get_manager(self, name, plugin):
if 'Manager' in plugin:
plugin = plugin.replace('Manager', '')
p = self.get_plugin(plugin)
man = next((pi for pi in p.findall('manager') if pi.text.strip() == name), None)
return man
def get_categories(self):
return ['general', 'data', 'hardware', 'social']
# root = self.get_root()
# tree = root.find('plugins')
# s = lambda x: x.tag
#
# cats = map(s, [c for c in tree.iter(etree.Element)])
# return list(set(cats))
#return map(s, set([c for c in tree.iter()]))
def _get_element(self, category, name, tag='plugin'):
root = self.get_root()
tree = root.find('plugins')
if category is None:
iterator = lambda: tree.iter(tag=tag)
# return next((p for p in tree.iter(tag=tag) if p.text.strip() == name), None)
# for p in tree.iter(tag=tag):
# if p.text.strip() == name:
# return p
else:
cat = tree.find(category)
# print 'asss', category, cat
if cat is not None:
iterator = lambda: cat.findall(tag)
else:
iterator = lambda: ''
# for plugin in cat.findall(tag):
# if plugin.text.strip() == name:
# return plugin
name = name.lower()
# for ii in iterator():
# print ii.text.strip().lower(), name
# if ii.text.strip().lower()==name:
# break
return next((p for p in iterator() if p.text.strip().lower() == name), None)
def get_systems(self):
p = self.get_plugin('ExtractionLine')
if p is not None:
return [(s.text.strip(), s.get('master_host')) for s in p.findall('system')]
return []
# def get_processors(self):
#
# cat = self._tree.find('remotehardware')
# pi = None
# if cat is not None:
# pi = cat.findall('processor')
#
# return [pii.text.strip() for pii in (pi if pi else [])]
# ============= EOF =============================================
| apache-2.0 | 3,117,435,444,442,191,000 | 31.942065 | 97 | 0.523627 | false |
ntduong/data-science-newbie | Articles_Data_Analysis/cluster_articles.py | 1 | 1885 | import numpy as np
import scipy.cluster.hierarchy as hier
import scipy.spatial.distance as dist
import matplotlib.pyplot as plt
import make_data
from transform import count_transform
from cluster_algos import kmeans, hcluster
def clustering(X, labels, algo='hcluster', n_clusters=5, figname='cluster_result.png'):
""" Clustering data.
Params:
X: ndarray of n x d size (n samples, d features)
labels: labels of samples, for visualizing result.
algo: specify clustering algorithms, e.g., "hcluster", "kmeans"
n_clusters: #.of.cluster in case of kmeans
figname: file name to save figure
"""
assert algo in ['hcluster', 'kmeans'], "Invalid algorithm!"
if algo == 'hcluster':
linkage_mat = hcluster(X, metric='correlation', method='average')
fig = plt.figure(figsize=(30,20), dpi=100)
fig.clf()
hier.dendrogram(linkage_mat, labels=labels, leaf_rotation=90, leaf_font_size=20)
plt.savefig(figname)
else:
labels = np.asarray(labels)
result = kmeans(X, n_clusters=n_clusters)
for cid in xrange(n_clusters):
print 'Cluster %d:' %(cid+1)
for a in labels[result == cid]:
print a.encode('utf-8')
print '-'*30
def main(url_file, use_tfidf=True):
word_cnt, sites, site_urls = make_data.get_sites_words(url_file)
sw_mat, word_list = make_data.make_site_by_word_mat(word_cnt, sites, freq=5, percent=0.7)
X = sw_mat
if use_tfidf:
X = count_transform(sw_mat)
labels = ['Normal Deviate', 'MLTheory', 'CNET', 'BBC', 'CNN', 'JP', 'CNN-Tech', 'TechReview', 'NYT-Tech', 'Time-World', 'Mark-Reid']
clustering(X, labels, algo='hcluster', figname='hcluster_site_by_word_tfidf.png')
if __name__ == '__main__':
main('txt/urls.txt', use_tfidf=True) | mit | 4,438,244,582,114,250,000 | 39.12766 | 136 | 0.620159 | false |
bitmazk/django-generic-positions | generic_positions/tests/models_tests.py | 1 | 1512 | """Tests for the models of the ``generic_positions`` app."""
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from mixer.backend.django import mixer
from ..models import ObjectPosition, save_positions
from .test_app.models import DummyModel
class ObjectPositionTestCase(TestCase):
"""Tests for the ``ObjectPosition`` model class."""
longMessage = True
def test_instantiation(self):
"""Test instatiation of the ``ObjectPosition`` model."""
object_position = ObjectPosition()
self.assertTrue(object_position)
def test_save_positions_function(self):
"""Test the ``save_positions`` function."""
object_position = mixer.blend(
'generic_positions.ObjectPosition',
content_type=ContentType.objects.get_for_model(DummyModel))
object_position2 = mixer.blend(
'generic_positions.ObjectPosition',
content_type=ContentType.objects.get_for_model(DummyModel))
post_data = {
'position-{0}'.format(object_position.id): '2',
'position-invalid': '2',
}
save_positions(post_data)
# The obj in our dict should be updated...
self.assertEqual(
ObjectPosition.objects.get(id=object_position.id).position, 2)
# ..the other one should remain the same.
self.assertEqual(
ObjectPosition.objects.get(id=object_position2.id).position,
object_position2.position)
| mit | 2,228,287,124,904,203,000 | 35.878049 | 74 | 0.65873 | false |
margamanterola/Cinnamon | files/usr/share/cinnamon/cinnamon-settings/modules/cs_mouse.py | 1 | 6281 | #!/usr/bin/env python2
from gi.repository import Gtk, Gdk, GLib
from SettingsWidgets import *
class Module:
comment = _("Control mouse and touchpad settings")
name = "mouse"
category = "hardware"
def __init__(self, content_box):
keywords = _("mouse, touchpad, synaptic, double-click")
sidePage = SidePage(_("Mouse and Touchpad"), "cs-mouse", keywords, content_box, module=self)
self.sidePage = sidePage
def on_module_selected(self):
if not self.loaded:
print "Loading Mouse module"
self.sidePage.stack = SettingsStack()
self.sidePage.add_widget(self.sidePage.stack)
# Mouse
page = SettingsPage()
settings = page.add_section(_("General"))
switch = GSettingsSwitch(_("Left handed (mouse buttons inverted)"), "org.cinnamon.settings-daemon.peripherals.mouse", "left-handed")
settings.add_row(switch)
switch = GSettingsSwitch(_("Show position of pointer when the Control key is pressed"), "org.cinnamon.settings-daemon.peripherals.mouse", "locate-pointer")
settings.add_row(switch)
switch = GSettingsSwitch(_("Emulate middle click by clicking both left and right buttons"), "org.cinnamon.settings-daemon.peripherals.mouse", "middle-button-enabled")
settings.add_row(switch)
spin = GSettingsSpinButton(_("Drag-and-drop threshold"), "org.cinnamon.settings-daemon.peripherals.mouse", "drag-threshold", _("pixels"), 1, 400)
settings.add_row(spin)
settings = page.add_section(_("Pointer size and speed"))
widget = GSettingsRange(_("Size"), "org.cinnamon.desktop.interface", "cursor-size", _("Smaller"), _("Larger"), 5, 50)
widget.add_mark(24.0, Gtk.PositionType.TOP, None)
settings.add_row(widget)
slider = GSettingsRange(_("Acceleration"), "org.cinnamon.settings-daemon.peripherals.mouse", "motion-acceleration", _("Slow"), _("Fast"), 1, 10)
settings.add_row(slider)
slider = GSettingsRange(_("Sensitivity"), "org.cinnamon.settings-daemon.peripherals.mouse", "motion-threshold", _("Low"), _("High"), 1, 10, invert=True)
settings.add_row(slider)
settings = page.add_section(_("Double-Click timeout"))
slider = GSettingsRange(_("Timeout"), "org.cinnamon.settings-daemon.peripherals.mouse", "double-click", _("Short"), _("Long"), 100, 1000)
settings.add_row(slider)
box = SettingsWidget()
widget = Gtk.Button.new_with_label(_("Double-click test"))
widget.connect("button-press-event", self.test_button_clicked)
box.pack_start(widget, True, True, 0)
settings.add_row(box)
self.sidePage.stack.add_titled(page, "mouse", _("Mouse"))
# Touchpad
page = SettingsPage()
switch = GSettingsSwitch("", "org.cinnamon.settings-daemon.peripherals.touchpad", "touchpad-enabled")
switch.label.set_markup("<b>%s</b>" % _("Enable touchpad"))
switch.fill_row()
page.pack_start(switch, False, True, 0)
revealer = SettingsRevealer("org.cinnamon.settings-daemon.peripherals.touchpad", "touchpad-enabled")
page.pack_start(revealer, False, True, 0)
settings = SettingsBox(_("General"))
revealer.add(settings)
switch = GSettingsSwitch(_("Tap to click"), "org.cinnamon.settings-daemon.peripherals.touchpad", "tap-to-click")
settings.add_row(switch)
switch = GSettingsSwitch(_("Disable touchpad while typing"), "org.cinnamon.settings-daemon.peripherals.touchpad", "disable-while-typing")
settings.add_row(switch)
button_list = [[0, _("Disabled")], [1, _("Left button")], [2, _("Middle button")], [3, _("Right button")]]
combo = GSettingsComboBox(_("Two-finger click emulation:"), "org.cinnamon.settings-daemon.peripherals.touchpad", "two-finger-click", button_list, valtype="int")
settings.add_row(combo)
combo = GSettingsComboBox(_("Three-finger click emulation:"), "org.cinnamon.settings-daemon.peripherals.touchpad", "three-finger-click", button_list, valtype="int")
settings.add_row(combo)
settings = SettingsBox(_("Scrolling"))
revealer.add(settings)
switch = GSettingsSwitch(_("Reverse scrolling direction"), "org.cinnamon.settings-daemon.peripherals.touchpad", "natural-scroll")
settings.add_row(switch)
switch = GSettingsSwitch(_("Vertical edge scrolling"), "org.cinnamon.settings-daemon.peripherals.touchpad", "vertical-edge-scrolling")
settings.add_row(switch)
switch = GSettingsSwitch(_("Horizontal edge scrolling"), "org.cinnamon.settings-daemon.peripherals.touchpad", "horizontal-edge-scrolling")
settings.add_row(switch)
switch = GSettingsSwitch(_("Vertical two-finger scrolling"), "org.cinnamon.settings-daemon.peripherals.touchpad", "vertical-two-finger-scrolling")
settings.add_row(switch)
switch = GSettingsSwitch(_("Horizontal two-finger scrolling"), "org.cinnamon.settings-daemon.peripherals.touchpad", "horizontal-two-finger-scrolling")
settings.add_row(switch)
settings = SettingsBox(_("Pointer speed"))
revealer.add(settings)
slider = GSettingsRange(_("Acceleration"), "org.cinnamon.settings-daemon.peripherals.touchpad", "motion-acceleration", _("Slow"), _("Fast"), 1, 10)
settings.add_row(slider)
slider = GSettingsRange(_("Sensitivity"), "org.cinnamon.settings-daemon.peripherals.touchpad", "motion-threshold", _("Low"), _("High"), 1, 10)
settings.add_row(slider)
self.sidePage.stack.add_titled(page, "touchpad", _("Touchpad"))
def test_button_clicked(self, widget, event):
if event.type == Gdk.EventType._2BUTTON_PRESS:
widget.set_label(_("Success!"))
GLib.timeout_add(1000, self.reset_test_button, widget)
return True
def reset_test_button(self, widget):
widget.set_label(_("Double-click test"))
return False
| gpl-2.0 | -8,442,599,125,222,118,000 | 47.689922 | 178 | 0.631428 | false |
dannybrowne86/django-avatar | setup.py | 2 | 1958 | import codecs
import re
from os import path
from setuptools import setup, find_packages
def read(*parts):
filename = path.join(path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='django-avatar',
version=find_version("avatar", "__init__.py"),
description="A Django app for handling user avatars",
long_description=read('README.rst'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='avatar, django',
author='Eric Florenzano',
author_email='[email protected]',
maintainer='Grant McConnaughey',
maintainer_email='[email protected]',
url='http://github.com/grantmcconnaughey/django-avatar/',
license='BSD',
packages=find_packages(exclude=['tests']),
package_data={
'avatar': [
'templates/notification/*/*.*',
'templates/avatar/*.html',
'locale/*/LC_MESSAGES/*',
'media/avatar/img/default.jpg',
],
},
install_requires=[
'Pillow>=2.0',
'django-appconf>=0.6',
],
zip_safe=False,
)
| bsd-3-clause | 9,097,149,291,649,415,000 | 31.098361 | 68 | 0.589888 | false |
garbear/EventGhost | eg/Classes/MacroSelectButton.py | 1 | 2415 | # This file is part of EventGhost.
# Copyright (C) 2005 Lars-Peter Voss <[email protected]>
#
# EventGhost is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# EventGhost is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EventGhost; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import eg
import wx
class MacroSelectButton(wx.Window):
def __init__(self, parent, label, title, mesg, macro=None):
if macro is None:
macroName = ""
else:
macroName = macro.name
self.title = title
self.mesg = mesg
self.macro = macro
wx.Window.__init__(self, parent, -1)
self.textBox = eg.StaticTextBox(self, -1, macroName, size=(200, -1))
self.button = wx.Button(self, -1, label)
self.Bind(wx.EVT_BUTTON, self.OnButton)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.textBox, 1, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.button, 0, wx.LEFT, 5)
self.SetSizer(sizer)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
self.Layout()
def OnSetFocus(self, dummyEvent):
self.button.SetFocus()
def OnSize(self, dummyEvent):
if self.GetAutoLayout():
self.Layout()
@eg.AsTasklet
def OnButton(self, dummyEvent):
result = eg.TreeItemBrowseDialog.GetModalResult(
self.title,
self.mesg,
self.macro,
(eg.MacroItem,),
parent=self
)
if result:
macro = result[0]
self.textBox.SetLabel(macro.name)
self.macro = macro
self.ProcessEvent(
wx.CommandEvent(wx.EVT_TEXT.evtType[0], self.GetId())
)
def GetValue(self):
return self.macro
| gpl-2.0 | 1,440,151,838,803,465,500 | 29.776316 | 76 | 0.609938 | false |
vprusso/youtube_tutorials | natural_language_processing/nlp_1.py | 1 | 3973 | # LucidProgramming -- Natural Language Processing in Python: Part 1
# YouTube Video: https://www.youtube.com/watch?v=tP783g97C5o
# Prior to running this script, you will require Python to be installed on
# your machine. If so, you may run the following command via pip:
# pip install nltk
# Once installed, you should be able to follow along with the
# remainder of this script.
import nltk
# Run this command to download all collections to be used for the NLP tutorials:
nltk.download()
# Now that we've downloaded all the NLTK corpus content, let us go ahead and
# load in the text from Lewis Carroll's "Alice in Wonderland" via Gutenberg:
from nltk.text import Text
alice = Text(nltk.corpus.gutenberg.words('carroll-alice.txt'))
# NLTK also provides other texts from Gutenberg. We can view those by
# running the following command:
print(nltk.corpus.gutenberg.fileids())
# There are many more text data sets provided by NLTK. For now, we will
# just focus on what types of analysis tools NLTK provides to us on the
# text "Alice in Wonderland" by Lewis Carroll:
# Word Count: How many words are contained in "Alice in Wonderland"?
# Note that this includes punctuation as well as traditional words.
print(type(alice))
print(len(alice))
# Unique Word Count: How many unique words are present in
# "Alice in Wonderland"? For instance, the above line would
# count the word "the" on each occurrence.
print(len(set(alice)))
# Specific Word Count: How many times does a specific word occur
# in a text?
print(alice.count("Alice"))
# Concordance: Shows occurrence of word in context of use.
# We can check where the term "alice" appears in "Alice in Wonderland".
alice.concordance("Alice")
# Dispersion Plot: Location of where a word is in the text.
# Example:
# Give a visual representation of where the words "Alice", "Rabbit",
# "Hatter", and "Queen" appear in "Alice in Wonderland".
alice.dispersion_plot(["Alice", "Rabbit", "Hatter", "Queen"])
# The word "Alice" is consistently used throughout the entire text, while
# the word "Queen" is found closer to the end of the text. This makes sense,
# since Alice does not encounter the Queen of Hearts until later in the book.
# Frequency Distributions: What are the most frequent words (specifically,
# tokens), that are used in a given text.
# Example:
# Generate the most frequent tokens in "Alice in Wonderland":
# First, use NLTK to generate a frequency distribution dictionary-like object.
fdist = nltk.FreqDist(alice)
# What are the top 50 most common words in "Alice in Wonderland"?
fdist.plot(50, cumulative=True, title="50 most common tokens in Alice in Wonderland")
# Observe that the x-axis consists of punctuation, which may not
# be precisely what we are going for. It is possible to remove this
# from the words that we plot by filtering out the punctuation.
fdist_no_punc = nltk.FreqDist(
dict((word, freq) for word, freq in fdist.items() if word.isalpha()))
fdist_no_punc.plot(50,
cumulative=True,
title="50 most common tokens (no punctuation)")
# This plot gives us a bit more useful information, but it still contains an
# awful lot of uninformative filler words that we do not particularly care to
# see. In a similar fashion, we may filter these out.
# We may not obtain too much information on the above plot, since
# many of the words on the x-axis are words like "and", "the", "in",
# etc. These types of common English words are referred to as
# stopwords. NLTK provides a method to identify such words.
stopwords = nltk.corpus.stopwords.words('english')
fdist_no_punc_no_stopwords = nltk.FreqDist(
dict((word, freq) for word, freq in fdist.items() if word not in stopwords and word.isalpha()))
# Replot fdist after stopwords filtered out.
fdist_no_punc_no_stopwords.plot(50,
cumulative=True,
title="50 most common tokens (no stopwords or punctuation)")
| gpl-3.0 | -2,300,163,772,194,960,600 | 41.265957 | 103 | 0.726655 | false |
markgw/jazzparser | lib/nltk/corpus/__init__.py | 1 | 10492 | # Natural Language Toolkit: Corpus Readers
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
# [xx] this docstring isn't up-to-date!
"""
NLTK corpus readers. The modules in this package provide functions
that can be used to read corpus files in a variety of formats. These
functions can be used to read both the corpus files that are
distributed in the NLTK corpus package, and corpus files that are part
of external corpora.
Available Corpora
=================
Please see http://nltk.googlecode.com/svn/trunk/nltk_data/index.xml
for a complete list. Install corpora using nltk.download().
Corpus Reader Functions
=======================
Each corpus module defines one or more X{corpus reader functions},
which can be used to read documents from that corpus. These functions
take an argument, C{item}, which is used to indicate which document
should be read from the corpus:
- If C{item} is one of the unique identifiers listed in the corpus
module's C{items} variable, then the corresponding document will
be loaded from the NLTK corpus package.
- If C{item} is a filename, then that file will be read.
Additionally, corpus reader functions can be given lists of item
names; in which case, they will return a concatenation of the
corresponding documents.
Corpus reader functions are named based on the type of information
they return. Some common examples, and their return types, are:
- I{corpus}.words(): list of str
- I{corpus}.sents(): list of (list of str)
- I{corpus}.paras(): list of (list of (list of str))
- I{corpus}.tagged_words(): list of (str,str) tuple
- I{corpus}.tagged_sents(): list of (list of (str,str))
- I{corpus}.tagged_paras(): list of (list of (list of (str,str)))
- I{corpus}.chunked_sents(): list of (Tree w/ (str,str) leaves)
- I{corpus}.parsed_sents(): list of (Tree with str leaves)
- I{corpus}.parsed_paras(): list of (list of (Tree with str leaves))
- I{corpus}.xml(): A single xml ElementTree
- I{corpus}.raw(): unprocessed corpus contents
For example, to read a list of the words in the Brown Corpus, use
C{nltk.corpus.brown.words()}:
>>> from nltk.corpus import brown
>>> print brown.words()
['The', 'Fulton', 'County', 'Grand', 'Jury', 'said', ...]
Corpus Metadata
===============
Metadata about the NLTK corpora, and their individual documents, is
stored using U{Open Language Archives Community (OLAC)
<http://www.language-archives.org/>} metadata records. These records
can be accessed using C{nltk.corpus.I{corpus}.olac()}.
"""
import re
from nltk.tokenize import RegexpTokenizer
from nltk.tag import simplify_brown_tag, simplify_wsj_tag,\
simplify_alpino_tag, simplify_indian_tag,\
simplify_tag
from util import LazyCorpusLoader
from reader import *
abc = LazyCorpusLoader(
'abc', PlaintextCorpusReader, r'(?!\.).*\.txt')
alpino = LazyCorpusLoader(
'alpino', AlpinoCorpusReader, tag_mapping_function=simplify_alpino_tag)
brown = LazyCorpusLoader(
'brown', CategorizedTaggedCorpusReader, r'c[a-z]\d\d',
cat_file='cats.txt', tag_mapping_function=simplify_brown_tag)
cess_cat = LazyCorpusLoader(
'cess_cat', BracketParseCorpusReader, r'(?!\.).*\.tbf',
tag_mapping_function=simplify_tag)
cess_esp = LazyCorpusLoader(
'cess_esp', BracketParseCorpusReader, r'(?!\.).*\.tbf',
tag_mapping_function=simplify_tag)
cmudict = LazyCorpusLoader(
'cmudict', CMUDictCorpusReader, ['cmudict'])
comtrans = LazyCorpusLoader(
'comtrans', AlignedCorpusReader, r'(?!README|\.).*')
conll2000 = LazyCorpusLoader(
'conll2000', ConllChunkCorpusReader,
['train.txt', 'test.txt'], ('NP','VP','PP'))
conll2002 = LazyCorpusLoader(
'conll2002', ConllChunkCorpusReader, '.*\.(test|train).*',
('LOC', 'PER', 'ORG', 'MISC'), encoding='utf-8')
conll2007 = LazyCorpusLoader(
'conll2007', DependencyCorpusReader, '.*\.(test|train).*',
encoding='utf-8')
dependency_treebank = LazyCorpusLoader(
'dependency_treebank', DependencyCorpusReader, '.*\.dp')
floresta = LazyCorpusLoader(
'floresta', BracketParseCorpusReader, r'(?!\.).*\.ptb', '#',
tag_mapping_function=simplify_tag)
gazetteers = LazyCorpusLoader(
'gazetteers', WordListCorpusReader, r'(?!LICENSE|\.).*\.txt')
genesis = LazyCorpusLoader(
'genesis', PlaintextCorpusReader, r'(?!\.).*\.txt', encoding=[
('finnish|french|german', 'latin_1'),
('swedish', 'cp865'),
('.*', 'utf_8')])
gutenberg = LazyCorpusLoader(
'gutenberg', PlaintextCorpusReader, r'(?!\.).*\.txt')
# corpus not available with NLTK; these lines caused help(nltk.corpus) to break
#hebrew_treebank = LazyCorpusLoader(
# 'hebrew_treebank', BracketParseCorpusReader, r'.*\.txt')
ieer = LazyCorpusLoader(
'ieer', IEERCorpusReader, r'(?!README|\.).*')
inaugural = LazyCorpusLoader(
'inaugural', PlaintextCorpusReader, r'(?!\.).*\.txt')
# [XX] This should probably just use TaggedCorpusReader:
indian = LazyCorpusLoader(
'indian', IndianCorpusReader, r'(?!\.).*\.pos',
tag_mapping_function=simplify_indian_tag)
ipipan = LazyCorpusLoader(
'ipipan', IPIPANCorpusReader, r'(?!\.).*morph\.xml')
jeita = LazyCorpusLoader(
'jeita', ChasenCorpusReader, r'.*\.chasen', encoding='utf-8')
knbc = LazyCorpusLoader(
'knbc/corpus1', KNBCorpusReader, r'.*/KN.*', encoding='euc-jp')
mac_morpho = LazyCorpusLoader(
'mac_morpho', MacMorphoCorpusReader, r'(?!\.).*\.txt',
tag_mapping_function=simplify_tag, encoding='latin-1')
machado = LazyCorpusLoader(
'machado', PortugueseCategorizedPlaintextCorpusReader,
r'(?!\.).*\.txt', cat_pattern=r'([a-z]*)/.*', encoding='latin-1')
movie_reviews = LazyCorpusLoader(
'movie_reviews', CategorizedPlaintextCorpusReader,
r'(?!\.).*\.txt', cat_pattern=r'(neg|pos)/.*')
names = LazyCorpusLoader(
'names', WordListCorpusReader, r'(?!\.).*\.txt')
nps_chat = LazyCorpusLoader(
'nps_chat', NPSChatCorpusReader, r'(?!README|\.).*\.xml',
tag_mapping_function=simplify_wsj_tag)
pl196x = LazyCorpusLoader(
'pl196x', Pl196xCorpusReader, r'[a-z]-.*\.xml',
cat_file='cats.txt', textid_file='textids.txt')
ppattach = LazyCorpusLoader(
'ppattach', PPAttachmentCorpusReader, ['training', 'test', 'devset'])
# ptb = LazyCorpusLoader( # Penn Treebank v3: WSJ and Brown portions
# 'ptb3', CategorizedBracketParseCorpusReader, r'(WSJ/\d\d/WSJ_\d\d|BROWN/C[A-Z]/C[A-Z])\d\d.MRG',
# cat_file='allcats.txt', tag_mapping_function=simplify_wsj_tag)
qc = LazyCorpusLoader(
'qc', StringCategoryCorpusReader, ['train.txt', 'test.txt'])
reuters = LazyCorpusLoader(
'reuters', CategorizedPlaintextCorpusReader, '(training|test).*',
cat_file='cats.txt')
rte = LazyCorpusLoader(
'rte', RTECorpusReader, r'(?!\.).*\.xml')
semcor = LazyCorpusLoader(
'semcor', XMLCorpusReader, r'brown./tagfiles/br-.*\.xml')
senseval = LazyCorpusLoader(
'senseval', SensevalCorpusReader, r'(?!\.).*\.pos')
shakespeare = LazyCorpusLoader(
'shakespeare', XMLCorpusReader, r'(?!\.).*\.xml')
sinica_treebank = LazyCorpusLoader(
'sinica_treebank', SinicaTreebankCorpusReader, ['parsed'],
tag_mapping_function=simplify_tag)
state_union = LazyCorpusLoader(
'state_union', PlaintextCorpusReader, r'(?!\.).*\.txt')
stopwords = LazyCorpusLoader(
'stopwords', WordListCorpusReader, r'(?!README|\.).*')
swadesh = LazyCorpusLoader(
'swadesh', SwadeshCorpusReader, r'(?!README|\.).*')
switchboard = LazyCorpusLoader(
'switchboard', SwitchboardCorpusReader)
timit = LazyCorpusLoader(
'timit', TimitCorpusReader)
toolbox = LazyCorpusLoader(
'toolbox', ToolboxCorpusReader, r'(?!.*(README|\.)).*\.(dic|txt)')
treebank = LazyCorpusLoader(
'treebank/combined', BracketParseCorpusReader, r'wsj_.*\.mrg',
tag_mapping_function=simplify_wsj_tag)
treebank_chunk = LazyCorpusLoader(
'treebank/tagged', ChunkedCorpusReader, r'wsj_.*\.pos',
sent_tokenizer=RegexpTokenizer(r'(?<=/\.)\s*(?![^\[]*\])', gaps=True),
para_block_reader=tagged_treebank_para_block_reader)
treebank_raw = LazyCorpusLoader(
'treebank/raw', PlaintextCorpusReader, r'wsj_.*')
udhr = LazyCorpusLoader(
'udhr', PlaintextCorpusReader, r'(?!README|\.).*',
# Encodings specified in filenames but not mapped to anything:
# DallakHelv, VIQR, Cyrillic+Abkh, WinResearcher, font,
# Afenegus6..60375, VG2Main, VPS, Turkish, TCVN, Az.Times.Lat0117,
# EUC, Baltic, err, Az.Times.Cyr.Normal0117, T61, Amahuaca, Agra
encoding=[('.*-UTF8$', 'utf-8'), ('.*-Latin1$', 'latin-1'),
('.*-Hebrew$', 'hebrew'), ('.*-Arabic$', 'arabic'),
('.*-Cyrillic$', 'cyrillic'), ('.*-SJIS$', 'SJIS'),
('.*-GB2312$', 'GB2312'), ('.*-Latin2$', 'ISO-8859-2'),
('.*-Greek$', 'greek'), ('.*-UFT8$', 'utf-8'),
('Hungarian_Magyar-Unicode', 'utf-16-le')]
)
verbnet = LazyCorpusLoader(
'verbnet', VerbnetCorpusReader, r'(?!\.).*\.xml')
webtext = LazyCorpusLoader(
'webtext', PlaintextCorpusReader, r'(?!README|\.).*\.txt')
wordnet = LazyCorpusLoader(
'wordnet', WordNetCorpusReader)
wordnet_ic = LazyCorpusLoader(
'wordnet_ic', WordNetICCorpusReader, '.*\.dat')
words = LazyCorpusLoader(
'words', WordListCorpusReader, r'(?!README|\.).*')
ycoe = LazyCorpusLoader(
'ycoe', YCOECorpusReader)
# defined after treebank
propbank = LazyCorpusLoader(
'propbank', PropbankCorpusReader,
'prop.txt', 'frames/.*\.xml', 'verbs.txt',
lambda filename: re.sub(r'^wsj/\d\d/', '', filename),
treebank) # Must be defined *after* treebank corpus.
nombank = LazyCorpusLoader(
'nombank.1.0', NombankCorpusReader,
'nombank.1.0', 'frames/.*\.xml', 'nombank.1.0.words',
lambda filename: re.sub(r'^wsj/\d\d/', '', filename),
treebank) # Must be defined *after* treebank corpus.
def demo():
# This is out-of-date:
abc.demo()
brown.demo()
# chat80.demo()
cmudict.demo()
conll2000.demo()
conll2002.demo()
genesis.demo()
gutenberg.demo()
ieer.demo()
inaugural.demo()
indian.demo()
names.demo()
ppattach.demo()
senseval.demo()
shakespeare.demo()
sinica_treebank.demo()
state_union.demo()
stopwords.demo()
timit.demo()
toolbox.demo()
treebank.demo()
udhr.demo()
webtext.demo()
words.demo()
# ycoe.demo()
if __name__ == '__main__':
#demo()
pass
| gpl-3.0 | -5,547,489,834,691,697,000 | 38.893536 | 101 | 0.670416 | false |
cpitclaudel/dBoost | graphics/scalability.pdf.py | 1 | 2629 | #!/usr/bin/env python3
from utils import filename, save2pdf, setup, rcparams, to_inches
from utils.plots_helper import sensors
import matplotlib
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages
import itertools
matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{siunitx}"]
make,fname = filename("scalability.pdf")
INTEL_TOTAL = 2313153
# labels: vary train size + algo type
# x: vary test size
# y: runtime in s
trs = [1000,100000]#,2313153]
tes = [INTEL_TOTAL]
tes_h = 2000000
_trs = ["1K","100K"]#,2313153]
_tes = [5000000,1000000,15000000,20000000]
#_tes = [0.100,1.000,10.000,100.000,1000.000,2313.153]
#es = ["1_gaussian1.5","0.7_mixture1_0.075","0.7_mixture2_0.075"]
es = [
[1,"gaussian",1.5],
[0.7,"mixture1",0.1],
[0.7,"mixture2",0.05],
[0.7,"histogram"]
]
# build data
results = {}
vals = {}
for (tr,te,e) in itertools.product(trs,tes,es):
if (e[1],tr) not in results:
results[(e[1],tr)] = []
vals[(e[1],tr)] = []
if e[1] == "gaussian":
ofile = "../results/sensors_{}_stat{}_{}{}.out".format(tr,*e)
elif e[1] == "histogram":
ofile = "../results/csail/csail-timings-{}-{}.txt".format(tr,tes_h)
else:
ofile = "../results/sensors_{}_stat{}_{}_{}.out".format(tr,*e)
with open(ofile,'r') as f:
for line in f:
line = line.strip().split()
if line[0] == "Time":
#print("{} {} {}: {}".format(tr,e[1],float(line[1]),float(line[2])))
vals[(e[1],tr)].append(float(line[1]))
results[(e[1],tr)].append(float(line[2]))
if line[0] == "Runtime":
#print("{} {} {}: {}".format(tr,te,e[1],float(line[1])))
vals[(e[1],tr)].append(te)
results[(e[1],tr)].append(float(line[1]))
continue
#print(results)
pdf = PdfPages(fname)
setup()
rcparams()
pyplot.gcf().set_size_inches(to_inches(240), to_inches(240)) # full column size is 240pt
ax = pyplot.gca()
ax.set_title("Scalability")
ax.set_xlabel("Test set size")
ax.set_ylabel("Runtime (s)")
lines = ["-","--"]
linecycler = itertools.cycle(lines)
ax.set_color_cycle(['g','g','r','r','b','b','m','m'])
ax.set_xlim([0,2000000])
for (e,(tr,_tr)) in itertools.product(es,zip(trs,_trs)):
#vals[(e[1],tr)] = [val/1000 for val in vals[(e[1],tr)]]
ax.plot(vals[(e[1],tr)],results[(e[1],tr)],next(linecycler),label = "{}, {}".format(e[1].capitalize(),_tr))#,marker='x',markersize=2.0)
ax.set_xticklabels(['0','0.5M','1M','1.5M','2.0M'])
ax.legend(loc=2,handlelength=3,prop={'size':6})
save2pdf(pdf)
pdf.close()
| gpl-3.0 | 3,273,065,536,939,600,400 | 32.705128 | 139 | 0.58197 | false |
jaygoswami2303/course_dashboard_api | v2/DiscussionAPI/permissions.py | 1 | 1727 | """
Permissions classes for Discussion-API views.
"""
from rest_framework import permissions
from django.http import HttpResponse
import MySQLdb
from course_dashboard_api.v2.dbv import *
sql_user = MYSQL_USER
sql_pswd = MYSQL_PSWD
mysql_db = MYSQL_DB
class IsStudent(permissions.BasePermission):
"""
Grants access if the requested id is of the requesting user or if the requesting user is a superuser.
"""
def has_permission(self, request, view):
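        # The id of the requested user is taken from the URL path: with a
        # trailing slash (e.g. a hypothetical /api/v2/students/<id>/), the
        # second-to-last path component is the id.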
list = request.META['PATH_INFO'].split("/")
id = list[len(list)-2]
return request.user.is_superuser or request.user.id == int(id)
class IsFaculty(permissions.BasePermission):
"""
Grants access if the requesting user is the faculty of the requested course or if the requesting user is a superuser.
"""
def has_permission(self, request, view):
try:
db_mysql = MySQLdb.connect(user=sql_user, passwd=sql_pswd, db=mysql_db) # Establishing MySQL connection
except:
print "MySQL connection not established"
            return False      # MySQL could not be connected; deny access
query = "select * from student_courseaccessrole where binary course_id = %s and role = 'instructor' and user_id=%s"
list = request.META['PATH_INFO'].split("/")
id = list[len(list) - 2]
course_id = "course-v1:" + id
user_id = request.user.id
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(query, (str(course_id), str(user_id), ))
entry = mysql_cursor.fetchone()
permission = True
if entry is None:
permission = False
return request.user.is_superuser or permission
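
# Illustrative wiring (the view below is hypothetical, not part of this
# module): these classes plug into DRF views like any other permission class.
#
#     class StudentDetail(APIView):
#         permission_classes = (IsStudent,)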
| mit | 3,944,675,056,809,785,300 | 32.211538 | 124 | 0.658367 | false |
JaneliaSciComp/Ax | ax1.py | 1 | 6102 | #!/home/arthurb/bin/anaconda/bin/python
# python ax1.py params_file FILEIN FILEOUT
# python ax1.py params_file FILEIN FILEOUT START STOP
# python ax1.py FS NFFT NW K PVAL FILEIN FILEOUT
# python ax1.py FS NFFT NW K PVAL FILEIN FILEOUT START STOP
#
# analyze a set of time series with multi-taper spectral analysis and
# create a sparse matrix of just the time-frequency pixels whose F-test
# passes PVAL.
#
# typical usage consists of one or more input files being analyzed by one
# or more parameter sets. for example, four microphone recordings of the
# same vocalizing mouse analyzed with three different NFFTs and the same
# NW, K, and PVAL. <filename>.ch[1-4] yield <filename>-[1-3].ax
#
# FS: sampling rate in Hertz
# NFFT: FFT window size in seconds, rounds up to the next power of 2 tics
# NW: multi-taper time-bandwidth product
# K: number of tapers
# PVAL: F-test p-val threshold
# FILEIN: the base filename and path of [0-9].wav files with a single channel each,
# or .ch[0-9] files containing float32s
# FILEOUT: an integer to append to FILEIN to differentiate parameter sets used
# START,STOP: optional time range, in seconds
#
# output is a binary file with a time x frequency x amplitude x channel
# array of hot pixels
#
# python ax1.py 'ultrasonic_params.txt' 'urine' '1'
# python ax1.py 200e3 0.001 15 29 0.01 'urine' '1'
# python ax1.py 450450 0.001 15 29 0.01 0 30 'groundtruth' '1'
# /home/arthurb/bin/anaconda/bin/kernprof.py -l -v ax1.py 450450 0.00025 22 43 0.01 /groups/egnor/egnorlab/ben/Test_D_1 7 0 4
from ax1b import do_it, nextpow2
import struct
import time
import numpy as np
import glob
import sys
import os
from multiprocessing import Pool, cpu_count
#import pdb
import math
from scipy import stats
import pyfftw
from dpss import dpss
import wave
if __name__ == "__main__":
if (len(sys.argv)!=4) and (len(sys.argv)!=6) and (len(sys.argv)!=8) and (len(sys.argv)!=10):
print('invalid args')
        sys.exit(1)
tstart=time.time()
if (len(sys.argv)<8):
execfile(sys.argv[1])
FILEIN=sys.argv[2]
FILEOUT=sys.argv[3]
else:
FS=sys.argv[1]
NFFT=sys.argv[2]
NW=sys.argv[3]
K=sys.argv[4]
PVAL=sys.argv[5]
FILEIN=sys.argv[6]
FILEOUT=sys.argv[7]
if ((len(sys.argv)==6) or (len(sys.argv)==10)):
START=sys.argv[-2]
STOP=sys.argv[-1]
if (isinstance(FS,str)):
FS = int(FS)
if (isinstance(NFFT,str)):
NFFT = float(NFFT)
if (isinstance(NW,str)):
NW = int(NW)
if (isinstance(K,str)):
K = int(K)
if (isinstance(PVAL,str)):
PVAL = float(PVAL)
if ((len(sys.argv)==6) or (len(sys.argv)==10)):
if (isinstance(START,str)):
START = float(START)
if (isinstance(STOP,str)):
STOP = float(STOP)
VERSION=1
SUBSAMPLE=1
NWORKERS=cpu_count()
FS=int(FS/SUBSAMPLE);
NFFT=int(nextpow2(NFFT*FS)) # convert to ticks
NWINDOWS_PER_WORKER=int(12*256*1000/NFFT) # NFFT/2 ticks
FIRST_MT=float('nan')
LAST_MT=float('nan')
FRACTION_MT=float('nan')
tapers,eig = dpss(NFFT, NW, K)
tapers = np.array(tapers, dtype=np.float32)
#tapers = tapers * np.sqrt(FS)
f=np.array(range(0,NFFT//2+1))*FS/NFFT
df=f[1]-f[0];
DIROUT=os.path.dirname(FILEIN);
FILEINs=sorted(glob.glob(FILEIN+'.ch*'));
FILE_TYPE=1
if (len(FILEINs)==0):
FILEINs=sorted(glob.glob(FILEIN+'*.wav'));
FILE_TYPE=2
if (len(FILEINs)==0):
print(["can't find any .wav or .ch files with basename '"+FILEIN]);
        sys.exit(1)
NCHANNELS=len(FILEINs);
REMAP=list();
for i in range(0,NCHANNELS):
filei=os.path.join(DIROUT,FILEINs[i])
if FILE_TYPE==1:
try:
fid=open(filei,'rb')
except:
print(["can't open file '"+filei+"'"])
                sys.exit(1)
fid.seek(0,2);
FILE_LEN=fid.tell()/4/FS;
fid.close()
REMAP.append(FILEINs[i][-1]);
if FILE_TYPE==2:
try:
fid=wave.open(filei,'rb')
except:
print(["can't open file '"+filei+"'"])
                sys.exit(1)
FILE_LEN=fid.getnframes()/FS
fid.close();
REMAP.append(FILEINs[i][-5]);
if 'START' not in locals():
tmp=FILE_LEN*FS/(NFFT//2)-1
print('Processing {:.3g} min = {:.3g} windows = {:3g} chunks of data in {:s}'.format(FILE_LEN/60, tmp, tmp/NWINDOWS_PER_WORKER, FILEINs[i]));
t_offset_tic=0;
t_now_sec=0;
else:
tmp=(STOP-START)*FS/(NFFT//2)-1
print('Processing {:.3g} min = {:.3g} windows = {:3g} chunks of data in {:s}'.format((STOP-START)/60, tmp, tmp/NWINDOWS_PER_WORKER, FILEINs[i]));
t_offset_tic=round(START*FS);
t_now_sec=START;
fid_out=open(FILEIN+'-'+FILEOUT+'.ax','wb')
# L=8 bytes on 64-bit systems
fid_out.write(struct.pack('B',VERSION))
fid_out.write(struct.pack('B',SUBSAMPLE))
fid_out.write(struct.pack('B',0))
fid_out.write(struct.pack('I',FS))
fid_out.write(struct.pack('I',NFFT))
fid_out.write(struct.pack('H',NW))
fid_out.write(struct.pack('H',K))
fid_out.write(struct.pack('d',PVAL))
fid_out.write(struct.pack('d',df))
t_now=0
tloop=time.time()
pool=Pool()
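    # Stream through the recording in chunks: each pass hands NWORKERS blocks
    # of NWINDOWS_PER_WORKER windows to the worker pool, then packs the
    # returned hot pixels (time, frequency, amplitude, channel) into the
    # .ax output file.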
while ((t_now_sec<FILE_LEN) and (('STOP' not in locals()) or (t_now_sec<STOP))):
if ((time.time()-tloop)>10):
tmp=t_now_sec
tmp2=0
if 'START' in locals():
tmp=tmp-START
tmp2=START
if 'STOP' in locals():
tmp=tmp/(STOP-tmp2)
else:
tmp=tmp/(FILE_LEN-tmp2)
print('{:d} sec processed; {:d}% done'.format(int(round(t_now_sec-tmp2)),int(round(100*tmp))))
tloop=time.time()
#idx=map(do_it, \
idx=pool.map(do_it, \
[(DIROUT, FILEINs, t_now, NW,K,PVAL,FS,NFFT, NWINDOWS_PER_WORKER, tapers, x, t_offset_tic, FILE_TYPE, round(FILE_LEN*FS)) for x in range(0,NWORKERS)])
for i in idx:
for j in i:
fid_out.write(struct.pack('dddd', \
float(t_now)+j[0], j[1], j[2], float(REMAP[j[3]])))
t_now_sec = t_now_sec+float(NFFT//2)/FS*NWORKERS*NWINDOWS_PER_WORKER
t_now = t_now+NWORKERS*NWINDOWS_PER_WORKER
fid_out.write('Z'.encode('ascii'))
fid_out.close()
tstop = time.time() - tstart
print('Run time was {:.3g} minutes.'.format(tstop/60))
pool.close()
| bsd-3-clause | 4,388,245,357,869,162,500 | 28.765854 | 157 | 0.632416 | false |
ashoksekar/python | CodeJam/2011/Round2/AIWar/main.py | 1 | 5722 | #!/usr/bin/python
import networkx as nx
import matplotlib.pyplot as plt
num_of_test = 0 # number of test case (N)
debug = 0
num_P = 0
num_W = 0
gown = 0
gthtn = 0
th = []
class node(object):
""" data = n
child = child nodes """
def __init__(self, data = 0, child = [], parent = [], level = 0):
self.data = data
self.child = child
self.parent = parent
self.level = level
def __str__(self):
return '%d' % self.data
def find_node(root, data):
items = [root]
while len(items):
pt = items.pop()
if pt.data == data:
return pt
for x in pt.child:
if not(x in items):
items.append(x)
return None
def print_node(root):
items = [root]
while len(items):
pt = items.pop(0)
print "pt:", pt, "child:",
for x in pt.child:
print x,
if not(x in items):
items.append(x)
print
return
def open_read_file():
#file_name="D-small-practice.in"
file_name="D-large-practice.in"
#file_name="sample_input.txt"
fin=open(file_name, 'r')
return fin
def find_gthtn(node, parent, ttn):
global gthtn
global graph
global num_P
global i
if 1 in ttn:
l = ttn
l = list(set(l))
if 0 in l:
l.remove(0)
assert(1 in l)
if gthtn < len(l):
gthtn = len(l)
items = node.child[:]
ln1 = []
big = 0
for x in items:
t = ttn+graph[x.data]+graph[parent.data]
if len(prev[x.data]) != 0:
for y in prev[x.data]:
t1 = t + graph[y]
t2 = list(set(t1))
ln1.append(len(t2))
if big < len(t2):
big = len(t2)
else:
t1 = list(set(t))
ln1.append(len(t1))
if big < len(t1):
big = len(t1)
ii = 0
items1 = []
out_break = 0
for x in items:
if len(prev[x.data]) != 0:
for y in prev[x.data]:
if ln1[ii] == big:
items1.append(x)
#out_break = True
break
ii += 1
if out_break:
break
else:
if ln1[ii] == big:
items1.append(x)
break
ii += 1
for pt in items1:
find_gthtn(pt, node, list(set(ttn + graph[pt.data])))
def find_thtn(ptv):
global graph, prev
global gown, gthtn
global i
nodeg = []
for x in range(400):
nodeg.append(None)
root = node(data = ptv, child = [], parent = [], level = 1)
G = nx.Graph()
items = [root]
while len(items):
pt = items.pop(0)
for x in graph[pt.data]:
if not ((pt.data, x) in G.edges()):
G.add_edge(pt.data, x, color = 'blue')
for z in prev[pt.data]:
n = nodeg[z] #node.find_node(root, z)
if (n == None):
n = node(data = z, child = [], parent = [])
nodeg[z] = n
n.level = pt.level + 1
items.append(n)
G.add_edge(pt.data, n.data, color = 'red')
pt.child.append(n)
assert (n.level == (pt.level + 1))
if (debug):
print 'pt:' ,pt, 'ptprev:', prev[pt.data], 'n:', n, "nprev:", prev[n.data]
#print 'pt:', pt, 'n:', n, 'prev:', prev[pt.data], 'parent:',
#for x in n.parent:
# print x,
#print
if (debug):
color = nx.get_edge_attributes(G,'color')
colors = []
for x in color:
colors.append(color[x])
print colors
nx.draw_networkx(G, pos=nx.shell_layout(G), edge_color=colors)
plt.axis('on')
plt.show()
find_gthtn(root, root, graph[root.data])
def find_own(pt):
global graph, prev
global gown, gthtn
global i
own = 0
while True:
if (pt != 1) and (pt != 0):
own += 1
pt = prev[pt][-1]
if pt == 1:
break
gown = own
fin = open_read_file()
num_of_test = int(fin.readline())
i = 0
while i < num_of_test:
string = fin.readline().split()
num_P = int(string[0])
num_W = int(string[1])
graph = dict()
attr = []
prev = []
gown = 0
gthtn = 0
string = fin.readline().split()
#if i == 5:
# debug = 1
#else:
# debug = 0
for x in range(num_P):
graph[x] = []
attr.append([0,0xffffffff])
prev.append([])
attr[1][1] = 0
for x in range(num_W):
s = string[x].split(',')
m = int(s[0])
n = int(s[1])
graph[m].append(n)
graph[n].append(m)
for x in range(num_P):
graph[x].sort()
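    # Breadth-first relaxation from planet 1: attr[p][1] ends up holding the
    # shortest hop distance from planet 1, and prev[p] the predecessors that
    # lie on some shortest path.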
lst = [1]
while len(lst) > 0:
m = lst.pop(0)
if attr[m][0] != 0:
continue
for x in range(len(graph[m])):
dest = graph[m][x]
if (attr[dest][0] == 0):
lst.append(dest)
if ((attr[m][1]+1) < attr[dest][1]):
attr[dest][1] = attr[m][1]+1
prev[dest] = [m]
elif ((attr[m][1]+1) == attr[dest][1]):
if not (m in prev[dest]):
prev[dest].append(m)
else:
continue
attr[m][0] = 1
find_own(0)
find_thtn(0)
if (debug):
print gown,gthtn
i += 1
gthtn -= gown
print 'Case #%d: %d %d' % (i, gown, gthtn)
| gpl-2.0 | -8,738,169,697,906,892,000 | 24.207048 | 90 | 0.436386 | false |
robertdeg/sdfpy | tests/test_core.py | 1 | 3311 | import unittest
import networkx as nx
import sdfpy.core as core
class TestLoadJSON(unittest.TestCase):
def test_tiny_csdf(self):
try:
g = core.load_sdf('tests/graphs/csdfg-tiny.json')
q = g.repetition_vector()
s = g.normalisation_vector()
m = g.modulus()
self.assertEqual( m, 6, "Modulus of graph is six" )
self.assertEqual( q['a'], 2 )
self.assertEqual( q['b'], 3 )
self.assertEqual( s[('a', 'b')], 1 )
self.assertEqual( s[('b', 'a')], 1 )
self.assertEqual( s[('b', 'b')], 2 )
self.assertEqual( g.nodes['a']['period'], 1 )
self.assertEqual( g.nodes['b']['period'], 3 )
except Exception as e:
self.fail()
def test_small_csdf(self):
try:
g = core.load_sdf('tests/graphs/csdfg-small.json')
q = g.repetition_vector()
s = g.normalisation_vector()
m = g.modulus()
self.assertEqual( m, 6, "Modulus of graph is six" )
self.assertEqual( q['a'], 2 )
self.assertEqual( q['b'], 3 )
self.assertEqual( q['c'], 3 )
self.assertEqual( s[('a', 'b')], 1 )
self.assertEqual( s[('b', 'a')], 1 )
self.assertEqual( s[('b', 'c')], 2 )
self.assertEqual( s[('c', 'b')], 2 )
self.assertEqual( s[('c', 'c')], 2 )
self.assertEqual( g.nodes['a']['period'], 1 )
self.assertEqual( g.nodes['b']['period'], 3 )
self.assertEqual( g.nodes['c']['period'], 1 )
except Exception as e:
self.fail()
class TestLoadYAML(unittest.TestCase):
def test_csdf(self):
try:
g = core.load_sdf_yaml('tests/graphs/csdfg-tiny.yaml')
q = g.repetition_vector()
s = g.normalisation_vector()
m = g.modulus()
self.assertEqual( m, 6, "Modulus of graph is six" )
self.assertEqual( q['a'], 2 )
self.assertEqual( q['b'], 3 )
self.assertEqual( s[('a', 'b')], 1 )
self.assertEqual( s[('b', 'a')], 1 )
self.assertEqual( s[('b', 'b')], 2 )
self.assertEqual( g.nodes['a']['period'], 1 )
self.assertEqual( g.nodes['b']['period'], 3 )
except Exception as e:
self.fail()
def test_small_csdf(self):
try:
g = core.load_sdf_yaml('tests/graphs/csdfg-small.yaml')
q = g.repetition_vector()
s = g.normalisation_vector()
m = g.modulus()
self.assertEqual( m, 6, "Modulus of graph is six" )
self.assertEqual( q['a'], 2 )
self.assertEqual( q['b'], 3 )
self.assertEqual( q['c'], 3 )
self.assertEqual( s[('a', 'b')], 1 )
self.assertEqual( s[('b', 'a')], 1 )
self.assertEqual( s[('b', 'c')], 2 )
self.assertEqual( s[('c', 'b')], 2 )
self.assertEqual( s[('c', 'c')], 2 )
self.assertEqual( g.nodes['a']['period'], 1 )
self.assertEqual( g.nodes['b']['period'], 3 )
self.assertEqual( g.nodes['c']['period'], 1 )
except Exception as e:
self.fail()
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -5,373,801,623,200,563,000 | 38.416667 | 67 | 0.481728 | false |
JeffHoogland/eandora | oldvlc/eAndoraVLC.py | 1 | 21838 | """A Pandora Client Written in Python EFLs/Elm
Uses VLC as a streaming backend
By: Jeff Hoogland ([email protected])
Started: 12/20/12
"""
import os
import elementary
import edje
import ecore
import evas
import time
import pandora
import vlc
import urllib
import webbrowser
def openBrowser(url):
print "Opening %s"%url
webbrowser.open(url)
try:
os.wait() # workaround for http://bugs.python.org/issue5993
except:
pass
class eAndora:
def __init__( self ):
self.gui = ""
self.pandora = pandora.Pandora()
self.curStation = ""
self.curSong = None
self.playing = False
self.skip = False
self.die = False
self.settings = {"username":"", "password":""}
self.player = None
self.skinName = "Default"
self.song = None
self.songinfo = []
self.displaysongs = []
self.player = vlc.MediaPlayer()
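        # Hook VLC's end-of-stream event so that playback automatically
        # advances to the next queued track (nextSong is also reached via
        # skipSong).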
self.event_manager = self.player.event_manager()
self.event_manager.event_attach(vlc.EventType.MediaPlayerEndReached, self.nextSong)
self.songCount = 0
def setGUI( self, GUI):
self.gui = GUI
def auth( self, user, passwd):
print "User %s - Password %s"%(user, passwd)
self.settings['username'] = user
self.settings['password'] = passwd
try:
self.pandora.connect(self.settings['username'], self.settings['password'])
except:
self.gui.login_error(None)
def playSong( self ):
self.playing = True
self.player.play()
def pauseSong( self ):
self.playing = False
self.player.pause()
def skipSong( self ):
self.nextSong("skip")
def setStation( self, station ):
self.curStation = pandora.Station(self.pandora, station)
def getStations( self ):
return self.pandora.get_stations()
def getStation( self ):
return self.curStation
def getCurSongInfo( self ):
return self.songinfo[self.curSong]
def getSongInfo( self ):
return self.songinfo
def getStationFromName( self, name):
stations = self.getStations()
for station in stations:
if station['stationName'] == name:
return station
def getSongDuration( self ):
seconds = self.player.get_length() / 1000.0
mins = 0
while seconds >= 60:
seconds -= 60
mins += 1
return mins, seconds
def getSongRating( self ):
return self.songinfo[self.curSong]['rating']
def showSong( self ):
openBrowser(self.songinfo[self.curSong]['object'].songDetailURL)
def showAlbum( self ):
openBrowser(self.songinfo[self.curSong]['object'].albumDetailURL)
def banSong( self ):
info = self.songinfo[self.curSong]
info['object'].rate('ban')
def loveSong( self ):
info = self.songinfo[self.curSong]
info['object'].rate('love')
def clearSongs( self ):
self.song = None
self.songCount = 0
self.songinfo = []
self.displaysongs = []
def addSongs( self ):
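        # Pull the next batch of tracks for the current station from Pandora,
        # cache the per-song metadata, and start playback if nothing is
        # playing yet.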
playlist = self.curStation.get_playlist()
for song in playlist:
info = { "title" : song.title, \
"artist" : song.artist, \
"album" : song.album, \
"thumbnail" : song.artRadio, \
"url" : str(song.audioUrl), \
"rating" : song.rating, \
"object" : song
}
self.songinfo.append(info)
if not self.song:
self.startPlaying()
def startPlaying( self ):
self.curSong = -1
self.nextSong()
def nextSong( self , event=False ):
print("Debug 1")
if self.player.is_playing():
self.player.stop()
print("Debug 2")
self.player = vlc.MediaPlayer()
self.event_manager = self.player.event_manager()
self.event_manager.event_attach(vlc.EventType.MediaPlayerEndReached, self.nextSong)
print("Debug 3")
self.curSong += 1
info = self.songinfo[self.curSong]
self.displaysongs.append(info)
self.song = info['title']
print(info)
print("Debug 4")
self.player.set_media(vlc.Media(info['url']))
print("Debug 5")
self.playing = True
self.player.play()
print("Debug 6")
self.gui.song_change()
print("Debug 7")
#self.curSong += 1
if self.curSong >= len(self.songinfo)-1:
print("Debug 8")
self.addSongs()
print("Debug 9")
self.songCount += 1
if self.songCount >= 15:
print("Debug 10")
self.songCount = 0
self.auth(self.settings['username'], self.settings['password'])
class Interface:
def __init__( self ):
self.ourPlayer = eAndora()
self.mainWindow = elementary.Window("table", elementary.ELM_WIN_BASIC)
self.songList = elementary.List(self.mainWindow)
self.stationButton = elementary.Button(self.mainWindow)
#self.stationDropdown = elementary.Toolbar(self.mainWindow)
self.tb = None
self.thumb = elementary.Button(self.mainWindow)
self.song = elementary.Button(self.mainWindow)
#self.song.style_set("anchor")
self.artist = elementary.Label(self.mainWindow)
self.album = elementary.Button(self.mainWindow)
#self.album.style_set("anchor")
self.rating = elementary.Button(self.mainWindow)
self.counter = [elementary.Clock(self.mainWindow), elementary.Label(self.mainWindow), elementary.Label(self.mainWindow)]
self.pauseTime = None
def ban_track( self, bt ):
self.ourPlayer.banSong()
self.ourPlayer.skipSong()
def love_track( self, bt ):
self.ourPlayer.loveSong()
ic = elementary.Icon(self.mainWindow)
ic.file_set('images/love.png')
self.rating.hide()
self.rating.tooltip_text_set("Song already liked")
self.rating.content_set(ic)
self.rating.show()
def show_song( self, bt ):
self.ourPlayer.showSong()
def show_album( self, bt ):
self.ourPlayer.showAlbum()
def song_change( self ):
info = self.ourPlayer.getCurSongInfo()
print("DEBUG: Changing Album Art")
try:
os.remove('/tmp/albumart.jpg')
except:
pass
urllib.urlretrieve(str(info['thumbnail']), '/tmp/albumart.jpg')
ic = elementary.Icon(self.mainWindow)
ic.file_set('/tmp/albumart.jpg')
self.thumb.show()
self.thumb.content_set(ic)
self.thumb.show()
print("DEBUG: Changing song title")
self.song.hide()
self.song.text_set("Song: %s"%info['title'])
self.song.show()
print("DEBUG: Changing album title")
self.album.hide()
self.album.text_set("Album: %s"%info['album'])
self.album.show()
print("DEBUG: Changing artist")
self.artist.hide()
self.artist.text_set("<b><div align='center'>Artist: %s</div></b>"%info['artist'])
self.artist.show()
print("DEBUG: Changing clock to zero")
self.counter[0].hide()
self.counter[0].time_set(0, 0, 0)
self.counter[0].show()
print("DEBUG: Changing total time")
self.counter[1].hide()
mins, seconds = 0, 0
while not mins and not seconds:
time.sleep(0.5)
mins, seconds = self.ourPlayer.getSongDuration()
if int(seconds) > 9:
self.counter[1].text_set("<b>/ %s : %s</b>"%(mins, int(seconds)))
else:
self.counter[1].text_set("<b>/ %s : 0%s</b>"%(mins, int(seconds)))
self.counter[1].show()
print("DEBUG: Changing ratings")
self.rating.hide()
ic = elementary.Icon(self.mainWindow)
rating = self.ourPlayer.getSongRating()
if not rating:
ic.file_set('images/favorite.png')
self.rating.tooltip_text_set("Like Song")
elif rating == 'love':
ic.file_set('images/love.png')
self.rating.tooltip_text_set("Song already liked")
else:
ic.file_set('images/ban.png')
self.rating.content_set(ic)
self.rating.show()
print("DEBUG: Adding song to list")
self.refreshInterface()
print("Hey look the song changed!")
def close_window(self, bt, win):
win.delete()
def login_user(self, bt, user, passwd, win, ck):
win.hide()
if ck.state:
home = os.path.expanduser("~")
if not os.path.exists("%s/.config/eAndora"%home):
os.makedirs("%s/.config/eAndora"%home)
f = open('%s/.config/eAndora/userinfo'%home, 'w')
f.write('%s\n'%user.entry_get())
f.write('%s\n'%passwd.entry_get())
f.close()
self.ourPlayer.auth(user.entry_get(), passwd.entry_get())
self.interface_clicked(None)
def spawn_login(self, bt, win):
win.hide()
self.login_clicked(None)
def login_error(self, obj):
win = elementary.Window("test", elementary.ELM_WIN_BASIC)
win.title_set("eAndora - Error")
if obj is None:
win.callback_delete_request_add(lambda o: elementary.exit())
bg = elementary.Background(win)
win.resize_object_add(bg)
bg.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bg.show()
box0 = elementary.Box(win)
box0.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
win.resize_object_add(box0)
box0.show()
fr = elementary.Frame(win)
fr.text_set("There was an issue logging - please try again.")
box0.pack_end(fr)
fr.show()
bt = elementary.Button(win)
bt.text_set("OK")
bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bt.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
bt.callback_unpressed_add(self.spawn_login, win)
box0.pack_end(bt)
bt.show()
win.show()
def login(self, obj):
self.ourPlayer.setGUI(self)
home = os.path.expanduser("~")
if os.path.exists("%s/.config/eAndora/userinfo"%home):
f = open('%s/.config/eAndora/userinfo'%home, 'r')
lines = f.readlines()
self.ourPlayer.auth(lines[0].rstrip("\n"), lines[1].rstrip("\n"))
self.interface_clicked(None)
else:
self.login_clicked(None)
def login_clicked(self, obj):
win = elementary.Window("table", elementary.ELM_WIN_BASIC)
win.title_set("eAndora - Login")
if obj is None:
win.callback_delete_request_add(lambda o: elementary.exit())
bg = elementary.Background(win)
win.resize_object_add(bg)
bg.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bg.show()
tb = elementary.Table(win)
win.resize_object_add(tb)
tb.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
tb.show()
bt = elementary.Label(win)
bt.text_set("<div align='center'><b>Email:</b></div>")
bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bt.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
tb.pack(bt, 0, 0, 1, 1)
bt.show()
bt = elementary.Label(win)
bt.text_set("<div align='center'><b>Password:</b></div>")
bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bt.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
tb.pack(bt, 0, 1, 1, 1)
bt.show()
ck = elementary.Check(win)
ck.text_set("Store Login")
tb.pack(ck, 0, 2, 1, 1)
ck.show()
log = elementary.Entry(win)
log.line_wrap_set(False)
log.entry_set("address")
log.input_panel_return_key_disabled_set(True)
log.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
log.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
tb.pack(log, 1, 0, 1, 1)
log.show()
pas = elementary.Entry(win)
pas.line_wrap_set(False)
pas.entry_set("password")
pas.password = True
pas.input_panel_return_key_disabled = True
pas.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
pas.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
tb.pack(pas, 1, 1, 1, 1)
pas.show()
bt = elementary.Button(win)
bt.text_set("Login")
bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bt.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
bt.callback_unpressed_add(self.login_user, log, pas, win, ck)
tb.pack(bt, 0, 3, 2, 1)
bt.show()
bt = elementary.Button(win)
bt.text_set("Exit")
bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bt.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
bt.callback_unpressed_add(self.close_window, win)
tb.pack(bt, 0, 4, 2, 1)
bt.show()
win.resize(800, 300)
win.show()
def play_pause(self, bt):
ic = elementary.Icon(self.mainWindow)
if self.ourPlayer.playing:
ic.file_set("images/play.png")
self.pauseTime = self.counter[0].time_get()
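            # remember the elapsed (h, m, s) so the frozen label can display it and the clock can resume from it later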
self.counter[0].hide()
self.counter[2].size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.counter[2].size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
if self.pauseTime[2] > 9:
self.counter[2].text_set("<b>%s : %s</b>"%(self.pauseTime[1], self.pauseTime[2]))
else:
self.counter[2].text_set("<b>%s : 0%s</b>"%(self.pauseTime[1], self.pauseTime[2]))
self.tb.pack(self.counter[2], 0, 3, 1, 1)
self.counter[2].show()
self.ourPlayer.pauseSong()
else:
ic.file_set("images/pause.png")
self.counter[2].hide()
self.counter[0].size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.counter[0].size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.counter[0].show_seconds_set(True)
self.counter[0].time_set(0, self.pauseTime[1], self.pauseTime[2])
self.tb.pack(self.counter[0], 0, 3, 1, 1)
self.counter[0].show()
self.ourPlayer.playSong()
bt.content_set(ic)
bt.show()
def skip_track(self, bt):
self.ourPlayer.skipSong()
def cb_items(self, li, item):
print(("ctxpopup item selected: %s" % (item.text)))
self.refreshInterface(True)
self.ourPlayer.setStation(self.ourPlayer.getStationFromName(item.text))
home = os.path.expanduser("~")
if not os.path.exists("%s/.config/eAndora"%home):
os.makedirs("%s/.config/eAndora"%home)
if os.path.exists("%s/.config/eAndora/stationinfo"%home):
os.remove('%s/.config/eAndora/stationinfo'%home)
f = open('%s/.config/eAndora/stationinfo'%home, 'w')
f.write('%s\n'%item.text)
f.close()
self.ourPlayer.pauseSong()
self.ourPlayer.clearSongs()
self.ourPlayer.addSongs()
def refreshInterface( self, clear=False ):
if clear:
self.songList.clear()
info = self.ourPlayer.getCurSongInfo()
self.songList.item_prepend("%s - %s"%(info['title'], info['artist']))
self.songList.show()
self.songList.go()
self.stationButton.text_set(str(self.ourPlayer.getStation().name))
self.stationButton.hide()
self.stationButton.show()
def item_new(self, cp, label, icon = None):
if icon:
ic = elementary.Icon(cp)
ic.standard_set(icon)
ic.resizable_set(False, False)
return cp.item_append(label, ic, self.cb_items)
else:
return cp.item_append(label, None, self.cb_items)
def station_selection(self, bt):
cp = elementary.Ctxpopup(bt)
stations = self.ourPlayer.getStations()
for station in stations:
bt = self.item_new(cp, str(station['stationName']))
cp.show()
def interface_clicked(self, obj):
#self.ourPlayer.setGUI(self)
#self.ourPlayer.auth("[email protected]", "")
home = os.path.expanduser("~")
if os.path.exists("%s/.config/eAndora/stationinfo"%home):
f = open('%s/.config/eAndora/stationinfo'%home, 'r')
lines = f.readlines()
self.ourPlayer.setStation(self.ourPlayer.getStationFromName(lines[0].rstrip("\n")))
else:
self.ourPlayer.setStation(self.ourPlayer.getStations()[0])
self.mainWindow.title_set("eAndora - Internet Radio")
ic = elementary.Icon(self.mainWindow)
ic.file_set("images/eAndora.png")
self.mainWindow.icon_object_set(ic)
if obj is None:
self.mainWindow.callback_delete_request_add(lambda o: elementary.exit())
bg = elementary.Background(self.mainWindow)
self.mainWindow.resize_object_add(bg)
bg.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bg.show()
self.tb = elementary.Table(self.mainWindow)
self.mainWindow.resize_object_add(self.tb)
self.tb.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.tb.show()
self.stationButton.tooltip_text_set("Change Stations")
self.stationButton.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.stationButton.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.stationButton.callback_unpressed_add(self.station_selection)
self.tb.pack(self.stationButton, 4, 0, 2, 3)
self.songList.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.songList.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.tb.pack(self.songList, 0, 4, 4, 3)
ic = elementary.Icon(self.mainWindow)
ic.file_set("images/skip.png")
bt = elementary.Button(self.mainWindow)
bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bt.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
bt.content_set(ic)
bt.callback_unpressed_add(self.skip_track)
self.tb.pack(bt, 4, 4, 1, 1)
bt.show()
ic = elementary.Icon(self.mainWindow)
ic.file_set("images/pause.png")
bt = elementary.Button(self.mainWindow)
bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bt.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
bt.content_set(ic)
bt.callback_unpressed_add(self.play_pause)
self.tb.pack(bt, 4, 5, 1, 1)
bt.show()
ic = elementary.Icon(self.mainWindow)
ic.file_set("images/ban.png")
bt = elementary.Button(self.mainWindow)
bt.tooltip_text_set("Ban Song")
bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
bt.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
bt.content_set(ic)
bt.callback_unpressed_add(self.ban_track)
self.tb.pack(bt, 5, 5, 1, 1)
bt.show()
#Define callbacks for all our buttons that will be updated
#Button content is generated on song change
self.thumb.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.thumb.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.tb.pack(self.thumb, 2, 0, 2, 3)
self.song.callback_pressed_add(self.show_song)
self.song.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.song.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.tb.pack(self.song, 0, 0, 2, 1)
self.album.callback_pressed_add(self.show_album)
self.album.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.album.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.tb.pack(self.album, 0, 1, 2, 1)
self.rating.callback_unpressed_add(self.love_track)
self.artist.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.artist.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.tb.pack(self.artist, 0, 2, 2, 1)
self.counter[0].size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.counter[0].size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.counter[0].show_seconds_set(True)
self.tb.pack(self.counter[0], 0, 3, 1, 1)
self.counter[1].size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.counter[1].size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.tb.pack(self.counter[1], 2, 3, 1, 1)
self.rating.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
self.rating.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
self.tb.pack(self.rating, 5, 4, 1, 1)
self.mainWindow.resize(800, 300)
self.mainWindow.show()
self.ourPlayer.addSongs()
if __name__ == "__main__":
elementary.init()
GUI = Interface()
GUI.login(None)
#GUI.interface_clicked(None)
elementary.run()
elementary.shutdown()
| bsd-3-clause | -3,637,781,326,252,944,000 | 35.215589 | 128 | 0.59717 | false |
kylef/maintain | tests/release/test_aggregate.py | 1 | 3715 | import unittest
from semantic_version import Version
from maintain.release.base import Releaser
from maintain.release.aggregate import AggregateReleaser
from maintain.release.version_file import VersionFileReleaser
from ..utils import temp_directory, touch
class MockReleaser(Releaser):
def __init__(self, current_version, next_version=None):
self.current_version = Version(current_version)
if next_version:
self.next_version = Version(next_version)
else:
self.next_version = None
self.is_released = False
def determine_current_version(self):
return self.current_version
def determine_next_version(self):
return self.next_version
def bump(self, new_version):
self.current_version = Version(new_version)
def release(self, new_version):
self.is_released = True
class AggregateReleaserTestCase(unittest.TestCase):
def test_errors_when_inconsistent_releaser_versions(self):
releasers = [
MockReleaser('1.2.3'),
MockReleaser('1.2.4'),
]
with self.assertRaises(Exception):
AggregateReleaser(releasers=releasers)
def test_detect_current_version(self):
releaser = AggregateReleaser(releasers=[MockReleaser('1.2.3')])
version = releaser.determine_current_version()
self.assertEqual(version, Version('1.2.3'))
def test_determine_next_version_unknown(self):
releaser = AggregateReleaser(releasers=[
MockReleaser('1.2.3'),
MockReleaser('1.2.3'),
])
version = releaser.determine_next_version()
self.assertEqual(version, None)
def test_determine_next_version(self):
releaser = AggregateReleaser(releasers=[
MockReleaser('1.2.3'),
MockReleaser('1.2.3', '1.3.0'),
])
version = releaser.determine_next_version()
self.assertEqual(version, Version('1.3.0'))
def test_determine_inconsistent_next_version(self):
releaser = AggregateReleaser(releasers=[
MockReleaser('1.2.3', '2.0.0'),
MockReleaser('1.2.3', '1.3.0'),
])
with self.assertRaises(Exception):
releaser.determine_next_version()
def test_bumping(self):
releasers = [
MockReleaser('1.2.3'),
MockReleaser('1.2.3'),
]
releaser = AggregateReleaser(releasers=releasers)
releaser.bump('2.0.0')
versions = map(lambda r: r.determine_current_version(), releasers)
self.assertEqual(list(versions), [Version('2.0.0'), Version('2.0.0')])
def test_releasing(self):
releasers = [
MockReleaser('1.2.3'),
MockReleaser('1.2.3'),
]
releaser = AggregateReleaser(releasers=releasers)
releaser.release(None)
released = list(map(lambda r: r.is_released, releasers))
self.assertEqual(released, [True, True])
def test_detecting_releasers(self):
with temp_directory():
touch('VERSION', '1.0.0\n')
releaser = AggregateReleaser()
releasers = list(filter(lambda r: isinstance(r, VersionFileReleaser), releaser.releasers))
self.assertEqual(len(releasers), 1)
def test_detecting_disabled_releasers(self):
with temp_directory():
touch('VERSION', '1.0.0\n')
releaser = AggregateReleaser(config={
'version_file': {
'disabled': True
}
})
releasers = list(filter(lambda r: isinstance(r, VersionFileReleaser), releaser.releasers))
self.assertEqual(len(releasers), 0)
| bsd-2-clause | -6,303,545,453,626,747,000 | 31.587719 | 102 | 0.613728 | false |
PuzzleboxIO/synapse-python | Puzzlebox/Synapse/Device.py | 1 | 12424 | # -*- coding: utf-8 -*-
# Copyright Puzzlebox Productions, LLC (2010-2012)
#
# This code is released under the GNU Pulic License (GPL) version 2
# For more information please refer to http://www.gnu.org/copyleft/gpl.html
__changelog__ = """\
Last Update: 2012.04.23
"""
__todo__ = """
"""
### IMPORTS ###
import os, sys
import Puzzlebox.Synapse.Configuration as configuration
if configuration.ENABLE_PYSIDE:
try:
import PySide as PyQt4
from PySide import QtCore, QtGui
except Exception, e:
print "ERROR: [Synapse:Device] Exception importing PySide:",
print e
configuration.ENABLE_PYSIDE = False
else:
print "INFO: [Synapse:Device] Using PySide module"
if not configuration.ENABLE_PYSIDE:
print "INFO: [Synapse:Device] Using PyQt4 module"
from PyQt4 import QtCore, QtGui
if (sys.platform == 'win32'):
import _winreg as winreg
import itertools
import re
import serial
DEFAULT_IMAGE_PATH = 'images'
elif (sys.platform == 'darwin'):
DEFAULT_IMAGE_PATH = 'images'
else:
import bluetooth
DEFAULT_IMAGE_PATH = '/usr/share/puzzlebox_synapse/images'
#####################################################################
# Globals
#####################################################################
DEBUG = configuration.DEBUG
PATH_TO_HCITOOL = '/usr/bin/hcitool'
#####################################################################
# Classes
#####################################################################
class puzzlebox_synapse_device(QtGui.QWidget):
def __init__(self, log, \
DEBUG=DEBUG, \
parent=None, \
):
self.log = log
self.DEBUG = DEBUG
self.parent=parent
if self.parent == None:
QtGui.QWidget.__init__(self, parent)
#self.setupUi(self)
self.configureSettings()
self.connectWidgets()
self.name = "Synapse:Device"
##################################################################
def configureSettings(self):
pass
##################################################################
def connectWidgets(self):
pass
##################################################################
def enumerateSerialPorts(self):
""" Uses the Win32 registry to return an
iterator of serial (COM) ports
existing on this computer.
from http://eli.thegreenplace.net/2009/07/31/listing-all-serial-ports-on-windows-with-python/
"""
path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path)
except WindowsError:
#raise IterationError
return
for i in itertools.count():
try:
val = winreg.EnumValue(key, i)
yield str(val[1])
except EnvironmentError:
break
##################################################################
def fullPortName(self, portname):
""" Given a port-name (of the form COM7,
COM12, CNCA0, etc.) returns a full
name suitable for opening with the
Serial class.
"""
m = re.match('^COM(\d+)$', portname)
if m and int(m.group(1)) < 10:
return portname
return '\\\\.\\' + portname
##################################################################
def searchForSerialDevices(self, devices=[]):
if (sys.platform == 'win32'):
for portname in self.enumerateSerialPorts():
if portname not in devices:
#portname = self.fullPortName(portname)
devices.append(portname)
elif (sys.platform == 'darwin'):
# Handle Telekinesis first so it shows up at top of listings
for device in os.listdir('/dev'):
if (device.startswith('tty.Telekinesis')):
devices.append( os.path.join('/dev', device))
for device in os.listdir('/dev'):
if (device.startswith('tty.MindWaveMobile') or \
device.startswith('tty.MindWave')):
devices.append( os.path.join('/dev', device))
# Handle MindSet separately so it shows up second in listings
for device in os.listdir('/dev'):
if (device.startswith('tty.MindSet')):
devices.append( os.path.join('/dev', device))
else:
#if os.path.exists('/dev/tty.MindWaveMobile-SPPDev'):
#devices.append('/dev/tty.MindWaveMobile-SPPDev')
#if os.path.exists('/dev/tty.MindWaveMobile-DevA'):
#devices.append('/dev/tty.MindWaveMobile-DevA')
#if os.path.exists('/dev/tty.MindWaveMobile-DevB'):
#devices.append('/dev/tty.MindWaveMobile-DevB')
#if os.path.exists('/dev/tty.MindWave'):
#devices.append('/dev/tty.MindWave')
#if os.path.exists('/dev/tty.MindWave1'):
#devices.append('/dev/tty.MindWave1')
#if os.path.exists('/dev/tty.MindWave2'):
#devices.append('/dev/tty.MindWave2')
#if os.path.exists('/dev/tty.MindWave3'):
#devices.append('/dev/tty.MindWave3')
#if os.path.exists('/dev/tty.MindWave4'):
#devices.append('/dev/tty.MindWave4')
#if os.path.exists('/dev/tty.MindWave5'):
#devices.append('/dev/tty.MindWave5')
#if os.path.exists('/dev/tty.MindSet-DevB'):
#devices.append('/dev/tty.MindSet-DevB')
for device in os.listdir('/dev'):
if (device.startswith('ttyUSB') or \
device.startswith('ttyACM') or \
device.startswith('tty.usbserial') or \
device.startswith('rfcomm')):
devices.append( os.path.join('/dev', device))
#if os.path.exists('/dev/ttyUSB0'):
#devices.append('/dev/ttyUSB0')
#if os.path.exists('/dev/ttyUSB1'):
#devices.append('/dev/ttyUSB1')
#if os.path.exists('/dev/ttyUSB2'):
#devices.append('/dev/ttyUSB2')
#if os.path.exists('/dev/ttyUSB3'):
#devices.append('/dev/ttyUSB3')
#if os.path.exists('/dev/ttyUSB4'):
#devices.append('/dev/ttyUSB4')
#if os.path.exists('/dev/ttyUSB5'):
#devices.append('/dev/ttyUSB5')
#if os.path.exists('/dev/ttyUSB6'):
#devices.append('/dev/ttyUSB6')
#if os.path.exists('/dev/ttyUSB7'):
#devices.append('/dev/ttyUSB7')
#if os.path.exists('/dev/ttyUSB8'):
#devices.append('/dev/ttyUSB8')
#if os.path.exists('/dev/ttyUSB9'):
#devices.append('/dev/ttyUSB9')
#if os.path.exists('/dev/rfcomm0'):
#devices.append('/dev/rfcomm0')
#if os.path.exists('/dev/rfcomm1'):
#devices.append('/dev/rfcomm1')
#if os.path.exists('/dev/rfcomm2'):
#devices.append('/dev/rfcomm2')
#if os.path.exists('/dev/rfcomm3'):
#devices.append('/dev/rfcomm3')
#if os.path.exists('/dev/rfcomm4'):
#devices.append('/dev/rfcomm4')
#if os.path.exists('/dev/ttyACM0'):
#devices.append('/dev/ttyACM0')
#if os.path.exists('/dev/ttyACM1'):
#devices.append('/dev/ttyACM1')
#if os.path.exists('/dev/ttyACM2'):
#devices.append('/dev/ttyACM2')
#if os.path.exists('/dev/ttyACM3'):
#devices.append('/dev/ttyACM3')
#if os.path.exists('/dev/ttyACM4'):
#devices.append('/dev/ttyACM4')
return(devices)
##################################################################
def hcitoolScanForRemoteDevices(self, devices=[]):
bluetooth_devices = []
#command = '%s scan 2> /dev/null' % PATH_TO_HCITOOL
command = '%s scan' % PATH_TO_HCITOOL
if self.DEBUG > 1:
print 'INFO: Calling "%s"' % command
output = os.popen(command, 'r')
try:
result = output.readlines()
except Exception, e:
if self.DEBUG:
print "ERROR [Synapse-Interface]: Failed reading result from call to hcitool:",
print e
result = ''
if result == '':
return([]) # Under OS X hcitool doesn't exist so we don't see any devices
for line in result:
line = line.strip()
if line == '' or line == 'Scanning ...':
continue
elif self.DEBUG > 1:
print line
try:
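				# each result line is expected to be "<bluetooth address>\t<device name>"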
address = line.split('\t')[0]
except:
pass
else:
bluetooth_devices.append(address)
for address in bluetooth_devices:
command = '%s name %s' % (PATH_TO_HCITOOL, address)
if self.DEBUG:
print 'INFO: Calling "%s"' % command
output = os.popen(command, 'r')
for line in output.readlines():
line = line.strip()
if line == '':
continue
elif self.DEBUG:
print '\t',
print line
device_name = line.strip()
if ((device_name == 'MindSet' or device_name == 'MindWave Mobile') and \
(address not in devices)):
devices.append(address)
else:
if self.DEBUG:
print 'INFO: Found but not recognized: [%s] %s' % \
(address, device_name)
return (devices)
##################################################################
def hcitoolGetActiveConnections(self, devices=[]):
bluetooth_devices = []
#command = '%s con 2> /dev/null' % PATH_TO_HCITOOL
command = '%s con' % PATH_TO_HCITOOL
if self.DEBUG > 1:
print 'INFO: Calling "%s"' % command
output = os.popen(command, 'r')
try:
result = output.readlines()
except Exception, e:
if self.DEBUG:
print "ERROR [Synapse:Interface]: Failed reading result from call to hcitool:",
print e
result = ''
if result == '':
return([]) # Under OS X hcitool doesn't exist so we don't see any devices
for line in result:
line = line.strip()
if line == '' or line == 'Connections:':
continue
elif self.DEBUG > 1:
print line
try:
address = line.split(' ')[2]
except:
pass
else:
bluetooth_devices.append(address)
for address in bluetooth_devices:
command = '%s name %s' % (PATH_TO_HCITOOL, address)
if self.DEBUG:
print 'INFO: Calling "%s":' % command
output = os.popen(command, 'r')
for line in output.readlines():
line = line.strip()
if line == '':
continue
elif self.DEBUG:
print '\t',
print line
device_name = line.strip()
if ((device_name == 'MindSet' or device_name == 'MindWave Mobile') and \
(address not in devices)):
devices.append(address)
return (devices)
##################################################################
def searchForDevices(self):
enable_hcitool = configuration.ENABLE_HCITOOL
devices = []
#self.pushButtonBluetoothSearch.setText('Searching')
if ((sys.platform != 'win32' and sys.platform != 'darwin') and \
configuration.THINKGEAR_BLUETOOTH_SEARCH):
# Bluetooth module doesn't compile properly under Windows
# and doesn't exist under OS X
# PyBluez API Documentation
# http://pybluez.googlecode.com/svn/www/docs-0.7/index.html
bluetooth_devices = []
if not enable_hcitool:
try:
if self.DEBUG:
print "INFO: Searching for Bluetooth devices using PyBluez module"
bluetooth_devices = bluetooth.discover_devices( \
duration=configuration.THINKGEAR_BLUETOOTH_DISCOVER_DEVICES_TIMEOUT, \
flush_cache=True, \
lookup_names=False)
for address in bluetooth_devices:
if self.DEBUG:
print "INFO: Device discovered",
print address
device_name = bluetooth.lookup_name(address, \
configuration.THINKGEAR_BLUETOOTH_LOOKUP_NAME_TIMEOUT)
if ((device_name == 'MindSet' or device_name == 'MindWave Mobile') and \
(address not in devices)):
devices.append(address)
# There is an issue under recent released of Linux
# in which already-connected Bluetooth ThinkGear devices
# are not appearing in a bluetooth device scan. However,
# using "hcitool" connected devices can be listed correctly.
# There does not appear to be an equivalent PyBluez feature.
# (http://pybluez.googlecode.com/svn/www/docs-0.7/index.html)
if devices == []:
if self.DEBUG:
print "INFO: No devices found through PyBluez module. Falling back to hcitool."
devices = self.hcitoolGetActiveConnections(devices)
except Exception, e:
if self.DEBUG:
print "ERROR: Exception calling Python Bluetooth module. (Is PyBluez installed?):"
print e
#if (sys.platform != 'darwin'):
enable_hcitool = True
if enable_hcitool:
devices = self.hcitoolScanForRemoteDevices(devices)
devices = self.hcitoolGetActiveConnections(devices)
if self.DEBUG > 2:
print "Bluetooth Devices found:",
print devices
devices = self.searchForSerialDevices(devices)
if self.DEBUG:
print "Devices found:",
print devices
return(devices)
| agpl-3.0 | 6,322,157,607,286,303,000 | 24.883333 | 95 | 0.58717 | false |
jamesjarlathlong/resourceful | two_agents_presleep.py | 1 | 8165 | import os
from agent import *
import asyncio
from qlearn import QLearn
from sarsa import Sarsa
import itertools
import functools
import json
import random
import sklearn
import collections
import websockets
import json
import copy
import time
import random
###Helper functions###
def merge(dicts):
super_dict = collections.defaultdict(list)
for d in dicts:
for k, v in d.items():
super_dict[k]+=v
return super_dict
def tuple_namer(name,tupl):
"""convert an unnamed state tuple
to a namedtuple object"""
tupl_templ = collections.namedtuple(name, 'battery status neighbour')
named = tupl_templ(battery = tupl[0], status = tupl[1], neighbour = tupl[2])
return named
def dictionary_saver(d, filename):
"""d is a dictionary whose keys are of the form (namedtuple, 'string')"""
json_friendly_d = {json.dumps(k):v for k,v in d.items()}
sklearn.externals.joblib.dump(json_friendly_d, filename)
def tracker_saver(d, filename):
"""d is a dictionary whose keys are of the form (namedtuple, 'string')"""
json_friendly_d = {json.dumps(k):json.dumps(v) for k,v in d.items()}
sklearn.externals.joblib.dump(json_friendly_d, filename)
#======actions========#
def go_to_sleep(old):
new = old._replace(status = 'sleeping')
return new
def prepare_sleep(old):
new = old._replace(status = 'pending')
return new
def wakeup(old):
new = old._replace(status = 'running')
return new
def noop(old):
print('noop')
return copy.deepcopy(old)
def create_action_states(states):
actions_states_sleeping = {i:[noop, wakeup] for i in states if i.status=='sleeping'}
actions_states_running = {i:[prepare_sleep, noop] for i in states if i.status == 'running'}
actions_states_pending = {i:[go_to_sleep] for i in states if i.status == 'pending'}
return merge([actions_states_sleeping, actions_states_running, actions_states_pending])
#####rewards###########
def state_rewards(state1, state2):
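    # reward shaping, per the rules below: -50 if both this node and its neighbour sleep,
    # +50 if either is running, -2.5 for switching state, and a flat -50 (overriding the
    # rest) whenever the battery is empty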
initial_reward = 0
if (state2.status == 'sleeping' and state2.neighbour=='sleeping'):
initial_reward -=50
if state2.status =='running' or state2.neighbour=='running':
initial_reward += 50
if state1.status !=state2.status:
initial_reward -= 2.5
if state2.battery == 0:
initial_reward = -50
return initial_reward
###message passing
def find_lead(qs,recruiter):
"""for recruiter, find potential helper"""
all_candidates = [k for k in qs if k!=recruiter]
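    # with only two agents in qs, the single remaining key is the neighbour to notify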
return all_candidates[0]
def broadcast_change(old_state, new_state):
"""gets called when a sensor changes
from sleeping to awake, notifies the other
sensors of this change"""
def neighbor_changed(old_other, new_other,old_self):
new_self = old_self._replace(neighbour=new_other.status)
return new_self
update_from = type(new_state).__name__
update_to = find_lead(qs, update_from)
print('updating from: ', update_from, ' to: ', update_to)
neighbor_change_func = functools.partial(neighbor_changed,old_state, new_state)
qs[update_to].update((1,neighbor_change_func))
"""environments"""
#=====autonomous actions=======#
@asyncio.coroutine
def battery_action(q):
sunny = True
def adjust_battery(is_sunny, sensor):
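        # sleeping charges the battery (+1, or +2 when sunny); running drains it
        # (-2, or -1 when sunny); the level is clamped to the range [0, 20] below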
if sensor.status =='sleeping':
new_battery = sensor.battery + (1 + is_sunny*1)#increase by 1 if not sunny, by 2 if sunny
sensor = sensor._replace(battery=new_battery)
else:
new_battery = sensor.battery - (2 - is_sunny*1)
sensor = sensor._replace(battery=new_battery)
if sensor.battery<=0:
sensor = sensor._replace(battery=0)
if sensor.battery>=20:
sensor = sensor._replace(battery=20)
return sensor
while True:
#if random.random()<0.1:
# sunny = not sunny
adjust_battery_sunny = functools.partial(adjust_battery, sunny)
yield from asyncio.sleep(0.15)
print('putting battery action on the q: ',q.qsize(), q._queue)
priority = random.uniform(2.01, 2.99) #we don't care about the order of adjust battery actions
#just want to make sure they don't collide
q.put_nowait((priority,adjust_battery_sunny))
#======reactions to agent actions==========#
def reaction_default(state1,state2, action):
if state1.status!=state2.status:
print('broadcasting change')
broadcast_change(state1, state2)
return state2
"""speak to outside world"""
def writer(self, state):
t = self.loop.time()
name_id_map = {'Sensor1':0, 'Sensor2':1}
idee = name_id_map[type(state).__name__]
update = {'_id':idee, 'battery':state.battery, 'status':state.status, 'neighbour': state.neighbour}
print('update: ', update)
writerq.append((t,update))
#print('put it on the writerq')
@asyncio.coroutine
def socketwriter(websocket, path):
while True:
msg = yield from writerq.get()
print('msg: ', msg)
yield from websocket.send(json.dumps(msg[1]))
"""special update function to ensure only latest event
with info about neighbour is kept on the queue"""
def update(self, new_item):
priority_level = new_item[0]
def matching_level(element, priority_level):
return element[0]==priority_level
try:
match_generator = (index for index,element in enumerate(self._queue)
if matching_level(element, priority_level))
matching_index = next(match_generator)
self._queue[matching_index] = new_item
except StopIteration:
self.put_nowait(new_item)
asyncio.PriorityQueue.update = update
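# Attaching update() to asyncio.PriorityQueue (assumed here to be the type of each
# agent's sensing_q) lets broadcast_change() overwrite a still-pending neighbour-status
# event at priority 1 instead of queueing stale duplicates.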
if __name__ == '__main__':
loop = asyncio.get_event_loop()
"""States"""
battery = range(21)
status = ['sleeping','pending', 'running']
neighbour = ['sleeping','pending', 'running']
all_vars = [battery,status, neighbour]
state_combinations = list(itertools.product(*all_vars))
"""websocket comm"""
Agent.writer = writer
"""agent 1"""
states1 = [tuple_namer('Sensor1', i) for i in state_combinations]
initial_state1 = tuple_namer('Sensor1', (3,'running', 'running'))
actions_states1 = create_action_states(states1)
agent1 = Agent(actions_states1, state_rewards, initial_state1, wakeup, Sarsa, 1011, loop)
"""agent 2"""
states2 = [tuple_namer('Sensor2', i) for i in state_combinations]
initial_state2 = tuple_namer('Sensor2', (16,'running', 'sleeping'))
actions_states2 = create_action_states(states2)
agent2 = Agent(actions_states2, state_rewards, initial_state2, wakeup, Sarsa, 1022, loop)
"""message passing between agents"""
qs = {'Sensor1':agent1.sensing_q, 'Sensor2':agent2.sensing_q}
"""message passing to websocket"""
writerq = []#asyncio.PriorityQueue(maxsize = 2048)
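    # NOTE: writerq is a plain list here, so the socketwriter coroutine above (which
    # awaits writerq.get()) only works with the commented-out PriorityQueue variant;
    # the start_server task is likewise left out of the task list below.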
start_server = websockets.serve(socketwriter, '127.0.0.1', 8080)
"""now define our environments"""
env_reactions = {'go_to_sleep':reaction_default,'prepare_sleep':reaction_default, 'wakeup':reaction_default,
'noop':reaction_default}
env1 = Environment(env_reactions,[copy.deepcopy(battery_action)], agent1.sensing_q, agent1.action_q)
env2 = Environment(env_reactions,[copy.deepcopy(battery_action)], agent2.sensing_q, agent2.action_q)
"""now run the simulation"""
tasks = [agent1.experience_environment(), env1.react_to_action(),
agent2.experience_environment(), env2.react_to_action()]#,start_server]
for i in env1.env_actions:
tasks.append(i(agent1.sensing_q))
for j in env2.env_actions:
tasks.append(j(agent2.sensing_q))
def loop_stopper():
print('loop stopper')
loop.stop()
print('saving')
dictionary_saver(agent1.learner.q, 'agent1_consolidate')
tracker_saver(agent1.learner.updatecount, 'agent1_hist')
dictionary_saver(agent2.learner.q, 'agent2_consolidate')
tracker_saver(agent2.learner.updatecount, 'agent2_hist')
sklearn.externals.joblib.dump(writerq, 'pend_writer')
print('saved')
loop.call_later(600, loop_stopper)
loop.run_until_complete(asyncio.wait(tasks))
| mit | -7,540,607,257,386,654,000 | 40.446701 | 112 | 0.658298 | false |
Freso/listenbrainz-server | listenbrainz/domain/tests/test_spotify.py | 1 | 9004 | import time
import requests_mock
from flask import current_app
from listenbrainz.domain import spotify
from listenbrainz.webserver.testing import ServerTestCase
from unittest import mock
class SpotifyDomainTestCase(ServerTestCase):
def setUp(self):
super(SpotifyDomainTestCase, self).setUp()
self.spotify_user = spotify.Spotify(
user_id=1,
musicbrainz_id='spotify_user',
musicbrainz_row_id=312,
user_token='old-token',
token_expires=int(time.time()),
refresh_token='old-refresh-token',
last_updated=None,
record_listens=True,
error_message=None,
latest_listened_at=None,
permission='user-read-recently-played',
)
def test_none_values_for_last_updated_and_latest_listened_at(self):
self.assertIsNone(self.spotify_user.last_updated_iso)
self.assertIsNone(self.spotify_user.latest_listened_at_iso)
# apparently, requests_mocker does not follow the usual order in which decorators are applied. :-(
@requests_mock.Mocker()
@mock.patch('listenbrainz.domain.spotify.db_spotify.get_user')
@mock.patch('listenbrainz.domain.spotify.db_spotify.update_token')
def test_refresh_user_token(self, mock_requests, mock_update_token, mock_get_user):
expires_at = int(time.time()) + 3600
mock_requests.post(spotify.OAUTH_TOKEN_URL, status_code=200, json={
'access_token': 'tokentoken',
'refresh_token': 'refreshtokentoken',
'expires_in': 3600,
'scope': '',
})
spotify.refresh_user_token(self.spotify_user)
mock_update_token.assert_called_with(
self.spotify_user.user_id,
'tokentoken',
'refreshtokentoken',
expires_at,
)
mock_get_user.assert_called_with(self.spotify_user.user_id)
@requests_mock.Mocker()
@mock.patch('listenbrainz.domain.spotify.db_spotify.get_user')
@mock.patch('listenbrainz.domain.spotify.db_spotify.update_token')
def test_refresh_user_token_only_access(self, mock_requests, mock_update_token, mock_get_user):
mock_requests.post(spotify.OAUTH_TOKEN_URL, status_code=200, json={
'access_token': 'tokentoken',
'expires_in': 3600,
'scope': '',
})
spotify.refresh_user_token(self.spotify_user)
mock_update_token.assert_called_with(
self.spotify_user.user_id,
'tokentoken',
'old-refresh-token',
mock.ANY # expires_at cannot be accurately calculated hence using mock.ANY
# another option is using a range for expires_at and a Matcher but that seems far more work
)
mock_get_user.assert_called_with(self.spotify_user.user_id)
@requests_mock.Mocker()
def test_refresh_user_token_bad(self, mock_requests):
mock_requests.post(spotify.OAUTH_TOKEN_URL, status_code=400, json={
'error': 'invalid request',
'error_description': 'invalid refresh token',
})
with self.assertRaises(spotify.SpotifyAPIError):
spotify.refresh_user_token(self.spotify_user)
# apparently, requests_mocker does not follow the usual order in which decorators are applied. :-(
@requests_mock.Mocker()
def test_refresh_user_token_revoked(self, mock_requests):
mock_requests.post(spotify.OAUTH_TOKEN_URL, status_code=400, json={
'error': 'invalid_grant',
'error_description': 'Refresh token revoked',
})
with self.assertRaises(spotify.SpotifyInvalidGrantError):
spotify.refresh_user_token(self.spotify_user)
def test_get_spotify_oauth(self):
func_oauth = spotify.get_spotify_oauth()
self.assertEqual(func_oauth.client_id, current_app.config['SPOTIFY_CLIENT_ID'])
self.assertEqual(func_oauth.client_secret, current_app.config['SPOTIFY_CLIENT_SECRET'])
self.assertEqual(func_oauth.redirect_uri, 'http://localhost/profile/connect-spotify/callback')
self.assertIsNone(func_oauth.scope)
func_oauth = spotify.get_spotify_oauth(spotify.SPOTIFY_LISTEN_PERMISSIONS)
self.assertIn('streaming', func_oauth.scope)
self.assertIn('user-read-email', func_oauth.scope)
self.assertIn('user-read-private', func_oauth.scope)
self.assertIn('playlist-modify-public', func_oauth.scope)
self.assertIn('playlist-modify-private', func_oauth.scope)
self.assertNotIn('user-read-recently-played', func_oauth.scope)
self.assertNotIn('user-read-currently-playing', func_oauth.scope)
func_oauth = spotify.get_spotify_oauth(spotify.SPOTIFY_IMPORT_PERMISSIONS)
self.assertIn('user-read-currently-playing', func_oauth.scope)
self.assertIn('user-read-recently-played', func_oauth.scope)
self.assertNotIn('streaming', func_oauth.scope)
self.assertNotIn('user-read-email', func_oauth.scope)
self.assertNotIn('user-read-private', func_oauth.scope)
self.assertNotIn('playlist-modify-public', func_oauth.scope)
self.assertNotIn('playlist-modify-private', func_oauth.scope)
@mock.patch('listenbrainz.domain.spotify.db_spotify.get_user')
def test_get_user(self, mock_db_get_user):
t = int(time.time())
mock_db_get_user.return_value = {
'user_id': 1,
'musicbrainz_id': 'spotify_user',
'musicbrainz_row_id': 312,
'user_token': 'token-token-token',
'token_expires': t,
'refresh_token': 'refresh-refresh-refresh',
'last_updated': None,
'record_listens': True,
'error_message': 'oops',
'latest_listened_at': None,
'permission': 'user-read-recently-played',
}
user = spotify.get_user(1)
self.assertIsInstance(user, spotify.Spotify)
self.assertEqual(user.user_id, 1)
self.assertEqual(user.musicbrainz_id, 'spotify_user')
self.assertEqual(user.user_token, 'token-token-token')
self.assertEqual(user.token_expires, t)
self.assertEqual(user.last_updated, None)
self.assertEqual(user.record_listens, True)
self.assertEqual(user.error_message, 'oops')
@mock.patch('listenbrainz.domain.spotify.db_spotify.delete_spotify')
def test_remove_user(self, mock_delete):
spotify.remove_user(1)
mock_delete.assert_called_with(1)
@mock.patch('listenbrainz.domain.spotify.db_spotify.create_spotify')
@mock.patch('listenbrainz.domain.spotify.time.time')
def test_add_new_user(self, mock_time, mock_create):
mock_time.return_value = 0
spotify.add_new_user(1, {
'access_token': 'access-token',
'refresh_token': 'refresh-token',
'expires_in': 3600,
'scope': '',
})
mock_create.assert_called_with(1, 'access-token', 'refresh-token', 3600, False, '')
@mock.patch('listenbrainz.domain.spotify.db_spotify.get_active_users_to_process')
def test_get_active_users(self, mock_get_active_users):
t = int(time.time())
mock_get_active_users.return_value = [
{
'user_id': 1,
'musicbrainz_id': 'spotify_user',
'musicbrainz_row_id': 312,
'user_token': 'token-token-token',
'token_expires': t,
'refresh_token': 'refresh-refresh-refresh',
'last_updated': None,
'record_listens': True,
'error_message': 'oops',
'latest_listened_at': None,
'permission': 'user-read-recently-played',
},
{
'user_id': 2,
'musicbrainz_id': 'spotify_user_2',
'musicbrainz_row_id': 321,
'user_token': 'token-token-token321',
'token_expires': t + 31,
'refresh_token': 'refresh-refresh-refresh321',
'last_updated': None,
'record_listens': True,
'error_message': 'oops2',
'latest_listened_at': None,
'permission': 'user-read-recently-played',
},
]
lst = spotify.get_active_users_to_process()
mock_get_active_users.assert_called_once()
self.assertEqual(len(lst), 2)
self.assertIsInstance(lst[0], spotify.Spotify)
self.assertIsInstance(lst[1], spotify.Spotify)
self.assertEqual(lst[0].user_id, 1)
self.assertEqual(lst[1].user_id, 2)
@mock.patch('listenbrainz.domain.spotify.db_spotify.update_latest_listened_at')
def test_update_latest_listened_at(self, mock_update_listened_at):
t = int(time.time())
spotify.update_latest_listened_at(1, t)
mock_update_listened_at.assert_called_once_with(1, t)
| gpl-2.0 | 3,376,187,134,347,358,700 | 42.708738 | 103 | 0.615504 | false |
dopuskh3/confluence-publisher | conf_publisher/confluence.py | 1 | 7678 | import os
import copy
from operator import attrgetter
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
class Content(object):
type = None
def __init__(self):
self.id = None
def __eq__(self, other):
return self.__dict__ == other.__dict__
class Attachement(Content):
type = 'attachment'
def __init__(self):
self.title = ''
self.media_type = ''
super(Attachement, self).__init__()
class ImageAttachement(Attachement):
pass
class DownloadAttachement(Attachement):
pass
class Page(Content):
type = 'page'
def __init__(self):
self.version_number = 0
self.space_key = None
self.ancestors = list()
self.body = None
self.title = None
self.unused_title = None
super(Page, self).__init__()
def __eq__(self, other):
first = copy.copy(self.__dict__)
second = copy.copy(other.__dict__)
del first['ancestors']
del second['ancestors']
if len(self.ancestors) != len(other.ancestors):
return False
for first_ancestor, second_ancestor in zip(self.ancestors, other.ancestors):
if not (first_ancestor == second_ancestor):
return False
del first['body']
del second['body']
if not PageBodyComparator.is_equal(self.body, other.body):
return False
return first == second
class Ancestor(Content):
type = 'page'
class ConfluenceManager(object):
def __init__(self, api):
self._api = api
class ConfluencePageManager(ConfluenceManager):
def load(self, content_id):
data = self._api.get_content(content_id, 'ancestors,version,space,body.storage')
p = Page()
p.id = data['id']
p.type = data['type']
p.version_number = data['version']['number']
p.space_key = data['space']['key']
p.title = data['title']
p.body = data['body']['storage']['value']
for ancestor_data in data['ancestors']:
ancestor = Ancestor()
ancestor.id = ancestor_data['id']
ancestor.type = ancestor_data['type']
p.ancestors.append(ancestor)
return p
def create(self, page):
ancestor = page.ancestors[-1]
data = self._page_payload(page.space_key, page.body, page.title,
ancestor_id=ancestor.id, ancestor_type=ancestor.type,)
ret = self._api.create_content(data)
page.id = ret['id']
return page.id
def update(self, page, bump_version=True):
if bump_version:
page.version_number += 1
ancestor = page.ancestors[-1]
data = self._page_payload(page.space_key, page.body, page.title,
ancestor_id=ancestor.id, ancestor_type=ancestor.type,
content_id=page.id, version=page.version_number)
ret = self._api.update_content(page.id, data)
page.id = ret['id']
return page.id
@staticmethod
def _page_payload(space_key, body=None, title=None,
ancestor_id=None, ancestor_type='page',
content_id=None, version=None, content_type='page'):
payload = {
'type': content_type,
'space': {
'key': space_key
},
}
if body:
payload['body'] = {
'storage': {
'value': body,
'representation': 'storage'
}
}
if ancestor_id:
payload['ancestors'] = [
{
'type': ancestor_type,
'id': ancestor_id,
}
]
if content_id:
payload['id'] = content_id
if title:
payload['title'] = title
if version:
payload['version'] = {
'number': version
}
return payload
class AttachmentPublisher(ConfluenceManager):
def publish(self, content_id, filepath):
attachments = self._get_page_metadata(content_id)
filename = os.path.basename(filepath)
if filename in map(attrgetter('title'), attachments):
            # TODO: fix me. Skipping when the file already exists is an ugly hack.
return
with open(filepath, 'rb') as f:
self._api.create_attachment(content_id, f)
@staticmethod
def _parse_attachments(data):
attachments = []
for attachment_data in data['children']['attachment']['results']:
media_type = attachment_data['metadata']['mediaType']
attachment_class = ImageAttachement if 'image' in media_type else DownloadAttachement
attachment = attachment_class()
attachment.id = attachment_data['id']
attachment.title = attachment_data['title']
attachment.media_type = media_type
attachments.append(attachment)
return attachments
def _get_page_metadata(self, content_id):
data = self._api.get_content(content_id, 'children.attachment')
page_attachments = self._parse_attachments(data)
return page_attachments
class PageBodyComparator(object):
@classmethod
def is_equal(cls, first, second):
if first == '' and second == '':
return True
if first == '' and second != '' or first != '' and second == '':
return False
# 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
# 'xsi:schemaLocation="http://www.atlassian.com/schema/confluence/4/ac/ confluence.xsd" '
wrapper = u'<?xml version="1.0" encoding="UTF-8"?>' \
u'<!DOCTYPE ac:confluence SYSTEM "confluence.dtd">' \
u'<ac:confluence xmlns:ac="http://www.atlassian.com/schema/confluence/4/ac/" ' \
u'xmlns:ri="http://www.atlassian.com/schema/confluence/4/ri/">{}</ac:confluence>'
first_xml = etree.XML(wrapper.format(first).encode(encoding='utf-8'), parser=cls._parser())
second_xml = etree.XML(wrapper.format(second).encode(encoding='utf-8'), parser=cls._parser())
return cls._elements_equal(first_xml, second_xml)
@staticmethod
def _parser():
# use lxml HTMLParser if it exists
if hasattr(etree, 'HTMLParser'):
return etree.HTMLParser()
# or xml.etree.ElementTree.XMLParser
# fix unknown entity
# http://stackoverflow.com/questions/7237466/python-elementtree-support-for-parsing-unknown-xml-entities
parser = etree.XMLParser()
parser.entity['nbsp'] = 'nbsp'
return parser
@classmethod
def _elements_equal(cls, e1, e2):
if e1.tag != e2.tag:
return False
if e1.text != e2.text:
return False
if not cls._attributes_equals(e1, e2):
return False
if len(e1) != len(e2):
return False
return all(cls._elements_equal(c1, c2) for c1, c2 in zip(e1, e2))
@staticmethod
def _attributes_equals(e1, e2):
        # Confluence creates additional attributes for structured macros
if 'structured-macro' == e1.tag:
return e1.attrib.get('name') == e2.attrib.get('name')
elif 'structured-macro' in e1.tag:
confluence_ac_attribute_name = '{http://www.atlassian.com/schema/confluence/4/ac/}name'
return e1.attrib.get(confluence_ac_attribute_name) == e2.attrib.get(confluence_ac_attribute_name)
return e1.attrib == e2.attrib
| mit | 1,458,645,083,216,607,500 | 29.347826 | 112 | 0.565642 | false |
ampotty/uip-pc3 | Ejemplos/ejemplo16.py | 1 | 1153 | def isPalindromicNumber(num: int) -> bool:
"""
Determina sin un numero es palindromico
:param num: Numbero entero a evaluar
:type num: int
:return: Verdadero si es numero palindromico; Falso si no es numero palindromico
:rtype: bool
"""
try:
if type(num) != int:
raise TypeError("(Tipo incorrecto) Tipo <int> esperado.")
source = [int(n) for n in str(num)]
clone = source[:]
clone.reverse()
return source == clone
except TypeError as error:
print(error.with_traceback())
if __name__ == '__main__':
"""
-- Determinar numero palindromico --
Leer un numero entero e imprimir si es numero palindromico.
Un numero es palindromico, si sus digitos se mantiene lo mismo si es invertido.
En otras palabras es simetrico [https://es.wikipedia.org/wiki/Capic%C3%BAa]
NOTA: Ejemplo utiliza interpretador de Python 3.x.x
"""
try:
number = int(input("Digite un numero: "))
except:
raise
truthiness = "es" if isPalindromicNumber(number) else "no"
print("%d %s numero Palindromo." % (number, truthiness))
| mit | 7,433,402,708,684,758,000 | 27.825 | 84 | 0.62706 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-storage-blob/samples/blob_samples_service_async.py | 1 | 8293 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: blob_samples_service_async.py
DESCRIPTION:
This sample demos basic operations of the blob service client.
USAGE: python blob_samples_service_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
"""
import os
import asyncio
from azure.core.exceptions import ResourceNotFoundError, ResourceExistsError
class BlobServiceSamplesAsync(object):
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
async def get_storage_account_information_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
# [START get_blob_service_account_info]
account_info = await blob_service_client.get_account_information()
print('Using Storage SKU: {}'.format(account_info['sku_name']))
# [END get_blob_service_account_info]
async def blob_service_properties_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
# [START set_blob_service_properties]
# Create service properties
from azure.storage.blob import BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy
# Create logging settings
logging = BlobAnalyticsLogging(read=True, write=True, delete=True, retention_policy=RetentionPolicy(enabled=True, days=5))
# Create metrics for requests statistics
hour_metrics = Metrics(enabled=True, include_apis=True, retention_policy=RetentionPolicy(enabled=True, days=5))
minute_metrics = Metrics(enabled=True, include_apis=True,
retention_policy=RetentionPolicy(enabled=True, days=5))
# Create CORS rules
cors_rule = CorsRule(['www.xyz.com'], ['GET'])
cors = [cors_rule]
# Set the service properties
await blob_service_client.set_service_properties(logging, hour_metrics, minute_metrics, cors)
# [END set_blob_service_properties]
# [START get_blob_service_properties]
properties = await blob_service_client.get_service_properties()
# [END get_blob_service_properties]
async def blob_service_stats_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
# [START get_blob_service_stats]
stats = await blob_service_client.get_service_stats()
# [END get_blob_service_stats]
async def container_operations_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
try:
# [START bsc_create_container]
try:
new_container = await blob_service_client.create_container("containerfromblobserviceasync")
properties = await new_container.get_container_properties()
except ResourceExistsError:
print("Container already exists.")
# [END bsc_create_container]
# [START bsc_list_containers]
# List all containers
all_containers = []
async for container in blob_service_client.list_containers(include_metadata=True):
all_containers.append(container)
for container in all_containers:
print(container['name'], container['metadata'])
# Filter results with name prefix
test_containers = []
async for name in blob_service_client.list_containers(name_starts_with='test-'):
test_containers.append(name)
for container in test_containers:
print(container['name'], container['metadata'])
# [END bsc_list_containers]
finally:
# [START bsc_delete_container]
# Delete container if it exists
try:
await blob_service_client.delete_container("containerfromblobserviceasync")
except ResourceNotFoundError:
print("Container already deleted.")
# [END bsc_delete_container]
async def get_blob_and_container_clients_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
# [START bsc_get_container_client]
# Get a client to interact with a specific container - though it may not yet exist
container_client = blob_service_client.get_container_client("containertestasync")
try:
blobs_list = []
async for blob in container_client.list_blobs():
blobs_list.append(blob)
for blob in blobs_list:
print("Found blob: ", blob.name)
except ResourceNotFoundError:
print("Container not found.")
# [END bsc_get_container_client]
try:
# Create new Container in the service
await container_client.create_container()
# [START bsc_get_blob_client]
blob_client = blob_service_client.get_blob_client(container="containertestasync", blob="my_blob")
try:
stream = await blob_client.download_blob()
except ResourceNotFoundError:
print("No blob found.")
# [END bsc_get_blob_client]
finally:
# Delete the container
await blob_service_client.delete_container("containertestasync")
async def get_blob_service_client_from_container_client_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import ContainerClient
container_client1 = ContainerClient.from_connection_string(self.connection_string, "container")
await container_client1.create_container()
# [START get_blob_service_client_from_container_client]
blob_service_client = container_client1._get_blob_service_client()
print(await blob_service_client.get_service_properties())
container_client2 = blob_service_client.get_container_client("container")
print(await container_client2.get_container_properties())
await container_client2.delete_container()
await container_client1.close()
# [END get_blob_service_client_from_container_client]
async def main():
sample = BlobServiceSamplesAsync()
await sample.get_storage_account_information_async()
await sample.get_blob_and_container_clients_async()
await sample.container_operations_async()
await sample.blob_service_properties_async()
await sample.blob_service_stats_async()
await sample.get_blob_service_client_from_container_client_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| mit | -7,345,336,612,967,402,000 | 42.878307 | 134 | 0.633064 | false |
Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-search-visualsearch/setup.py | 1 | 2961 | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-cognitiveservices-search-visualsearch"
PACKAGE_PPRINT_NAME = "Cognitive Services Visual Search"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired by 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.cognitiveservices',
'azure.cognitiveservices.search',
]),
install_requires=[
'msrest>=0.5.0',
'azure-common~=1.1',
],
extras_require={
":python_version<'3.0'": ['azure-cognitiveservices-search-nspkg'],
}
)
| mit | 5,800,291,530,648,143,000 | 32.647727 | 91 | 0.602499 | false |
beeftornado/sentry | src/sentry/migrations/0023_hide_environment_none_20191126.py | 2 | 1733 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def hide_environment_none(apps, schema_editor):
"""
    Hide environments that are named "none", since they're blacklisted and can no longer be created.
    We iterate over each environment row individually in Python instead of updating in bulk, so that
    we don't lock the DB up. This is far slower but much safer.
"""
EnvironmentProject = apps.get_model("sentry", "EnvironmentProject")
for project in EnvironmentProject.objects.filter(environment__name='none'):
project.is_hidden = True
project.save()
class Migration(migrations.Migration):
# This flag is used to mark that a migration shouldn't be automatically run in
# production. We set this to True for operations that we think are risky and want
# someone from ops to run manually and monitor.
# General advice is that if in doubt, mark your migration as `is_dangerous`.
# Some things you should always mark as dangerous:
# - Adding indexes to large tables. These indexes should be created concurrently,
# unfortunately we can't run migrations outside of a transaction until Django
# 1.10. So until then these should be run manually.
# - Large data migrations. Typically we want these to be run manually by ops so that
# they can be monitored. Since data migrations will now hold a transaction open
# this is even more important.
# - Adding columns to highly active tables, even ones that are NULL.
is_dangerous = False
dependencies = [
("sentry", "0022_merge"),
]
operations = [
migrations.RunPython(hide_environment_none, migrations.RunPython.noop)
]
| bsd-3-clause | -3,321,665,843,704,771,600 | 41.268293 | 119 | 0.709175 | false |
denz/swarm | swarm/helpers.py | 1 | 11212 | # -*- coding: utf-8 -*-
import sys
import os
import pkgutil
from multiprocessing import RLock
from types import StringTypes
def get_root_path(import_name):
"""Returns the path to a package or cwd if that cannot be found. This
returns the path of a package or the folder that contains a module.
Not to be confused with the package path returned by :func:`find_package`.
"""
# Module already imported and has a file attribute. Use that first.
mod = sys.modules.get(import_name)
if mod is not None and hasattr(mod, '__file__'):
return os.path.dirname(os.path.abspath(mod.__file__))
# Next attempt: check the loader.
loader = pkgutil.get_loader(import_name)
# Loader does not exist or we're referring to an unloaded main module
# or a main module without path (interactive sessions), go with the
# current working directory.
if loader is None or import_name == '__main__':
return os.getcwd()
# For .egg, zipimporter does not have get_filename until Python 2.7.
# Some other loaders might exhibit the same behavior.
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(import_name)
else:
# Fall back to imports.
__import__(import_name)
filepath = sys.modules[import_name].__file__
# filepath is import_name.py for a module, or __init__.py for a package.
return os.path.dirname(os.path.abspath(filepath))
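# Illustrative example (not part of the original module; assumes the standard
# library is importable): the helper resolves an import name to the directory
# that contains the module or package, falling back to the current working
# directory when it cannot be located.
#
#     >>> import os, os.path
#     >>> os.path.isdir(get_root_path('os'))
#     True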
class _PackageBoundObject(object):
def __init__(self, import_name):
#: The name of the package or module. Do not change this once
#: it was set by the constructor.
self.import_name = import_name
#: Where is the app root located?
self.root_path = get_root_path(self.import_name)
# sentinel
_missing = object()
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Flask(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
.. versionchanged:: 0.6
the `writeable` attribute and parameter was deprecated. If a
cached property is writeable or not has to be documented now.
For performance reasons the implementation does not honor the
writeable setting and will always make the property writeable.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
# overhead. If one choses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None, writeable=False):
if writeable:
from warnings import warn
warn(DeprecationWarning('the writeable argument to the '
'cached property is a noop since 0.6 '
'because the property is writeable '
'by default for performance reasons'))
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
class locked_cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value. Works like the one in Werkzeug but has a lock for
thread safety.
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
self.lock = RLock()
def __get__(self, obj, type=None):
if obj is None:
return self
with self.lock:
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
For better debugging we recommend the new :func:`import_module`
function to be used instead.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
# force the import name to automatically convert to strings
if isinstance(import_name, unicode):
import_name = str(import_name)
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
module, obj = import_name.rsplit('.', 1)
else:
return __import__(import_name)
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
if isinstance(obj, unicode):
obj = obj.encode('utf-8')
try:
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
# support importing modules not yet set up by the parent module
# (or package for that matter)
modname = module + '.' + obj
__import__(modname)
return sys.modules[modname]
except ImportError, e:
if not silent:
raise ImportStringError(import_name, e), None, sys.exc_info()[2]
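# Illustrative examples (not part of the original module): the dotted and the
# colon notation resolve to the same attribute, and ``silent=True`` swallows
# the error for a missing target.
#
#     >>> import_string('os.path.join') is os.path.join
#     True
#     >>> import_string('os.path:join') is os.path.join
#     True
#     >>> import_string('no.such.module', silent=True) is None
#     True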
def find_package(import_name):
"""Finds a package and returns the prefix (or None if the package is
not installed) as well as the folder that contains the package or
module as a tuple. The package path returned is the module that would
have to be added to the pythonpath in order to make it possible to
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
"""
root_mod_name = import_name.split('.')[0]
loader = pkgutil.get_loader(root_mod_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
package_path = os.getcwd()
else:
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filename = loader.get_filename(root_mod_name)
elif hasattr(loader, 'archive'):
# zipimporter's loader.archive points to the .egg or .zip
# archive filename is dropped in call to dirname below.
filename = loader.archive
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook
#
# Fall back to imports.
__import__(import_name)
filename = sys.modules[import_name].__file__
package_path = os.path.abspath(os.path.dirname(filename))
# package_path ends with __init__.py for a package
if loader.is_package(root_mod_name):
package_path = os.path.dirname(package_path)
site_parent, site_folder = os.path.split(package_path)
py_prefix = os.path.abspath(sys.prefix)
if package_path.startswith(py_prefix):
return py_prefix, package_path
elif site_folder.lower() == 'site-packages':
parent, folder = os.path.split(site_parent)
# Windows like installations
if folder.lower() == 'lib':
base_dir = parent
# UNIX like installations
elif os.path.basename(parent).lower() == 'lib':
base_dir = os.path.dirname(parent)
else:
base_dir = site_parent
return base_dir, package_path
return None, package_path
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s')
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, '__file__', None)))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (import_name, '\n'.join(track),
exception.__class__.__name__, str(exception))
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
self.exception)
def caller_locals(decorators=0):
'''
    Returns the local namespace of the calling frame, `decorators` frames
    further up the stack
'''
f = sys._getframe(1 + decorators)
return f.f_locals
obj_converter = lambda obj_path: import_string(obj_path) \
if isinstance(obj_path, basestring)\
else obj_path
def obj_list_converter(string_or_stringlist):
if string_or_stringlist is None:
return []
if isinstance(string_or_stringlist, StringTypes):
return [obj_converter(string_or_stringlist),]
else:
lst = []
for obj_path in string_or_stringlist:
lst.append(obj_converter(obj_path))
return lst | bsd-3-clause | 4,586,606,665,246,934,000 | 38.206294 | 78 | 0.604085 | false |
beiko-lab/gengis | bin/Lib/site-packages/numpy/numarray/numerictypes.py | 1 | 15882 | """numerictypes: Define the numeric type objects
This module is designed so 'from numerictypes import *' is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Numeric type objects:
Bool
Int8 Int16 Int32 Int64
UInt8 UInt16 UInt32 UInt64
Float32 Double64
Complex32 Complex64
Numeric type classes:
NumericType
BooleanType
SignedType
UnsignedType
IntegralType
SignedIntegralType
UnsignedIntegralType
FloatingType
ComplexType
$Id: numerictypes.py,v 1.55 2005/12/01 16:22:03 jaytmiller Exp $
"""
__all__ = ['NumericType','HasUInt64','typeDict','IsType',
'BooleanType', 'SignedType', 'UnsignedType', 'IntegralType',
'SignedIntegralType', 'UnsignedIntegralType', 'FloatingType',
'ComplexType', 'AnyType', 'ObjectType', 'Any', 'Object',
'Bool', 'Int8', 'Int16', 'Int32', 'Int64', 'Float32',
'Float64', 'UInt8', 'UInt16', 'UInt32', 'UInt64',
'Complex32', 'Complex64', 'Byte', 'Short', 'Int','Long',
'Float', 'Complex', 'genericTypeRank', 'pythonTypeRank',
'pythonTypeMap', 'scalarTypeMap', 'genericCoercions',
'typecodes', 'genericPromotionExclusions','MaximumType',
'getType','scalarTypes', 'typefrom']
MAX_ALIGN = 8
MAX_INT_SIZE = 8
import numpy
LP64 = numpy.intp(0).itemsize == 8
HasUInt64 = 1
try:
numpy.int64(0)
except:
HasUInt64 = 0
#from typeconv import typeConverters as _typeConverters
#import numinclude
#from _numerictype import _numerictype, typeDict
# Enumeration of numarray type codes
typeDict = {}
_tAny = 0
_tBool = 1
_tInt8 = 2
_tUInt8 = 3
_tInt16 = 4
_tUInt16 = 5
_tInt32 = 6
_tUInt32 = 7
_tInt64 = 8
_tUInt64 = 9
_tFloat32 = 10
_tFloat64 = 11
_tComplex32 = 12
_tComplex64 = 13
_tObject = 14
def IsType(rep):
"""Determines whether the given object or string, 'rep', represents
a numarray type."""
return isinstance(rep, NumericType) or rep in typeDict
def _register(name, type, force=0):
"""Register the type object. Raise an exception if it is already registered
unless force is true.
"""
if name in typeDict and not force:
raise ValueError("Type %s has already been registered" % name)
typeDict[name] = type
return type
class NumericType(object):
"""Numeric type class
Used both as a type identification and the repository of
characteristics and conversion functions.
"""
def __new__(type, name, bytes, default, typeno):
"""__new__() implements a 'quasi-singleton pattern because attempts
to create duplicate types return the first created instance of that
particular type parameterization, i.e. the second time you try to
create "Int32", you get the original Int32, not a new one.
"""
if name in typeDict:
self = typeDict[name]
if self.bytes != bytes or self.default != default or \
self.typeno != typeno:
raise ValueError("Redeclaration of existing NumericType "\
"with different parameters.")
return self
else:
self = object.__new__(type)
self.name = "no name"
self.bytes = None
self.default = None
self.typeno = -1
return self
def __init__(self, name, bytes, default, typeno):
if not isinstance(name, str):
raise TypeError("name must be a string")
self.name = name
self.bytes = bytes
self.default = default
self.typeno = typeno
self._conv = None
_register(self.name, self)
def __getnewargs__(self):
"""support the pickling protocol."""
return (self.name, self.bytes, self.default, self.typeno)
def __getstate__(self):
"""support pickling protocol... no __setstate__ required."""
        return False
class BooleanType(NumericType):
pass
class SignedType(object):
"""Marker class used for signed type check"""
pass
class UnsignedType(object):
"""Marker class used for unsigned type check"""
pass
class IntegralType(NumericType):
pass
class SignedIntegralType(IntegralType, SignedType):
pass
class UnsignedIntegralType(IntegralType, UnsignedType):
pass
class FloatingType(NumericType):
pass
class ComplexType(NumericType):
pass
class AnyType(NumericType):
pass
class ObjectType(NumericType):
pass
# C-API Type Any
Any = AnyType("Any", None, None, _tAny)
Object = ObjectType("Object", None, None, _tObject)
# Numeric Types:
Bool = BooleanType("Bool", 1, 0, _tBool)
Int8 = SignedIntegralType( "Int8", 1, 0, _tInt8)
Int16 = SignedIntegralType("Int16", 2, 0, _tInt16)
Int32 = SignedIntegralType("Int32", 4, 0, _tInt32)
Int64 = SignedIntegralType("Int64", 8, 0, _tInt64)
Float32 = FloatingType("Float32", 4, 0.0, _tFloat32)
Float64 = FloatingType("Float64", 8, 0.0, _tFloat64)
UInt8 = UnsignedIntegralType( "UInt8", 1, 0, _tUInt8)
UInt16 = UnsignedIntegralType("UInt16", 2, 0, _tUInt16)
UInt32 = UnsignedIntegralType("UInt32", 4, 0, _tUInt32)
UInt64 = UnsignedIntegralType("UInt64", 8, 0, _tUInt64)
Complex32 = ComplexType("Complex32", 8, complex(0.0), _tComplex32)
Complex64 = ComplexType("Complex64", 16, complex(0.0), _tComplex64)
Object.dtype = 'O'
Bool.dtype = '?'
Int8.dtype = 'i1'
Int16.dtype = 'i2'
Int32.dtype = 'i4'
Int64.dtype = 'i8'
UInt8.dtype = 'u1'
UInt16.dtype = 'u2'
UInt32.dtype = 'u4'
UInt64.dtype = 'u8'
Float32.dtype = 'f4'
Float64.dtype = 'f8'
Complex32.dtype = 'c8'
Complex64.dtype = 'c16'
# Aliases
Byte = _register("Byte", Int8)
Short = _register("Short", Int16)
Int = _register("Int", Int32)
if LP64:
Long = _register("Long", Int64)
if HasUInt64:
_register("ULong", UInt64)
MaybeLong = _register("MaybeLong", Int64)
__all__.append('MaybeLong')
else:
Long = _register("Long", Int32)
_register("ULong", UInt32)
MaybeLong = _register("MaybeLong", Int32)
__all__.append('MaybeLong')
_register("UByte", UInt8)
_register("UShort", UInt16)
_register("UInt", UInt32)
Float = _register("Float", Float64)
Complex = _register("Complex", Complex64)
# short forms
_register("b1", Bool)
_register("u1", UInt8)
_register("u2", UInt16)
_register("u4", UInt32)
_register("i1", Int8)
_register("i2", Int16)
_register("i4", Int32)
_register("i8", Int64)
if HasUInt64:
_register("u8", UInt64)
_register("f4", Float32)
_register("f8", Float64)
_register("c8", Complex32)
_register("c16", Complex64)
# NumPy forms
_register("1", Int8)
_register("B", Bool)
_register("c", Int8)
_register("b", UInt8)
_register("s", Int16)
_register("w", UInt16)
_register("i", Int32)
_register("N", Int64)
_register("u", UInt32)
_register("U", UInt64)
if LP64:
_register("l", Int64)
else:
_register("l", Int32)
_register("d", Float64)
_register("f", Float32)
_register("D", Complex64)
_register("F", Complex32)
# scipy.base forms
def _scipy_alias(scipy_type, numarray_type):
_register(scipy_type, eval(numarray_type))
globals()[scipy_type] = globals()[numarray_type]
_scipy_alias("bool_", "Bool")
_scipy_alias("bool8", "Bool")
_scipy_alias("int8", "Int8")
_scipy_alias("uint8", "UInt8")
_scipy_alias("int16", "Int16")
_scipy_alias("uint16", "UInt16")
_scipy_alias("int32", "Int32")
_scipy_alias("uint32", "UInt32")
_scipy_alias("int64", "Int64")
_scipy_alias("uint64", "UInt64")
_scipy_alias("float64", "Float64")
_scipy_alias("float32", "Float32")
_scipy_alias("complex128", "Complex64")
_scipy_alias("complex64", "Complex32")
# The rest is used by numeric modules to determine conversions
# Ranking of types from lowest to highest (sorta)
if not HasUInt64:
genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16',
'Int32', 'UInt32', 'Int64',
'Float32','Float64', 'Complex32', 'Complex64', 'Object']
else:
genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16',
'Int32', 'UInt32', 'Int64', 'UInt64',
'Float32','Float64', 'Complex32', 'Complex64', 'Object']
pythonTypeRank = [ bool, int, long, float, complex ]
# The next line is not platform independent XXX Needs to be generalized
if not LP64:
pythonTypeMap = {
int:("Int32","int"),
long:("Int64","int"),
float:("Float64","float"),
complex:("Complex64","complex")}
scalarTypeMap = {
int:"Int32",
long:"Int64",
float:"Float64",
complex:"Complex64"}
else:
pythonTypeMap = {
int:("Int64","int"),
long:("Int64","int"),
float:("Float64","float"),
complex:("Complex64","complex")}
scalarTypeMap = {
int:"Int64",
long:"Int64",
float:"Float64",
complex:"Complex64"}
pythonTypeMap.update({bool:("Bool","bool") })
scalarTypeMap.update({bool:"Bool"})
# Generate coercion matrix
def _initGenericCoercions():
global genericCoercions
genericCoercions = {}
# vector with ...
for ntype1 in genericTypeRank:
nt1 = typeDict[ntype1]
rank1 = genericTypeRank.index(ntype1)
ntypesize1, inttype1, signedtype1 = nt1.bytes, \
isinstance(nt1, IntegralType), isinstance(nt1, SignedIntegralType)
for ntype2 in genericTypeRank:
# vector
nt2 = typeDict[ntype2]
ntypesize2, inttype2, signedtype2 = nt2.bytes, \
isinstance(nt2, IntegralType), isinstance(nt2, SignedIntegralType)
rank2 = genericTypeRank.index(ntype2)
if (signedtype1 != signedtype2) and inttype1 and inttype2:
# mixing of signed and unsigned ints is a special case
# If unsigned same size or larger, final size needs to be bigger
# if possible
if signedtype1:
if ntypesize2 >= ntypesize1:
size = min(2*ntypesize2, MAX_INT_SIZE)
else:
size = ntypesize1
else:
if ntypesize1 >= ntypesize2:
size = min(2*ntypesize1, MAX_INT_SIZE)
else:
size = ntypesize2
outtype = "Int"+str(8*size)
else:
if rank1 >= rank2:
outtype = ntype1
else:
outtype = ntype2
genericCoercions[(ntype1, ntype2)] = outtype
for ntype2 in pythonTypeRank:
# scalar
mapto, kind = pythonTypeMap[ntype2]
if ((inttype1 and kind=="int") or (not inttype1 and kind=="float")):
# both are of the same "kind" thus vector type dominates
outtype = ntype1
else:
rank2 = genericTypeRank.index(mapto)
if rank1 >= rank2:
outtype = ntype1
else:
outtype = mapto
genericCoercions[(ntype1, ntype2)] = outtype
genericCoercions[(ntype2, ntype1)] = outtype
# scalar-scalar
for ntype1 in pythonTypeRank:
maptype1 = scalarTypeMap[ntype1]
genericCoercions[(ntype1,)] = maptype1
for ntype2 in pythonTypeRank:
maptype2 = scalarTypeMap[ntype2]
genericCoercions[(ntype1, ntype2)] = genericCoercions[(maptype1, maptype2)]
# Special cases more easily dealt with outside of the loop
genericCoercions[("Complex32", "Float64")] = "Complex64"
genericCoercions[("Float64", "Complex32")] = "Complex64"
genericCoercions[("Complex32", "Int64")] = "Complex64"
genericCoercions[("Int64", "Complex32")] = "Complex64"
genericCoercions[("Complex32", "UInt64")] = "Complex64"
genericCoercions[("UInt64", "Complex32")] = "Complex64"
genericCoercions[("Int64","Float32")] = "Float64"
genericCoercions[("Float32", "Int64")] = "Float64"
genericCoercions[("UInt64","Float32")] = "Float64"
genericCoercions[("Float32", "UInt64")] = "Float64"
genericCoercions[(float, "Bool")] = "Float64"
genericCoercions[("Bool", float)] = "Float64"
genericCoercions[(float,float,float)] = "Float64" # for scipy.special
genericCoercions[(int,int,float)] = "Float64" # for scipy.special
_initGenericCoercions()
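# Illustrative spot-checks of the generated coercion table (example values that
# follow from the rules above, not an exhaustive specification):
#
#     genericCoercions[("Int16", "UInt16")] == "Int32"    # mixed signedness widens
#     genericCoercions[("Int32", "Float64")] == "Float64" # higher rank wins
#     genericCoercions[(int, float)] == "Float64"         # scalar-scalar case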
# If complex is subclassed, the following may not be necessary
genericPromotionExclusions = {
'Bool': (),
'Int8': (),
'Int16': (),
'Int32': ('Float32','Complex32'),
'UInt8': (),
'UInt16': (),
'UInt32': ('Float32','Complex32'),
'Int64' : ('Float32','Complex32'),
'UInt64' : ('Float32','Complex32'),
'Float32': (),
'Float64': ('Complex32',),
'Complex32':(),
'Complex64':()
} # e.g., don't allow promotion from Float64 to Complex32 or Int64 to Float32
# Numeric typecodes
typecodes = {'Integer': '1silN',
'UnsignedInteger': 'bBwuU',
'Float': 'fd',
'Character': 'c',
'Complex': 'FD' }
if HasUInt64:
_MaximumType = {
Bool : UInt64,
Int8 : Int64,
Int16 : Int64,
Int32 : Int64,
Int64 : Int64,
UInt8 : UInt64,
UInt16 : UInt64,
UInt32 : UInt64,
        UInt64 : UInt64,
Float32 : Float64,
Float64 : Float64,
Complex32 : Complex64,
Complex64 : Complex64
}
else:
_MaximumType = {
Bool : Int64,
Int8 : Int64,
Int16 : Int64,
Int32 : Int64,
Int64 : Int64,
UInt8 : Int64,
UInt16 : Int64,
UInt32 : Int64,
        UInt64 : Int64,
Float32 : Float64,
Float64 : Float64,
Complex32 : Complex64,
Complex64 : Complex64
}
def MaximumType(t):
"""returns the type of highest precision of the same general kind as 't'"""
return _MaximumType[t]
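# Illustrative examples (not part of the original module):
#
#     >>> MaximumType(Int8) is Int64
#     True
#     >>> MaximumType(Float32) is Float64
#     True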
def getType(type):
"""Return the numeric type object for type
type may be the name of a type object or the actual object
"""
if isinstance(type, NumericType):
return type
try:
return typeDict[type]
except KeyError:
raise TypeError("Not a numeric type")
scalarTypes = (bool,int,long,float,complex)
_scipy_dtypechar = {
Int8 : 'b',
UInt8 : 'B',
Int16 : 'h',
UInt16 : 'H',
Int32 : 'i',
UInt32 : 'I',
Int64 : 'q',
UInt64 : 'Q',
Float32 : 'f',
Float64 : 'd',
Complex32 : 'F', # Note the switchup here:
Complex64 : 'D' # numarray.Complex32 == scipy.complex64, etc.
}
_scipy_dtypechar_inverse = {}
for key,value in _scipy_dtypechar.items():
_scipy_dtypechar_inverse[value] = key
_val = numpy.int_(0).itemsize
if _val == 8:
_scipy_dtypechar_inverse['l'] = Int64
_scipy_dtypechar_inverse['L'] = UInt64
elif _val == 4:
_scipy_dtypechar_inverse['l'] = Int32
_scipy_dtypechar_inverse['L'] = UInt32
del _val
if LP64:
_scipy_dtypechar_inverse['p'] = Int64
_scipy_dtypechar_inverse['P'] = UInt64
else:
_scipy_dtypechar_inverse['p'] = Int32
_scipy_dtypechar_inverse['P'] = UInt32
def typefrom(obj):
return _scipy_dtypechar_inverse[obj.dtype.char]
| gpl-3.0 | -3,608,736,657,460,471,300 | 26.981752 | 87 | 0.581287 | false |
ryota-sugimoto/hackerrank | vmware/logical_hub.py | 1 | 1266 | #!/usr/bin/env python
def wire_port2port(d):
for host in d.keys():
hubs = d[host]
for i in range(len(hubs)):
for j in range(i+1,len(hubs)):
if hubs[i] == hubs[j]:
print "PORT_TO_PORT %s %i %i" % (host,i,j)
print "PORT_TO_PORT %s %i %i" % (host,j,i)
def make_hub_map(d):
hub_map = {}
for host in d.keys():
hubs = d[host]
for hub in hubs:
if hub_map.has_key(hub):
hub_map[hub].add(host)
else:
hub_map[hub] = set([host])
return hub_map
def wire_port2tunnel(d,hub_map):
for host in d.keys():
hubs = d[host]
for i,hub in enumerate(hubs):
for dst_host in hub_map[hub]:
if dst_host != host:
print "PORT_TO_TUNNEL %s %i %s %s" % (host,i,dst_host,hub)
def wire_tunnel2port(d,hub_map):
if len(d.keys()) > 1:
for host in d.keys():
hubs = d[host]
for i,hub in enumerate(hubs):
if len(hub_map[hub]) > 1:
print "TUNNEL_TO_PORT %s %s %i" % (host,hub,i)
import sys
hosts = {}
for s in sys.stdin:
l = s.strip().split()
host = l[0]
hosts[host] = []
for hub in l[1:]:
hosts[host].append(hub)
hub_map = make_hub_map(hosts)
wire_port2port(hosts)
wire_port2tunnel(hosts,hub_map)
wire_tunnel2port(hosts,hub_map)
| gpl-2.0 | -9,089,723,739,468,877,000 | 23.346154 | 68 | 0.561611 | false |
dparnell/rethinkdb | drivers/java/convert_tests.py | 1 | 31689 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Finds yaml tests, converts them to Java tests.'''
from __future__ import print_function
import sys
import os
import os.path
import re
import time
import ast
import argparse
import metajava
import process_polyglot
import logging
from process_polyglot import Unhandled, Skip, FatalSkip, SkippedTest
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from collections import namedtuple
sys.path.append(
os.path.abspath(os.path.join(__file__, "../../../test/common")))
import parsePolyglot
parsePolyglot.printDebug = False
logger = logging.getLogger("convert_tests")
# Supplied by import_python_driver
r = None
TEST_EXCLUSIONS = [
# python only tests
# 'regression/1133',
# 'regression/767',
# 'regression/1005',
'regression/',
'limits', # pending fix in issue #4965
# double run
'changefeeds/squash',
# arity checked at compile time
'arity',
'.rb.yaml',
]
def main():
logging.basicConfig(format="[%(name)s] %(message)s", level=logging.INFO)
start = time.clock()
args = parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
logging.getLogger('process_polyglot').setLevel(logging.DEBUG)
elif args.info:
logger.setLevel(logging.INFO)
logging.getLogger('process_polyglot').setLevel(logging.INFO)
else:
logger.root.setLevel(logging.WARNING)
if args.e:
evaluate_snippet(args.e)
exit(0)
global r
r = import_python_driver(args.python_driver_dir)
renderer = metajava.Renderer(
args.template_dir,
invoking_filenames=[
__file__,
process_polyglot.__file__,
])
for testfile in process_polyglot.all_yaml_tests(
args.test_dir,
TEST_EXCLUSIONS):
logger.info("Working on %s", testfile)
TestFile(
test_dir=args.test_dir,
filename=testfile,
test_output_dir=args.test_output_dir,
renderer=renderer,
).load().render()
logger.info("Finished in %s seconds", time.clock() - start)
def parse_args():
'''Parse command line arguments'''
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--test-dir",
help="Directory where yaml tests are",
default="../../test/rql_test/src"
)
parser.add_argument(
"--test-output-dir",
help="Directory to render tests to",
default="./src/test/java/gen",
)
parser.add_argument(
"--template-dir",
help="Where to find test generation templates",
default="./templates",
)
parser.add_argument(
"--python-driver-dir",
help="Where the built python driver is located",
default="../../build/drivers/python"
)
parser.add_argument(
"--test-file",
help="Only convert the specified yaml file",
)
parser.add_argument(
'--debug',
help="Print debug output",
dest='debug',
action='store_true')
parser.set_defaults(debug=False)
parser.add_argument(
'--info',
help="Print info level output",
dest='info',
action='store_true')
parser.set_defaults(info=False)
parser.add_argument(
'-e',
help="Convert an inline python reql to java reql snippet",
)
return parser.parse_args()
def import_python_driver(py_driver_dir):
'''Imports the test driver header'''
stashed_path = sys.path
sys.path.insert(0, os.path.realpath(py_driver_dir))
import rethinkdb as r
sys.path = stashed_path
return r
JavaQuery = namedtuple(
'JavaQuery',
('line',
'expected_type',
'expected_line',
'testfile',
'line_num',
'runopts')
)
JavaDef = namedtuple(
'JavaDef',
('line',
'varname',
'vartype',
'value',
'run_if_query',
'testfile',
'line_num',
'runopts')
)
Version = namedtuple("Version", "original java")
JAVA_DECL = re.compile(r'(?P<type>.+) (?P<var>\w+) = (?P<value>.*);')
def evaluate_snippet(snippet):
'''Just converts a single expression snippet into java'''
try:
parsed = ast.parse(snippet, mode='eval').body
except Exception as e:
return print("Error:", e)
try:
print(ReQLVisitor(smart_bracket=True).convert(parsed))
except Exception as e:
return print("Error:", e)
class TestFile(object):
'''Represents a single test file'''
def __init__(self, test_dir, filename, test_output_dir, renderer):
self.filename = filename
self.full_path = os.path.join(test_dir, filename)
self.module_name = metajava.camel(
filename.split('.')[0].replace('/', '_'))
self.test_output_dir = test_output_dir
self.reql_vars = {'r'}
self.renderer = renderer
def load(self):
'''Load the test file, yaml parse it, extract file-level metadata'''
with open(self.full_path) as f:
parsed_yaml = parsePolyglot.parseYAML(f)
self.description = parsed_yaml.get('desc', 'No description')
self.table_var_names = self.get_varnames(parsed_yaml)
self.reql_vars.update(self.table_var_names)
self.raw_test_data = parsed_yaml['tests']
self.test_generator = process_polyglot.tests_and_defs(
self.filename,
self.raw_test_data,
context=process_polyglot.create_context(r, self.table_var_names),
custom_field='java',
)
return self
def get_varnames(self, yaml_file):
        '''Extract table variable names from the yaml variable.
        They can be specified either space separated or comma separated'''
raw_var_names = yaml_file.get('table_variable_name', '')
if not raw_var_names:
return set()
return set(re.split(r'[, ]+', raw_var_names))
def render(self):
'''Renders the converted tests to a runnable test file'''
defs_and_test = ast_to_java(self.test_generator, self.reql_vars)
self.renderer.render(
'Test.java',
output_dir=self.test_output_dir,
output_name=self.module_name + '.java',
dependencies=[self.full_path],
defs_and_test=defs_and_test,
table_var_names=list(sorted(self.table_var_names)),
module_name=self.module_name,
JavaQuery=JavaQuery,
JavaDef=JavaDef,
description=self.description,
)
def py_to_java_type(py_type):
'''Converts python types to their Java equivalents'''
if py_type is None:
return None
elif isinstance(py_type, str):
# This can be called on something already converted
return py_type
elif py_type.__name__ == 'function':
return 'ReqlFunction1'
elif (py_type.__module__ == 'datetime' and
py_type.__name__ == 'datetime'):
return 'OffsetDateTime'
elif py_type.__module__ == 'builtins':
return {
bool: 'Boolean',
bytes: 'byte[]',
int: 'Long',
float: 'Double',
str: 'String',
dict: 'Map',
list: 'List',
object: 'Object',
type(None): 'Object',
}[py_type]
elif py_type.__module__ == 'rethinkdb.ast':
# Anomalous non-rule based capitalization in the python driver
return {
'DB': 'Db'
}.get(py_type.__name__, py_type.__name__)
elif py_type.__module__ == 'rethinkdb.errors':
return py_type.__name__
elif py_type.__module__ == '?test?':
return {
'uuid': 'UUIDMatch', # clashes with ast.Uuid
}.get(py_type.__name__, metajava.camel(py_type.__name__))
elif py_type.__module__ == 'rethinkdb.query':
# All of the constants like minval maxval etc are defined in
# query.py, but no type name is provided to `type`, so we have
# to pull it out of a class variable
return metajava.camel(py_type.st)
else:
raise Unhandled(
"Don't know how to convert python type {}.{} to java"
.format(py_type.__module__, py_type.__name__))
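# Illustrative examples (not part of the original script) of the mapping above:
#
#     py_to_java_type(int)   == 'Long'
#     py_to_java_type(dict)  == 'Map'
#     py_to_java_type(float) == 'Double'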
def is_reql(t):
'''Determines if a type is a reql term'''
# Other options for module: builtins, ?test?, datetime
return t.__module__ == 'rethinkdb.ast'
def escape_string(s, out):
out.write('"')
for codepoint in s:
rpr = repr(codepoint)[1:-1]
if rpr.startswith('\\x'):
# Python will shorten unicode escapes that are less than a
# byte to use \x instead of \u . Java doesn't accept \x so
# we have to expand it back out.
rpr = '\\u00' + rpr[2:]
elif rpr == '"':
rpr = r'\"'
out.write(rpr)
out.write('"')
def attr_matches(path, node):
'''Helper function. Several places need to know if they are an
attribute of some root object'''
root, name = path.split('.')
ret = is_name(root, node.value) and node.attr == name
return ret
def is_name(name, node):
'''Determine if the current attribute node is a Name with the
given name'''
return type(node) == ast.Name and node.id == name
def def_to_java(item, reql_vars):
if is_reql(item.term.type):
reql_vars.add(item.varname)
try:
if is_reql(item.term.type):
visitor = ReQLVisitor
else:
visitor = JavaVisitor
java_line = visitor(reql_vars,
type_=item.term.type,
is_def=True,
).convert(item.term.ast)
except Skip as skip:
return SkippedTest(line=item.term.line, reason=str(skip))
java_decl = JAVA_DECL.match(java_line).groupdict()
return JavaDef(
line=Version(
original=item.term.line,
java=java_line,
),
varname=java_decl['var'],
vartype=java_decl['type'],
value=java_decl['value'],
run_if_query=item.run_if_query,
testfile=item.testfile,
line_num=item.line_num,
runopts=convert_runopts(reql_vars, java_decl['type'], item.runopts)
)
def convert_runopts(reql_vars, type_, runopts):
if runopts is None:
return None
return {
key: JavaVisitor(
reql_vars, type_=type_).convert(val)
for key, val in runopts.items()
}
def query_to_java(item, reql_vars):
if item.runopts is not None:
converted_runopts = convert_runopts(
reql_vars, item.query.type, item.runopts)
else:
converted_runopts = item.runopts
try:
java_line = ReQLVisitor(
reql_vars, type_=item.query.type).convert(item.query.ast)
if is_reql(item.expected.type):
visitor = ReQLVisitor
else:
visitor = JavaVisitor
java_expected_line = visitor(
reql_vars, type_=item.expected.type)\
.convert(item.expected.ast)
except Skip as skip:
return SkippedTest(line=item.query.line, reason=str(skip))
return JavaQuery(
line=Version(
original=item.query.line,
java=java_line,
),
expected_type=py_to_java_type(item.expected.type),
expected_line=Version(
original=item.expected.line,
java=java_expected_line,
),
testfile=item.testfile,
line_num=item.line_num,
runopts=converted_runopts,
)
def ast_to_java(sequence, reql_vars):
    '''Converts the parsed test data to java source lines using the
visitor classes'''
reql_vars = set(reql_vars)
for item in sequence:
if type(item) == process_polyglot.Def:
yield def_to_java(item, reql_vars)
elif type(item) == process_polyglot.CustomDef:
yield JavaDef(line=Version(item.line, item.line),
testfile=item.testfile,
line_num=item.line_num)
elif type(item) == process_polyglot.Query:
yield query_to_java(item, reql_vars)
elif type(item) == SkippedTest:
yield item
else:
assert False, "shouldn't happen, item was {}".format(item)
class JavaVisitor(ast.NodeVisitor):
'''Converts python ast nodes into a java string'''
def __init__(self,
reql_vars=frozenset("r"),
out=None,
type_=None,
is_def=False,
smart_bracket=False,
):
self.out = StringIO() if out is None else out
self.reql_vars = reql_vars
self.type = py_to_java_type(type_)
self._type = type_
self.is_def = is_def
self.smart_bracket = smart_bracket
super(JavaVisitor, self).__init__()
self.write = self.out.write
def skip(self, message, *args, **kwargs):
cls = Skip
is_fatal = kwargs.pop('fatal', False)
if self.is_def or is_fatal:
cls = FatalSkip
raise cls(message, *args, **kwargs)
def convert(self, node):
'''Convert a text line to another text line'''
self.visit(node)
return self.out.getvalue()
def join(self, sep, items):
first = True
for item in items:
if first:
first = False
else:
self.write(sep)
self.visit(item)
def to_str(self, s):
escape_string(s, self.out)
def cast_null(self, arg, cast='ReqlExpr'):
'''Emits a cast to (ReqlExpr) if the node represents null'''
if (type(arg) == ast.Name and arg.id == 'null') or \
(type(arg) == ast.NameConstant and arg.value == "None"):
self.write("(")
self.write(cast)
self.write(") ")
self.visit(arg)
def to_args(self, args, optargs=[]):
self.write("(")
if args:
self.cast_null(args[0])
for arg in args[1:]:
self.write(', ')
self.cast_null(arg)
self.write(")")
for optarg in optargs:
self.write(".optArg(")
self.to_str(optarg.arg)
self.write(", ")
self.visit(optarg.value)
self.write(")")
def generic_visit(self, node):
logger.error("While translating: %s", ast.dump(node))
logger.error("Got as far as: %s", ''.join(self.out))
raise Unhandled("Don't know what this thing is: " + str(type(node)))
def visit_Assign(self, node):
if len(node.targets) != 1:
Unhandled("We only support assigning to one variable")
self.write(self.type + " ")
self.write(node.targets[0].id)
self.write(" = (")
self.write(self.type)
self.write(") (")
if is_reql(self._type):
ReQLVisitor(self.reql_vars,
out=self.out,
type_=self.type,
is_def=True,
).visit(node.value)
else:
self.visit(node.value)
self.write(");")
def visit_Str(self, node):
self.to_str(node.s)
def visit_Bytes(self, node, skip_prefix=False, skip_suffix=False):
if not skip_prefix:
self.write("new byte[]{")
for i, byte in enumerate(node.s):
if i > 0:
self.write(", ")
# Java bytes are signed :(
if byte > 127:
self.write(str(-(256 - byte)))
else:
self.write(str(byte))
if not skip_suffix:
self.write("}")
else:
self.write(", ")
def visit_Name(self, node):
name = node.id
if name == 'frozenset':
self.skip("can't convert frozensets to GroupedData yet")
if name in metajava.java_term_info.JAVA_KEYWORDS or \
name in metajava.java_term_info.OBJECT_METHODS:
name += '_'
self.write({
'True': 'true',
'False': 'false',
'None': 'null',
'nil': 'null',
}.get(name, name))
def visit_arg(self, node):
self.write(node.arg)
def visit_NameConstant(self, node):
if node.value is None:
self.write("null")
elif node.value is True:
self.write("true")
elif node.value is False:
self.write("false")
else:
raise Unhandled(
"Don't know NameConstant with value %s" % node.value)
def visit_Attribute(self, node, emit_parens=True):
skip_parent = False
if attr_matches("r.ast", node):
# The java driver doesn't have that namespace, so we skip
# the `r.` prefix and create an ast class member in the
# test file. So stuff like `r.ast.rqlTzinfo(...)` converts
# to `ast.rqlTzinfo(...)`
skip_parent = True
if not skip_parent:
self.visit(node.value)
self.write(".")
self.write(metajava.dromedary(node.attr))
def visit_Num(self, node):
self.write(repr(node.n))
if not isinstance(node.n, float):
if node.n > 9223372036854775807 or node.n < -9223372036854775808:
self.write(".0")
else:
self.write("L")
def visit_Index(self, node):
self.visit(node.value)
def skip_if_arity_check(self, node):
'''Throws out tests for arity'''
rgx = re.compile('.*([Ee]xpect(ed|s)|Got) .* argument')
try:
if node.func.id == 'err' and rgx.match(node.args[1].s):
self.skip("arity checks done by java type system")
except (AttributeError, TypeError):
pass
def convert_if_string_encode(self, node):
'''Finds strings like 'foo'.encode("utf-8") and turns them into the
java version: "foo".getBytes(StandardCharsets.UTF_8)'''
try:
assert node.func.attr == 'encode'
node.func.value.s
encoding = node.args[0].s
except Exception:
return False
java_encoding = {
"ascii": "US_ASCII",
"utf-16": "UTF_16",
"utf-8": "UTF_8",
}[encoding]
self.visit(node.func.value)
self.write(".getBytes(StandardCharsets.")
self.write(java_encoding)
self.write(")")
return True
def bag_data_hack(self, node):
'''This is a very specific hack that isn't a general conversion method
whatsoever. In the tests we have an expected value like
bag(data * 2) where data is a list. This doesn't work in Java
obviously, but the only way to detect it "correctly" requires
type information in the ast, which we don't have. So the hack
here looks for this very specific case and rejiggers it. PRs
welcome for fixing this in a non-nasty way. In the meantime
I've made this extremely specific so it hopefully only gets
triggered by this specific case in the tests and not on
general conversions.
'''
try:
assert node.func.id == 'bag'
assert node.args[0].left.id == 'data'
assert type(node.args[0].op) == ast.Mult
assert node.args[0].right.n == 2
self.write("bag((List)")
self.write("Stream.concat(data.stream(), data.stream())")
self.write(".collect(Collectors.toList())")
self.write(")")
except Exception:
return False
else:
return True
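    # Illustrative sketch (not part of the original script): the hack above
    # rewrites the expected value ``bag(data * 2)`` into roughly
    #
    #     bag((List)Stream.concat(data.stream(), data.stream())
    #                     .collect(Collectors.toList()))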
def visit_Call(self, node):
self.skip_if_arity_check(node)
if self.convert_if_string_encode(node):
return
if self.bag_data_hack(node):
return
if type(node.func) == ast.Attribute and node.func.attr == 'error':
# This weird special case is because sometimes the tests
# use r.error and sometimes they use r.error(). The java
# driver only supports r.error(). Since we're coming in
# from a call here, we have to prevent visit_Attribute
# from emitting the parents on an r.error for us.
self.visit_Attribute(node.func, emit_parens=False)
else:
self.visit(node.func)
self.to_args(node.args, node.keywords)
def visit_Dict(self, node):
self.write("r.hashMap(")
if len(node.keys) > 0:
self.visit(node.keys[0])
self.write(", ")
self.visit(node.values[0])
for k, v in zip(node.keys[1:], node.values[1:]):
self.write(").with(")
self.visit(k)
self.write(", ")
self.visit(v)
self.write(")")
def visit_List(self, node):
self.write("r.array(")
self.join(", ", node.elts)
self.write(")")
def visit_Tuple(self, node):
self.visit_List(node)
def visit_Lambda(self, node):
if len(node.args.args) == 1:
self.visit(node.args.args[0])
else:
self.to_args(node.args.args)
self.write(" -> ")
self.visit(node.body)
def visit_Subscript(self, node):
if node.slice is None or type(node.slice.value) != ast.Num:
logger.error("While doing: %s", ast.dump(node))
raise Unhandled("Only integers subscript can be converted."
" Got %s" % node.slice.value.s)
self.visit(node.value)
self.write(".get(")
self.write(str(node.slice.value.n))
self.write(")")
def visit_ListComp(self, node):
gen = node.generators[0]
if type(gen.iter) == ast.Call and gen.iter.func.id.endswith('range'):
# This is really a special-case hacking of [... for i in
# range(i)] comprehensions that are used in the polyglot
# tests sometimes. It won't handle translating arbitrary
# comprehensions to Java streams.
self.write("LongStream.range(")
if len(gen.iter.args) == 1:
self.write("0, ")
self.visit(gen.iter.args[0])
elif len(gen.iter.args) == 2:
self.visit(gen.iter.args[0])
self.write(", ")
self.visit(gen.iter.args[1])
self.write(").boxed()")
else:
# Somebody came up with a creative new use for
# comprehensions in the test suite...
raise Unhandled("ListComp hack couldn't handle: ", ast.dump(node))
self.write(".map(")
self.visit(gen.target)
self.write(" -> ")
self.visit(node.elt)
self.write(").collect(Collectors.toList())")
def visit_UnaryOp(self, node):
opMap = {
ast.USub: "-",
ast.Not: "!",
ast.UAdd: "+",
ast.Invert: "~",
}
self.write(opMap[type(node.op)])
self.visit(node.operand)
def visit_BinOp(self, node):
opMap = {
ast.Add: " + ",
ast.Sub: " - ",
ast.Mult: " * ",
ast.Div: " / ",
ast.Mod: " % ",
}
t = type(node.op)
if t in opMap.keys():
self.visit(node.left)
self.write(opMap[t])
self.visit(node.right)
elif t == ast.Pow:
if type(node.left) == ast.Num and node.left.n == 2:
self.visit(node.left)
self.write(" << ")
self.visit(node.right)
else:
raise Unhandled("Can't do exponent with non 2 base")
class ReQLVisitor(JavaVisitor):
'''Mostly the same as the JavaVisitor, but converts some
reql-specific stuff. This should only be invoked on an expression
if it's already known to return true from is_reql'''
TOPLEVEL_CONSTANTS = {
'monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday', 'january', 'february', 'march', 'april',
'may', 'june', 'july', 'august', 'september', 'october',
'november', 'december', 'minval', 'maxval', 'error'
}
def is_byte_array_add(self, node):
'''Some places we do stuff like b'foo' + b'bar' and byte
arrays don't like that much'''
if (type(node.left) == ast.Bytes and
type(node.right) == ast.Bytes and
type(node.op) == ast.Add):
self.visit_Bytes(node.left, skip_suffix=True)
self.visit_Bytes(node.right, skip_prefix=True)
return True
else:
return False
def visit_BinOp(self, node):
if self.is_byte_array_add(node):
return
opMap = {
ast.Add: "add",
ast.Sub: "sub",
ast.Mult: "mul",
ast.Div: "div",
ast.Mod: "mod",
ast.BitAnd: "and",
ast.BitOr: "or",
}
func = opMap[type(node.op)]
if self.is_not_reql(node.left):
self.prefix(func, node.left, node.right)
else:
self.infix(func, node.left, node.right)
def visit_Compare(self, node):
opMap = {
ast.Lt: "lt",
ast.Gt: "gt",
ast.GtE: "ge",
ast.LtE: "le",
ast.Eq: "eq",
ast.NotEq: "ne",
}
if len(node.ops) != 1:
# Python syntax allows chained comparisons (a < b < c) but
# we don't deal with that here
raise Unhandled("Compare hack bailed on: ", ast.dump(node))
left = node.left
right = node.comparators[0]
func_name = opMap[type(node.ops[0])]
if self.is_not_reql(node.left):
self.prefix(func_name, left, right)
else:
self.infix(func_name, left, right)
def prefix(self, func_name, left, right):
self.write("r.")
self.write(func_name)
self.write("(")
self.visit(left)
self.write(", ")
self.visit(right)
self.write(")")
def infix(self, func_name, left, right):
self.visit(left)
self.write(".")
self.write(func_name)
self.write("(")
self.visit(right)
self.write(")")
def is_not_reql(self, node):
if type(node) in (ast.Name, ast.NameConstant,
ast.Num, ast.Str, ast.Dict, ast.List):
return True
else:
return False
def visit_Subscript(self, node):
self.visit(node.value)
if type(node.slice) == ast.Index:
# Syntax like a[2] or a["b"]
if self.smart_bracket and type(node.slice.value) == ast.Str:
self.write(".g(")
elif self.smart_bracket and type(node.slice.value) == ast.Num:
self.write(".nth(")
else:
self.write(".bracket(")
self.visit(node.slice.value)
self.write(")")
elif type(node.slice) == ast.Slice:
# Syntax like a[1:2] or a[:2]
self.write(".slice(")
lower, upper, rclosed = self.get_slice_bounds(node.slice)
self.write(str(lower))
self.write(", ")
self.write(str(upper))
self.write(")")
if rclosed:
self.write('.optArg("right_bound", "closed")')
else:
raise Unhandled("No translation for ExtSlice")
def get_slice_bounds(self, slc):
'''Used to extract bounds when using bracket slice
syntax. This is more complicated since Python3 parses -1 as
UnaryOp(op=USub, operand=Num(1)) instead of Num(-1) like
Python2 does'''
if not slc:
return 0, -1, True
def get_bound(bound, default):
if bound is None:
return default
elif type(bound) == ast.UnaryOp and type(bound.op) == ast.USub:
return -bound.operand.n
elif type(bound) == ast.Num:
return bound.n
else:
raise Unhandled(
"Not handling bound: %s" % ast.dump(bound))
right_closed = slc.upper is None
return get_bound(slc.lower, 0), get_bound(slc.upper, -1), right_closed
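    # Illustrative examples (not part of the original script) of the returned
    # (lower, upper, right_closed) triples:
    #
    #     a[1:3]  ->  (1, 3, False)
    #     a[2:]   ->  (2, -1, True)    # an open upper bound is right-closed
    #     a[:-1]  ->  (0, -1, False)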
def visit_Attribute(self, node, emit_parens=True):
is_toplevel_constant = False
if attr_matches("r.row", node):
self.skip("Java driver doesn't support r.row", fatal=True)
elif is_name("r", node.value) and node.attr in self.TOPLEVEL_CONSTANTS:
# Python has r.minval, r.saturday etc. We need to emit
# r.minval() and r.saturday()
is_toplevel_constant = True
python_clashes = {
# These are underscored in the python driver to avoid
# keywords, but they aren't java keywords so we convert
# them back.
'or_': 'or',
'and_': 'and',
'not_': 'not',
}
method_aliases = {metajava.dromedary(k): v
for k, v in metajava.java_term_info
.METHOD_ALIASES.items()}
self.visit(node.value)
self.write(".")
initial = python_clashes.get(
node.attr, metajava.dromedary(node.attr))
initial = method_aliases.get(initial, initial)
self.write(initial)
if initial in metajava.java_term_info.JAVA_KEYWORDS or \
initial in metajava.java_term_info.OBJECT_METHODS:
self.write('_')
if emit_parens and is_toplevel_constant:
self.write('()')
def visit_UnaryOp(self, node):
if type(node.op) == ast.Invert:
self.visit(node.operand)
self.write(".not()")
else:
super(ReQLVisitor, self).visit_UnaryOp(node)
def visit_Call(self, node):
# We call the superclass first, so if it's going to fail
# because of r.row or other things it fails first, rather than
# hitting the checks in this method. Since everything is
# written to a stringIO object not directly to a file, if we
# bail out afterwards it's still ok
super_result = super(ReQLVisitor, self).visit_Call(node)
# r.for_each(1) etc should be skipped
if (attr_equals(node.func, "attr", "for_each") and
type(node.args[0]) != ast.Lambda):
self.skip("the java driver doesn't allow "
"non-function arguments to forEach")
# map(1) should be skipped
elif attr_equals(node.func, "attr", "map"):
def check(node):
if type(node) == ast.Lambda:
return True
elif hasattr(node, "func") and attr_matches("r.js", node.func):
return True
elif type(node) == ast.Dict:
return True
elif type(node) == ast.Name:
# The assumption is that if you're passing a
# variable to map, it's at least potentially a
# function. This may be misguided
return True
else:
return False
if not check(node.args[-1]):
self.skip("the java driver statically checks that "
"map contains a function argument")
else:
return super_result
def attr_equals(node, attr, value):
'''Helper for digging into ast nodes'''
return hasattr(node, attr) and getattr(node, attr) == value
if __name__ == '__main__':
main()
| agpl-3.0 | 2,851,923,914,246,198,000 | 32.321767 | 79 | 0.545426 | false |
upconsulting/IsisCB | isiscb/zotero/tests.py | 1 | 54915 | from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import object
from unittest import TestCase
from django.test.client import RequestFactory
from django.contrib.contenttypes.models import ContentType
from django.db import models
import rdflib, datetime, tempfile, types, os
from collections import Counter
from zotero.models import *
from .suggest import *
from .tasks import *
from zotero.parse import ZoteroIngest
from zotero import ingest
from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef
from rdflib.namespace import DC, FOAF, DCTERMS
BIB = Namespace('http://purl.org/net/biblio#')
RSS = Namespace('http://purl.org/rss/1.0/modules/link/')
ZOTERO = Namespace('http://www.zotero.org/namespaces/export#')
# Create your tests here.
datapath = 'zotero/test_data/IsisCBTest.rdf'
AUTHOR = rdflib.URIRef("http://purl.org/net/biblio#authors")
partdetails_fields = [
('page_start', 'page_begin'),
('page_end', 'page_end'),
('pages_free_text', 'pages_free_text'),
('issue', 'issue_free_text'),
('volume', 'volume_free_text'),
]
def clearAll():
Citation.objects.all().delete()
Authority.objects.all().delete()
CCRelation.objects.all().delete()
ACRelation.objects.all().delete()
InstanceResolutionEvent.objects.all().delete()
ImportAccession.objects.all().delete()
DraftAuthority.objects.all().delete()
DraftCitation.objects.all().delete()
DraftACRelation.objects.all().delete()
DraftCCRelation.objects.all().delete()
class TestExtra(TestCase):
"""
The "extra" field in Zotero is represented using the DC.description
predicate.
"""
def test_extra(self):
"""
"""
papers = ZoteroIngest('zotero/test_data/TestExtraField.rdf')
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
for citation in citations:
self.assertTrue(citation.extra is not None)
self.assertEqual(citation.extra,
"This is a test of the extra data field")
def tearDown(self):
clearAll()
class TestPages(TestCase):
"""
Sometimes we get unicode oddities in the page numbers.
"""
def test_page_number(self):
"""
Both of these journal articles should have clear start and end pages.
"""
book_data = 'zotero/test_data/Journal test.rdf'
papers = ZoteroIngest(book_data)
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
for citation in citations:
self.assertTrue(citation.page_start is not None)
self.assertTrue(citation.page_end is not None)
def tearDown(self):
clearAll()
class TestVolumeAndIssue(TestCase):
"""
ISISCB-779 Issue and Volume should be imported.
"""
def test_volume_and_issue(self):
"""
Each of the citations in this RDF document should have values for
volume and issue.
"""
book_data = 'zotero/test_data/Journal test.rdf'
papers = ZoteroIngest(book_data)
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
for citation in citations:
self.assertTrue(citation.issue is not None)
self.assertTrue(citation.volume is not None)
def tearDown(self):
clearAll()
class TestPublisher(TestCase):
"""
Information about publisher should be retained.
"""
def test_publisher_info(self):
"""
Both of the books in this document have publishers, so we should expect
corresponding ACRelations.
"""
book_data = 'zotero/test_data/Book test.rdf'
papers = ZoteroIngest(book_data)
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
for citation in citations:
type_counts = Counter()
auth_type_counts = Counter()
for rel in citation.authority_relations.all():
type_counts[rel.type_controlled] += 1
auth_type_counts[rel.authority.type_controlled] += 1
self.assertEqual(type_counts[DraftACRelation.PUBLISHER], 1)
# ISISCB-789: Target of Publisher relation should be Institution.
self.assertEqual(auth_type_counts[DraftAuthority.INSTITUTION], 1)
def tearDown(self):
clearAll()
class TestExtent(TestCase):
"""
z:numPages should be interpreted as DraftCitation.extent.
"""
def test_parse_extent(self):
"""
Both of the books in this document should have ``extent`` data.
"""
book_data = 'zotero/test_data/Book test.rdf'
papers = ZoteroIngest(book_data)
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
for citation in citations:
self.assertGreater(citation.extent, 0)
def tearDown(self):
clearAll()
class TestBookSeries(TestCase):
"""
"""
def setUp(self):
codes = [
'=151-360=',
'=102-375=',
'=150-340=',
'=102-350=',
'=103-340=',
'=160-370=',
'=160-375=',
'=151-375=',
'=121-320=',
'=120-370=',
'=123-360=',
'=160-360=',
'=161-360=',
'=150=',
'=160-380=',
'=1-330=',
'=150-370=',
'=1-340=',
'=131=',
'=150-380=',
'=42-370=',
'=151-360=',
'=152-360=',
'=151-360=',
'=160=',
'=150-230=',
'=160-370=',
'=150-350=',
'=163-370=',
'=140-360=',
]
for code in codes:
Authority.objects.create(
name='The real %s' % code,
type_controlled=DraftAuthority.CONCEPT,
classification_code=code.replace('=', ''),
)
def test_process_bookseries(self):
"""
If we ingest a citation that is part of something else, we should use
BOOK_SERIES for the ACRelation.
We're also double-checking that percent-encoded subject codes are
resolved correctly.
"""
book_data = 'zotero/test_data/Books test 1 SR 2016.09.27.rdf'
papers = ZoteroIngest(book_data)
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
self.assertGreater(len(citations), 0)
for citation in citations:
citation.refresh_from_db()
type_counts = Counter()
for rel in citation.authority_relations.all():
type_counts[rel.type_controlled] += 1
if rel.type_controlled == DraftACRelation.SUBJECT:
# We have matched all percent-encoded subject authorities.
self.assertFalse(rel.authority.name.startswith('='))
# ISISCB-1163: book series relation are not generate anymore
#if citation.book_series is not None:
# self.assertEqual(type_counts[DraftACRelation.BOOK_SERIES], 1)
def tearDown(self):
clearAll()
class TestLanguageParsing(TestCase):
def test_parse_language(self):
"""
ISISCB-749 Should parse Language from Zotero metadata.
All of the chapters in this dataset have language fields.
"""
book_data = 'zotero/test_data/Chapter Test 8-9-16.rdf'
for entry in ZoteroIngest(book_data):
if entry.get('type_controlled')[0].lower() == 'booksection':
self.assertIn('language', entry)
def test_process_language(self):
"""
ISISCB-749 IngestManager should use language data to attempt to fill
:prop:`.DraftCitation.language`\.
"""
accession = ImportAccession.objects.create(name=u'test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
language = Language.objects.create(id='TL', name='TestLanguage')
data = {
'language': ['TL'],
}
ingest.IngestManager.generate_language_relations(data, draftcitation)
draftcitation.refresh_from_db()
self.assertEqual(draftcitation.language, language,
"Should match language by ID.")
data = {
'language': ['TestLanguage'],
}
ingest.IngestManager.generate_language_relations(data, draftcitation)
draftcitation.refresh_from_db()
        self.assertEqual(draftcitation.language, language,
                         "Otherwise, should match language by name.")
def test_accession_language(self):
"""
:prop:`.DraftCitation.language` should be used to fill
:class:`.Citation.language`\.
"""
accession = ImportAccession.objects.create(name=u'test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
language = Language.objects.create(id='TL', name='TestLanguage')
rf = RequestFactory()
request = rf.get('/hello/')
user = User.objects.create(username='bob', password='what', email='[email protected]')
request.user = user
data = {
'language': ['TL'],
}
ingest.IngestManager.generate_language_relations(data, draftcitation)
draftcitation.refresh_from_db()
new_citation = ingest_citation(request, accession, draftcitation)
self.assertEqual(new_citation.language.first(), language)
def tearDown(self):
for model in [DraftCitation, DraftAuthority, DraftACRelation,
DraftCCRelation, ImportAccession, DraftCitationLinkedData,
DraftAuthorityLinkedData, Authority, AttributeType,
User, Attribute, Language]:
model.objects.all().delete()
class TestBookReviews(TestCase):
"""
Reviews are linked to book citations via the "reviewed author" field in
    Zotero. The foaf:surname of the target "author" can contain either an
    ISBN or an IsisCB citation ID identifying the reviewed work.
"""
def setUp(self):
isbn_type, _ = LinkedDataType.objects.get_or_create(name='ISBN')
identifiers = [u'CBB001552823', u'CBB001202302', u'CBB001510022',
u'CBB001422653', u'CBB001551200']
isbns = [u'9782853672665', u'9782021111293', u'9783319121017',
u'CBB001552823', u'CBB001202302', u'9789004225534',
u'CBB001510022', u'CBB001422653', u'CBB001551200',
u'9783515104418', u'9788387992842']
for identifier in identifiers:
test_book = Citation.objects.create(title='A Test Citation',
type_controlled=Citation.BOOK,
id=identifier)
for isbn in isbns:
test_book = Citation.objects.create(title='A Test Citation',
type_controlled=Citation.BOOK)
LinkedData.objects.create(universal_resource_name=isbn,
type_controlled=isbn_type,
subject=test_book)
def test_process_bookreviews(self):
book_data = 'zotero/test_data/IsisReviewExamples.rdf'
papers = ZoteroIngest(book_data)
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
        # The reviews in this dataset should be linked to the book citations
        # created in setUp via REVIEWED_BY relations (checked below).
type_counts = Counter()
for citation in citations:
type_counts[citation.type_controlled] += 1
if citation.type_controlled == Citation.REVIEW:
self.assertGreater(citation.relations_to.count(), 0)
relation = citation.relations_to.first()
self.assertEqual(relation.type_controlled,
CCRelation.REVIEWED_BY)
self.assertEqual(type_counts[Citation.REVIEW], 8)
def test_ingest_reviews(self):
rf = RequestFactory()
request = rf.get('/hello/')
user = User.objects.create(username='bob', password='what', email='[email protected]')
request.user = user
accession = ImportAccession.objects.create(name='TestAccession')
book_data = 'zotero/test_data/IsisReviewExamples.rdf'
for citation in ingest.process(ZoteroIngest(book_data), accession):
new_citation = ingest_citation(request, accession, citation)
# CCRelations are no longer created in tasks.ingest_citation; they
# are now handled by tasks.ingest_ccrelations, which is called by
# tasks.ingest_accession. This is to prevent circular recursion
# when attempting to resolve dependencies.
self.assertEqual(new_citation.relations_to.count(), 0)
def tearDown(self):
Citation.objects.all().delete()
Authority.objects.all().delete()
ACRelation.objects.all().delete()
CCRelation.objects.all().delete()
ImportAccession.objects.all().delete()
DraftAuthority.objects.all().delete()
DraftCitation.objects.all().delete()
DraftACRelation.objects.all().delete()
DraftCCRelation.objects.all().delete()
User.objects.all().delete()
class TestBookChapters(TestCase):
"""
Chapters are linked to book citations via the "Book title" field in Zotero.
This is represented as dc.isPartOf -> bib:Book.
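    In the fixtures used below (an assumed reading of the test data, not quoted
    from the RDF), each chapter entry references its parent bib:Book node, and
    that book's title/ISBN is what produces the INCLUDES_CHAPTER relations
    asserted here.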
"""
def test_process_bookchapters2(self):
book_data = "zotero/test_data/Chapter Test 8-9-16.rdf"
papers = ZoteroIngest(book_data)
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
type_counts = Counter([c.type_controlled for c in citations])
self.assertEqual(type_counts[Citation.BOOK], 1)
self.assertEqual(type_counts[Citation.CHAPTER], 2)
instance.refresh_from_db()
def test_process_bookchapters_resolve(self):
book_data = "zotero/test_data/Chapter Test 8-9-16.rdf"
papers = ZoteroIngest(book_data)
accession = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, accession).process()
accession.refresh_from_db()
# The ImportAccession should be fully resolved, so we need to create
# corresponding Authority records ahead of time.
for draftauthority in accession.draftauthority_set.all():
authority = Authority.objects.create(
name = draftauthority.name,
type_controlled = draftauthority.type_controlled,
)
InstanceResolutionEvent.objects.create(
for_instance = draftauthority,
to_instance = authority,
)
accession.draftauthority_set.all().update(processed=True)
# We need a user for the accession.
rf = RequestFactory()
request = rf.get('/hello/')
user = User.objects.create(username='bob', password='what', email='[email protected]')
request.user = user
prod_citations = ingest_accession(request, accession)
for citation in prod_citations:
self.assertGreater(citation.relations_from.count() + citation.relations_to.count(), 0)
def test_process_bookchapters(self):
test_book = Citation.objects.create(title='A Test Citation',
type_controlled=Citation.BOOK)
isbn_type, _ = LinkedDataType.objects.get_or_create(name='ISBN')
LinkedData.objects.create(universal_resource_name='9783110225784',
type_controlled=isbn_type,
subject=test_book)
book_data = 'zotero/test_data/BookChapterExamples.rdf'
papers = ZoteroIngest(book_data)
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
        # There is one book in this dataset, and the chapters are chapters of
        # this book.
book = [c for c in citations if c.type_controlled == Citation.BOOK][0]
type_counts = Counter()
for citation in citations:
type_counts[citation.type_controlled] += 1
if citation.type_controlled == Citation.CHAPTER:
self.assertGreater(citation.relations_to.count(), 0)
relation = citation.relations_to.first()
self.assertEqual(relation.type_controlled,
CCRelation.INCLUDES_CHAPTER)
        self.assertEqual(type_counts[Citation.BOOK], 2, "There are two unique "
                         "ISBNs, thus two unique books.")
self.assertEqual(type_counts[Citation.CHAPTER], 6)
def tearDown(self):
for model in [DraftCitation, DraftAuthority, DraftACRelation,
DraftCCRelation, ImportAccession, DraftCitationLinkedData,
DraftAuthorityLinkedData, Authority, AttributeType,
Attribute, User, Citation, ACRelation, CCRelation]:
model.objects.all().delete()
class TestSubjects(TestCase):
def test_parse_subjects(self):
papers = ZoteroIngest('zotero/test_data/Hist Europ Idea 2015 41 7.rdf')
for paper in papers:
self.assertIn('subjects', paper)
def test_process_subjects(self):
Authority.objects.create(name='testauthority', classification_code='140-340')
papers = ZoteroIngest('zotero/test_data/Hist Europ Idea 2015 41 7.rdf')
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
for citation in citations:
for acrelation in citation.authority_relations.filter(type_controlled=DraftACRelation.CATEGORY):
if acrelation.authority.name == 'testauthority':
self.assertEqual(acrelation.authority.resolutions.count(), 1)
def tearDown(self):
Citation.objects.all().delete()
Authority.objects.all().delete()
ACRelation.objects.all().delete()
CCRelation.objects.all().delete()
ImportAccession.objects.all().delete()
DraftAuthority.objects.all().delete()
DraftCitation.objects.all().delete()
DraftACRelation.objects.all().delete()
DraftCCRelation.objects.all().delete()
User.objects.all().delete()
class TestSuggest(TestCase):
def test_suggest_citation_by_linkeddata(self):
"""
TODO: complete this.
"""
accession = ImportAccession(name='test')
accession.save()
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(ZoteroIngest(datapath), instance).process()
def tearDown(self):
Citation.objects.all().delete()
Authority.objects.all().delete()
ACRelation.objects.all().delete()
CCRelation.objects.all().delete()
ImportAccession.objects.all().delete()
DraftAuthority.objects.all().delete()
DraftCitation.objects.all().delete()
DraftACRelation.objects.all().delete()
DraftCCRelation.objects.all().delete()
User.objects.all().delete()
class TestIngest(TestCase):
"""
After all :class:`.DraftAuthority` instances have been resolved for a
:class:`.ImportAccession`\, the curator will elect to ingest all of the
records in that accession into the production database.
"""
def setUp(self):
self.dataset = Dataset.objects.create(name='test dataset')
self.accession = ImportAccession.objects.create(name='test',
ingest_to=self.dataset)
instance = ImportAccession.objects.create(name='TestAccession')
self.citations = ingest.IngestManager(ZoteroIngest(datapath), self.accession).process()
# We need a user for the accession.
rf = RequestFactory()
self.request = rf.get('/hello/')
self.user = User.objects.create(username='bob', password='what', email='[email protected]')
self.request.user = self.user
isodate_type = ContentType.objects.get_for_model(ISODateValue)
self.publicationDateType, _ = AttributeType.objects.get_or_create(
name='PublicationDate',
value_content_type=isodate_type,
)
# The ImportAccession should be fully resolved, so we need to create
# corresponding Authority records ahead of time.
for draftauthority in self.accession.draftauthority_set.all():
authority = Authority.objects.create(
name = draftauthority.name,
type_controlled = draftauthority.type_controlled,
)
InstanceResolutionEvent.objects.create(
for_instance = draftauthority,
to_instance = authority,
)
self.accession.draftauthority_set.all().update(processed=True)
def test_ingest_accession(self):
citation = ingest_accession(self.request, self.accession)
self.accession.refresh_from_db()
self.assertEqual(self.accession.citation_set.count(),
self.accession.draftcitation_set.count(),
'did not ingest all citations')
def test_ingest_citation(self):
draftcitation = self.citations[1]
citation = ingest_citation(self.request, self.accession, draftcitation)
self.assertIsInstance(citation.created_on, datetime.datetime,
'created_on not populated correctly')
self.assertIsInstance(citation.created_by, User,
'created_by not populated correctly')
if citation.publication_date:
self.assertIsInstance(citation.publication_date, datetime.date,
'publication_date not populated correctly')
self.assertFalse(citation.public,
'new citation is public; should be non-public')
self.assertEqual(citation.record_status_value, CuratedMixin.INACTIVE,
'new citation is not inactive')
self.assertEqual(citation.title, draftcitation.title,
'title not transferred correctly')
self.assertEqual(citation.type_controlled,
draftcitation.type_controlled,
'type_controlled not transferred correctly')
self.assertTrue(draftcitation.processed,
'DraftCitation not flagged as processed')
self.assertEqual(self.accession.ingest_to, citation.belongs_to,
'citation not assigned to the correct dataset')
model_fields = {f.name: type(f) for f in PartDetails._meta.fields}
for field, pfield in partdetails_fields:
draft_value = getattr(draftcitation, field, None)
if model_fields[pfield] is models.IntegerField:
try:
draft_value = int(draft_value)
except (ValueError, TypeError):
continue
prod_value = getattr(citation.part_details, pfield, None)
self.assertEqual(draft_value, prod_value,
'%s not populated correctly, %s != %s' % \
(pfield, draft_value, prod_value))
for draft in draftcitation.authority_relations.all():
self.assertTrue(draft.processed,
'DraftACRelation not flagged as processed')
self.assertEqual(draft.resolutions.count(), 1,
'resolution not created for DraftACRelation')
prod = draft.resolutions.first().to_instance
self.assertEqual(draft.type_controlled, prod.type_controlled,
'type_controlled transferred incorrectly')
self.assertEqual(draft.authority.name,
prod.name_for_display_in_citation,
'DraftAuthority name not transferred to ACR')
self.assertEqual(self.accession.ingest_to, prod.belongs_to)
attribute = citation.attributes.first()
self.assertIsInstance(attribute.value_freeform, str)
self.assertEqual(len(attribute.value_freeform), 4,
"ISISCB-736: freeform value should be four-digit year")
self.assertEqual(attribute.type_controlled, self.publicationDateType,
'attribute has the wrong type')
self.assertIsInstance(attribute.value.get_child_class(), ISODateValue,
'attribute value instantiates the wrong class')
self.assertEqual(attribute.value.get_child_class().as_date,
citation.publication_date,
'publication date attribute incorrect')
def tearDown(self):
self.accession.delete()
self.dataset.delete()
self.user.delete()
for model in [DraftCitation, DraftAuthority, DraftACRelation,
DraftCCRelation, ImportAccession, DraftCitationLinkedData,
DraftAuthorityLinkedData, Authority, AttributeType,
Attribute, User, Citation, ACRelation, CCRelation]:
model.objects.all().delete()
class TestAccessionProperties(TestCase):
def setUp(self):
rf = RequestFactory()
self.request = rf.get('/hello/')
self.user = User.objects.create(username='bob', password='what', email='[email protected]')
self.request.user = self.user
def test_citations_ready(self):
accession = ImportAccession.objects.create(name='accession')
draftauthority = DraftAuthority.objects.create(name='testauthority', part_of=accession)
draftcitation = DraftCitation.objects.create(title='testcitation', part_of=accession)
draftcitation2 = DraftCitation.objects.create(title='testcitation2', part_of=accession)
DraftACRelation.objects.create(authority=draftauthority, citation=draftcitation, type_controlled=DraftACRelation.AUTHOR, part_of=accession)
DraftCCRelation.objects.create(subject=draftcitation, object=draftcitation2, part_of=accession)
# draftcitation2 has no ACRelations, so should be ready from the start.
self.assertEqual(len(accession.citations_ready), 1)
authority = Authority.objects.create(name='testtest', type_controlled=Authority.PERSON)
InstanceResolutionEvent.objects.create(for_instance=draftauthority, to_instance=authority)
draftauthority.processed = True
draftauthority.save()
# Now draftcitation is ready, since the target of its one ACRelation is
# resolved.
self.assertEqual(len(accession.citations_ready), 2)
citations_before = Citation.objects.count()
ccrelations_before = CCRelation.objects.count()
ingest_accession(self.request, accession)
self.assertEqual(citations_before + 2, Citation.objects.count())
self.assertEqual(ccrelations_before + 1, CCRelation.objects.count())
def tearDown(self):
Citation.objects.all().delete()
Authority.objects.all().delete()
CCRelation.objects.all().delete()
ACRelation.objects.all().delete()
InstanceResolutionEvent.objects.all().delete()
ImportAccession.objects.all().delete()
DraftAuthority.objects.all().delete()
DraftCitation.objects.all().delete()
DraftACRelation.objects.all().delete()
DraftCCRelation.objects.all().delete()
class TestZoteroIngesterRDFOnlyReviews(TestCase):
def test_parse_zotero_rdf(self):
from pprint import pprint
ingester = ZoteroIngest("zotero/test_data/Chapter Test 8-9-16.rdf")
# for datum in ingester:
# pprint(datum)
class TestImportMethods(TestCase):
def test_get_dtype(self):
"""
:func:`zotero.ingest.IngestManager._get_dtype` is a private function that
        extracts field-data for :prop:`.DraftCitation.type_controlled` from a
parsed entry.
"""
for dtype, value in list(ingest.DOCUMENT_TYPES.items()):
entry = {
'type_controlled': [dtype],
}
data = ingest.IngestManager._get_dtype(entry)
self.assertIn('type_controlled', data)
self.assertEqual(data.get('type_controlled'), value)
data = ingest.IngestManager._get_dtype({})
self.assertEqual(data.get('type_controlled'), DraftCitation.ARTICLE,
"type_controlled should default to Article.")
def test_get_pages_data(self):
"""
:func:`zotero.ingest.IngestManager._get_pages_data` is a private function that
        extracts field-data for ``page_start``, ``page_end``, and
``pages_free_text`` from a parsed entry.
"""
# Both start and end available.
data = ingest.IngestManager._get_pages_data({'pages':[('1', '2')]})
self.assertEqual(data.get('page_start'), '1')
self.assertEqual(data.get('page_end'), '2')
self.assertEqual(data.get('pages_free_text'), u'1-2')
# Single value available.
data = ingest.IngestManager._get_pages_data({'pages':[u'555']})
self.assertEqual(data.get('page_start'), u'555')
self.assertEqual(data.get('page_end'), None)
self.assertEqual(data.get('pages_free_text'), u'555')
# Single value available.
data = ingest.IngestManager._get_pages_data({'pages':[(u'555',)]})
self.assertEqual(data.get('page_start'), u'555')
self.assertEqual(data.get('page_end'), None)
self.assertEqual(data.get('pages_free_text'), u'555')
    def test_generate_generic_acrelations(self):
accession = ImportAccession.objects.create(name=u'test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
data = {
u'authors': [{
u'name_first': u'Fokko Jan',
u'name': u'Fokko Jan Dijksterhuis',
u'name_last': u'Dijksterhuis',
u'data_display_order': '1.0'
},{
u'name_first': u'Carsten',
u'name': u'Carsten Timmermann',
u'name_last': u'Timmermann',
u'data_display_order': '2.0'
}]
}
results = ingest.IngestManager.generate_generic_acrelations(
data, 'authors', draftcitation, DraftAuthority.PERSON,
DraftACRelation.AUTHOR, 1)
self.assertEqual(len(results), 2, "Should create two DraftACRelations.")
self.assertIsInstance(results[0], tuple,
"Should return two objects per relation.")
self.assertIsInstance(results[0][0], DraftAuthority,
"The first object is the DraftAuthority"
" instance.")
self.assertIsInstance(results[0][1], DraftACRelation,
"The second object is the DraftACRelation"
" instance.")
        # make sure the ACRelation data display order is calculated correctly
self.assertEqual(results[0][1].data_display_order, 2.0,
"First DraftACRelation should have data display order = 2.0 but is " + str(results[0][1].data_display_order))
for draft_authority, draft_relation in results:
self.assertEqual(draft_relation.citation, draftcitation,
"The DraftACRelation should be linked to the"
" passed DraftCitation.")
self.assertEqual(draft_relation.type_controlled,
DraftACRelation.AUTHOR,
"The DraftACRelation should have the correct"
" type.")
self.assertEqual(draft_authority.type_controlled,
DraftAuthority.PERSON,
"The DraftAuthority should have the correct type.")
def test_generate_reviewed_works_relations(self):
"""
If reviews are found, then ``draft_citation`` will be re-typed as a
Review.
Attempts to match reviewed works against (1) citations in this ingest batch,
(2) citations with matching IDs, and (3) citations with matching linked
data.
"""
accession = ImportAccession.objects.create(name=u'test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
manager = ingest.IngestManager([], accession)
try:
r = manager.generate_reviewed_works_relations({}, draftcitation)
except:
self.fail("Should not choke on nonsensical data, but rather fail"
" quietly.")
self.assertEqual(draftcitation.type_controlled, DraftCitation.ARTICLE,
"If no review is found, type should not be changed.")
data = {'reviewed_works': [{'name': 'nonsense'}]}
try:
r = manager.generate_reviewed_works_relations(data, draftcitation)
except:
self.fail("Should not choke on nonsensical data, but rather fail"
" quietly.")
self.assertEqual(len(r), 0,
"If the identifier can't be resolved into something"
" meaningful, should simply bail.")
draftcitation.refresh_from_db()
data = {
'reviewed_works': [{'name': '12345678'}]
}
alt_draftcitation = DraftCitation.objects.create(**{
'title': 'Alt',
'type_controlled': DraftCitation.BOOK,
'part_of': accession,
})
manager.draft_citation_map = {'12345678': alt_draftcitation}
r = manager.generate_reviewed_works_relations(data, draftcitation)
self.assertEqual(len(r), 1, "If a matching citation is found in this"
" accession, it should be associated.")
draftcitation.refresh_from_db()
self.assertEqual(draftcitation.type_controlled, DraftCitation.REVIEW,
"If a review is found, type should be review.")
citation = Citation.objects.create(
title = 'A real citation',
type_controlled = Citation.BOOK,
)
data = {
'reviewed_works': [{'name': citation.id}]
}
r = manager.generate_reviewed_works_relations(data, draftcitation)
self.assertEqual(len(r), 1, "If a matching citation is found in the"
" database, it should be associated.")
self.assertEqual(r[0][0].resolutions.first().to_instance, citation)
draftcitation.refresh_from_db()
self.assertEqual(draftcitation.type_controlled, DraftCitation.REVIEW,
"If a review is found, type should be review.")
def test_generate_book_chapter_relations(self):
accession = ImportAccession.objects.create(name=u'test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
data = {
u'part_of': [{
u'linkeddata': [(u'ISBN', u'9781874267621')],
u'title': u"Thinking Through the Environment: Green"
u" Approaches to Global History",
u'type_controlled': u'Book'
}],
}
manager = ingest.IngestManager([], accession)
result = manager.generate_book_chapter_relations(data, draftcitation)
alt_draftcitation = DraftCitation.objects.create(**{
'title': u"Thinking Through the Environment: Green"
u" Approaches to Global History",
'type_controlled': DraftCitation.BOOK,
'part_of': accession,
})
manager.draft_citation_map = {'9781874267621': alt_draftcitation}
r = manager.generate_book_chapter_relations(data, draftcitation)
self.assertEqual(len(r), 1, "If a matching citation is found in this"
" accession, it should be associated.")
draftcitation.refresh_from_db()
self.assertEqual(draftcitation.type_controlled, DraftCitation.CHAPTER,
"If a book is found, type should be chapter.")
def test_generate_part_of_relations(self):
accession = ImportAccession.objects.create(name=u'test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
data = {
'part_of': [{
u'linkeddata': [(u'ISSN', u'0191-6599')],
u'title': u'History of European Ideas',
u'type_controlled': u'Journal'
}, {
u'title': u'En temps & lieux.',
u'type_controlled': u'Series'
}],
}
result = ingest.IngestManager.generate_part_of_relations(data, draftcitation)
        self.assertEqual(len(result), 1, "Should yield one record")
def test_generate_citation_linkeddata(self):
"""
:func:`zotero.ingest.IngestManager.generate_citation_linkeddata` creates new
:class:`.DraftCitationLinkedData` instances from a parsed entry.
"""
accession = ImportAccession.objects.create(name=u'test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
data = {
u'linkeddata': [
(u'URI',
u'http://www.journals.uchicago.edu/doi/abs/10.1086/687176'),
(u'ISSN', u'0021-1753'),
]
}
linkeddata = ingest.IngestManager.generate_citation_linkeddata(data, draftcitation)
self.assertEqual(len(linkeddata), 2, "Should create two records.")
for linkeddatum in linkeddata:
self.assertIsInstance(linkeddatum, DraftCitationLinkedData,
"Should return DraftCitationLinkedData"
" instances.")
self.assertEqual(linkeddatum.citation, draftcitation,
"Should point to the passed DraftCitation.")
try:
ingest.IngestManager.generate_citation_linkeddata({}, draftcitation)
except:
self.fail("Should not choke when no linkeddata are present.")
def test_generate_draftcitation(self):
"""
:func:`zotero.ingest.IngestManager.generate_draftcitation` creates a new
:class:`.DraftCitation` instance from a parsed entry.
"""
_title = u'The Test Title'
_date = datetime.datetime.now()
_vol = u'5'
_iss = u'1'
_abstract = u'A very abstract abstract'
data = {
u'title': [_title],
u'type_controlled': [u'Book'],
u'publication_date': [_date],
u'pages': [('373', '374')],
u'volume': [_vol],
u'issue': [_iss],
u'abstract': [_abstract],
}
accession = ImportAccession.objects.create(name=u'test')
manager = ingest.IngestManager([], accession)
draft_citation = manager.generate_draftcitation(data, accession)
self.assertIsInstance(draft_citation, DraftCitation,
"generate_draftcitation should return a"
" DraftCitation instance.")
self.assertEqual(draft_citation.title, _title)
self.assertEqual(draft_citation.type_controlled, DraftCitation.BOOK)
self.assertEqual(draft_citation.volume, _vol)
self.assertEqual(draft_citation.issue, _iss)
self.assertEqual(draft_citation.abstract, _abstract)
self.assertEqual(draft_citation.page_start, '373')
self.assertEqual(draft_citation.page_end, '374')
self.assertEqual(draft_citation.pages_free_text, '373-374')
def test_find_mapped_authority(self):
"""
:func:`zotero.ingest.IngestManager._find_mapped_authority` attempts to find an
:class:`.Authority` record using the table in
``zotero/AuthorityIDmap.tab``.
"""
authority = Authority.objects.create(
name = 'test_authority',
type_controlled = Authority.PERSON,
id = 'CBA000113709'
)
candidate = ingest.IngestManager._find_mapped_authority('Whoops', '13')
self.assertEqual(candidate, authority,
"Should return the corresponding authority.")
self.assertEqual(ingest.IngestManager._find_mapped_authority('Whoops', '12'), None,
"If not found, should return None.")
def test_find_encoded_authority(self):
"""
:func:`zotero.ingest.IngestManager._find_encoded_authority` attempts to find an
:class:`.Authority` record using an equals-encoded classification code.
"""
authority = Authority.objects.create(
name = 'test_authority',
type_controlled = Authority.PERSON,
id = 'CBA000113709',
classification_code='55-56'
)
authority2 = Authority.objects.create(
name = 'test_authority2',
type_controlled = Authority.PERSON,
id = 'CBA000113710',
classification_code='6'
)
candidate = ingest.IngestManager._find_encoded_authority('=56-55=', None)
self.assertEqual(candidate, authority,
"Should return the corresponding authority.")
candidate = ingest.IngestManager._find_encoded_authority('=6=', None)
self.assertEqual(candidate, authority2,
"Should return the corresponding authority.")
self.assertEqual(ingest.IngestManager._find_encoded_authority('=55-56=', None), None,
"If not found, should return None.")
def test_find_explicit_authority(self):
"""
:func:`zotero.ingest.IngestManager._find_explicit_authority` attempts to find an
existing :class:`.Authority` using an explicit identifier.
"""
authority = Authority.objects.create(
name = 'test_authority',
type_controlled = Authority.PERSON,
id = 'CBA000113709',
classification_code='55-56'
)
candidate = ingest.IngestManager._find_explicit_authority('CBA000113709', None)
self.assertEqual(candidate, authority,
"Should return the corresponding authority when"
" the identifier is passed as the label.")
candidate = ingest.IngestManager._find_explicit_authority(None, 'CBA000113709')
self.assertEqual(candidate, authority,
"Should return the corresponding authority when"
" the identifier is passed as the identifier.")
candidate = ingest.IngestManager._find_explicit_authority(None, ' CBA000113709')
self.assertEqual(candidate, authority,
"Should be tolerant of whitespace.")
def test_generate_subject_acrelations(self):
accession = ImportAccession.objects.create(name=u'test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
data = {
u'subjects': [
(u'National Socialism', u'1145'),
(u'1938', None),
(u'T\xf6nnies, Ferdinand', u'CBA000102771'),
(u'=370-140=', None)
],
}
nat_soc = Authority.objects.create(name='National Socialism', id='CBA000114074', type_controlled=Authority.CONCEPT)
ferdinand = Authority.objects.create(name=u'T\xf6nnies, Ferdinand', id='CBA000102771', type_controlled=Authority.PERSON)
something = Authority.objects.create(name='Something', classification_code='140-370', type_controlled=Authority.PERSON)
results = ingest.IngestManager.generate_subject_acrelations(data, draftcitation)
self.assertEqual(len(results), 4, "Should return four results")
for da, dac in results:
if da.name != '1938':
self.assertEqual(da.resolutions.count(), 1)
def test_check_for_authority_reference(self):
data = {
u'subjects': [
(u'moral philosophy', u'CBA000120197'),
(u'Science and civilization', u'1122'),
(u'Enlightenment', u'416'),
(u'Montesquieu, Charles de Secondat, Baron de (1689-1755)',
u'11244'),
(u'18th century', u'12'),
(u'Diderot, Denis (1713-1784)', u'8290'),
(u'Raynal, Guillaume (1713-1796)', u'12117'),
(u'Colonialism', u'246'),
(u'=6=', None)
],
}
def test_source_data_is_preserved(self):
book_data = 'zotero/test_data/Book test.rdf'
papers = ZoteroIngest(book_data)
instance = ImportAccession.objects.create(name='TestAccession')
citations = ingest.IngestManager(papers, instance).process()
for citation in citations:
self.assertNotEqual(citation.source_data, 'null')
def tearDown(self):
for model in [DraftCitation, DraftAuthority, DraftACRelation,
DraftCCRelation, ImportAccession, DraftCitationLinkedData,
DraftAuthorityLinkedData, Authority, AttributeType,
Attribute]:
model.objects.all().delete()
class TestExtraDataParsing(TestCase):
"""
Curators may want to pass additional data in Zotero records, beyond what
is supported by the Zotero scheme. To do this, they may insert key/value
pairs in curly braces in certain fields.
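    For example (illustrative, taken from the tests below): an author name field
    such as 'Pratchett, Terry, 1948-2015 {viaf:76382712}' carries an extra VIAF
    identifier that IngestManager turns into linked data on the authority.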
"""
def test_find_extra_data(self):
"""
:meth:`.IngestManager.find_extra_data` parses strings for explicit
key/value data in curly braces, and returns the data as a list.
"""
raw = 'Some freeform text {key:value}'
data = ingest.IngestManager.find_extra_data(raw)
self.assertIsInstance(data, tuple)
self.assertIn('key', dict(data[0]))
self.assertEqual(dict(data[0]).get('key'), 'value')
def test_apply_extra_data(self):
"""
:meth:`.IngestManager.apply_extra_data` handles parsed key/value pairs
and returns a callable that will update a model instance accordingly.
The callable should return the instance that was passed.
"""
class DummyObject(object):
pass
raw = 'Some freeform text {name:The Best Name}'
data, value = ingest.IngestManager.find_extra_data(raw)
func = ingest.IngestManager.apply_extra_data(data)
obj = func(DummyObject())
self.assertEqual(obj.name, "The Best Name")
def test_match_extra_viaf(self):
"""
:meth:`.IngestManager.apply_extra_data` should operate on VIAF IDs.
"""
raw = 'Some freeform text {viaf:76382712}'
data, value = ingest.IngestManager.find_extra_data(raw)
func = ingest.IngestManager.apply_extra_data(data)
obj = DraftAuthority.objects.create(
name = 'Pratchett, Terry, 1948-2015',
type_controlled = DraftAuthority.PERSON,
part_of = ImportAccession.objects.create(name='TestAccession')
)
obj = func(obj)
self.assertEqual(obj.linkeddata.count(), 1)
self.assertEqual(obj.linkeddata.first().value, 'http://viaf.org/viaf/76382712')
def test_match_is_applied_during_ingest(self):
"""
When creating :class:`.DraftACRelation`\s during ingest, authority
names are parsed for VIAF IDs.
"""
accession = ImportAccession.objects.create(name='Test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
data = {
u'authors': [{
u'name_first': u'Fokko Jan',
u'name': u'Fokko Jan Dijksterhuis {viaf:76382712}',
u'name_last': u'Dijksterhuis'
},{
u'name_first': u'Carsten',
u'name': u'Carsten Timmermann {viaf:76382713}',
u'name_last': u'Timmermann'
}]
}
results = ingest.IngestManager.generate_generic_acrelations(
data, 'authors', draftcitation, DraftAuthority.PERSON,
DraftACRelation.AUTHOR, 0)
draftcitation.refresh_from_db()
for relation in draftcitation.authority_relations.all():
self.assertEqual(relation.authority.linkeddata.count(), 1)
def test_viaf_data_survives_to_production(self):
"""
When accessioned from Zotero to IsisData, linkeddata for authorities
persist.
"""
accession = ImportAccession.objects.create(name='Test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
data = {
u'authors': [{
u'name_first': u'Fokko Jan',
u'name': u'Fokko Jan Dijksterhuis {viaf:76382712}',
u'name_last': u'Dijksterhuis'
},{
u'name_first': u'Carsten',
u'name': u'Carsten Timmermann {viaf:76382713}',
u'name_last': u'Timmermann'
}]
}
ingest.IngestManager.generate_generic_acrelations(
data, 'authors', draftcitation, DraftAuthority.PERSON,
DraftACRelation.AUTHOR, 0)
draftcitation.refresh_from_db()
for relation in draftcitation.authority_relations.all():
auth = Authority.objects.create(name=relation.authority.name, type_controlled=relation.authority.type_controlled)
ingest.IngestManager.resolve(relation.authority, auth)
rf = RequestFactory()
request = rf.get('/hello/')
user = User.objects.create(username='bob', password='what', email='[email protected]')
request.user = user
new_citation = ingest_citation(request, accession, draftcitation)
for relation in new_citation.acrelation_set.all():
self.assertEqual(relation.authority.linkeddata_entries.count(), 1)
def tearDown(self):
for model in [DraftCitation, DraftAuthority, DraftACRelation,
DraftCCRelation, ImportAccession, DraftCitationLinkedData,
DraftAuthorityLinkedData, Authority, AttributeType,
User, Attribute]:
model.objects.all().delete()
class BookSeriesShouldBeSkippedAutomatically(TestCase):
"""
From ISISCB-734:
> During Authority matching. Do not attempt to match book series items to
> periodicals. These should all be skipped. They end up as text in the free
> text field, which is fine.
"""
def test_generate_part_of_relations(self):
accession = ImportAccession.objects.create(name=u'test')
draftcitation = DraftCitation.objects.create(
title = 'Test',
type_controlled = DraftCitation.ARTICLE,
part_of = accession,
)
data = {
'part_of': [{
u'linkeddata': [(u'ISSN', u'0191-6599')],
u'title': u'History of European Ideas',
u'type_controlled': u'Journal'
}, {
u'title': u'En temps & lieux.',
u'type_controlled': u'Series'
}],
}
result = ingest.IngestManager.generate_part_of_relations(data, draftcitation)
        self.assertEqual(len(result), 1, "Should yield one record")
for authority, relation in result:
if relation.type_controlled == ACRelation.BOOK_SERIES:
self.assertTrue(authority.processed)
class ThesisACRelationsAreABitDifferent(TestCase):
"""
From ISISCB-928:
> During Zotero ingest of theses / dissertations, please make the following
> two changes:
> 1) all names marked as contributors should be designated as advisors in
> the ACR link.
> 2) all university data should be marked as school not publisher (as it is
> currently)
"""
def test_universities_should_be_schools_not_publishers(self):
papers = ZoteroIngest('zotero/test_data/thesis.rdf')
instance = ImportAccession.objects.create(name='TestAccession')
# There is only one citation in this accession.
citation = ingest.IngestManager(papers, instance).process()[0]
school_relation = citation.authority_relations.filter(authority__name='Arizona State University').first()
self.assertEqual(school_relation.type_controlled, DraftACRelation.SCHOOL)
def test_contributors_should_be_advisors(self):
papers = ZoteroIngest('zotero/test_data/thesis.rdf')
instance = ImportAccession.objects.create(name='TestAccession')
# There is only one citation in this accession.
citation = ingest.IngestManager(papers, instance).process()[0]
advisor_relation = citation.authority_relations.filter(authority__name='Laubichler, Manfred D.').first()
self.assertEqual(advisor_relation.type_controlled, DraftACRelation.ADVISOR)
| mit | -3,700,071,653,428,453,000 | 39.798663 | 147 | 0.604079 | false |
kcompher/FreeDiscovUI | freediscovery/server/tests/test_clustering.py | 1 | 2200 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import pytest
import json
import itertools
from unittest import SkipTest
from numpy.testing import assert_equal, assert_almost_equal
from .. import fd_app
from ...utils import _silent, dict2type, sdict_keys
from .base import parse_res, V01, app, app_notest, get_features, get_features_lsi
#=============================================================================#
#
# Clustering
#
#=============================================================================#
@pytest.mark.parametrize("model, use_lsi", [('k-mean', False),
('birch', True),
('ward_hc', True),
('dbscan', True)])
def test_api_clustering(app, model, use_lsi):
if use_lsi:
dsid, lsi_id, _ = get_features_lsi(app, hashed=False)
parent_id = lsi_id
else:
dsid, _ = get_features(app, hashed=False)
lsi_id = None
parent_id = dsid
method = V01 + "/feature-extraction/{}".format(dsid)
res = app.get(method)
assert res.status_code == 200
data = parse_res(res) # TODO unused variable
#if (model == 'birch' or model == "ward_hc"):
url = V01 + "/clustering/" + model
pars = { 'parent_id': parent_id, }
if model != 'dbscan':
pars['n_clusters'] = 2
if model == 'dbscan':
pars.update({'eps': 0.1, "min_samples": 2})
res = app.post(url, json=pars)
assert res.status_code == 200
data = parse_res(res)
assert sorted(data.keys()) == sorted(['id'])
mid = data['id']
url += '/{}'.format(mid)
res = app.get(url)
assert res.status_code == 200
data = parse_res(res)
assert sorted(data.keys()) == \
sorted(['cluster_terms', 'labels', 'pars', 'htree'])
if data['htree']:
assert sorted(data['htree'].keys()) == \
sorted(['n_leaves', 'n_components', 'children'])
res = app.delete(method)
assert res.status_code == 200
| bsd-3-clause | 8,105,949,624,726,091,000 | 28.72973 | 81 | 0.519545 | false |
wbsavage/shinken | shinken/modules/glances_ui/plugins/cv_memory/cv_memory.py | 1 | 2398 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from shinken.webui.bottle import redirect, abort
from pprint import pprint
import xmlrpclib
import socket
import json
### Will be populated by the UI with it's own value
app = None
def fancy_units(num):
for x in ['','KB','MB','GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
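# Illustrative behaviour of fancy_units (example values, not from shinken docs):
#   fancy_units(512) -> "512.0", fancy_units(2048) -> "2.0KB",
#   fancy_units(3 * 1024 ** 3) -> "3.0GB"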
def get_processes(h):
addr = h.address
gs = xmlrpclib.ServerProxy('http://%s:%d' % (addr, 61209))
    # 10s max to answer
gs.sock.timeout = 10
ps = json.loads(gs.getProcessList())
return ps
def get_page(hname):
print "MEMORY??"
# First we look for the user sid
# so we bail out if it's a false one
user = app.get_user_auth()
if not user:
redirect("/user/login")
# Ok, we can lookup it
h = app.datamgr.get_host(hname)
error = ''
ps = []
try:
ps = get_processes(h)
except (xmlrpclib.Error, socket.error), exp:
error = str(exp)
return {'app': app, 'elt': h, 'ps':ps, 'fancy_units':fancy_units, 'error' : error}
return {'app': app, 'elt': h, 'ps':ps, 'fancy_units':fancy_units, 'error' : error}
def get_page_proc(hname):
return get_page(hname)
# Void plugin
pages = {get_page: {'routes': ['/cv/memory/:hname'], 'view': 'cv_memory', 'static': True},
get_page_proc: {'routes': ['/cv/processes/:hname'], 'view': 'cv_processes', 'static': True}
}
| agpl-3.0 | -5,770,006,532,879,468,000 | 25.94382 | 100 | 0.634696 | false |
vdloo/raptiformica | tests/unit/raptiformica/actions/mesh/test_stop_detached_cjdroute.py | 1 | 2606 | from raptiformica.actions.mesh import stop_detached_cjdroute
from raptiformica.shell.execute import COMMAND_TIMEOUT
from tests.testcase import TestCase
class TestStopDetachedCjdroute(TestCase):
def setUp(self):
self.log = self.set_up_patch('raptiformica.actions.mesh.log')
self.execute_process = self.set_up_patch(
'raptiformica.shell.execute.execute_process'
)
self.process_output = (0, 'standard out output', '')
self.execute_process.return_value = self.process_output
def test_stop_detached_cjdroute_logs_stopping_detached_cjdroute_message(self):
stop_detached_cjdroute()
self.log.info.assert_called_once_with("Stopping cjdroute in the background")
def test_stop_detached_cjdroute_stops_detached_cjdroute(self):
stop_detached_cjdroute()
expected_command = "ps aux | grep [c]jdroute | awk '{print $2}' | " \
"xargs --no-run-if-empty -I {} " \
"sh -c \"grep -q docker /proc/{}/cgroup && " \
"! grep -q name=systemd:/docker /proc/1/cgroup || kill {}\""
self.execute_process.assert_called_once_with(
expected_command,
shell=True,
buffered=False,
timeout=COMMAND_TIMEOUT
)
self.assertIn(
'ps aux |', expected_command,
'It should list all processes on the system'
)
self.assertIn(
'| grep [c]jdroute |', expected_command,
'Should find the processes with cjdroute in the name, '
'excluding this one'
)
self.assertIn(
"| awk '{print $2}' |", expected_command,
'Should print the PID of the processes matching the name'
)
self.assertIn(
"xargs --no-run-if-empty", expected_command,
'Should map over the found PIDs, do nothing if no matches'
)
self.assertIn(
"-I {} sh -c \"grep -q docker /proc/{}/cgroup && "
"! grep -q name=systemd:/docker /proc/1/cgroup || kill {}\"",
expected_command,
'Should only kill processes not in Docker containers unless '
'running inside a Docker, those could have their own raptiformica '
'instances running'
)
def test_stop_detached_cjdroute_does_not_raise_error_when_stopping_returned_nonzero(self):
process_output = (1, 'standard out output', 'standard error output')
self.execute_process.return_value = process_output
stop_detached_cjdroute()
| mit | -887,037,330,012,145,900 | 41.032258 | 94 | 0.59363 | false |
trustpilot/python-lambdarest | lambdarest/__init__.py | 1 | 13223 | # -*- coding: utf-8 -*-
import json
import logging
from string import Template
from jsonschema import validate, ValidationError, FormatChecker
from werkzeug.routing import Map, Rule, NotFound
from werkzeug.http import HTTP_STATUS_CODES
from functools import wraps
__validate_kwargs = {"format_checker": FormatChecker()}
__required_keys = ["httpMethod"]
__either_keys = ["path", "resource"]
class Response(object):
"""Class to conceptualize a response with default attributes
    if no body is specified, the body field is omitted from the response
if no status_code is specified, 200 is returned
if no headers are specified, empty dict is returned
"""
def __init__(
self, body=None, status_code=None, headers=None, multiValueHeaders=None
):
self.body = body
self.status_code = status_code
self.headers = headers
self.multiValueHeaders = multiValueHeaders
self.status_code_description = None
self.isBase64_encoded = False
def to_json(self, encoder=json.JSONEncoder, application_load_balancer=False):
"""Generates and returns an object with the expected field names.
Note: method name is slightly misleading, should be populate_response or with_defaults etc
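        For a plain API Gateway proxy response the result looks like (illustrative
        values): {"body": "...", "statusCode": 200, "headers": {}}. ALB targets
        additionally get "statusDescription" and "isBase64Encoded".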
"""
status_code = self.status_code or 200
# if it's already a str, we don't need json.dumps
do_json_dumps = self.body is not None and not isinstance(self.body, str)
response = {
"body": json.dumps(self.body, cls=encoder, sort_keys=True)
if do_json_dumps
else self.body,
"statusCode": status_code,
}
## handle multiValueHeaders if defined, default to headers
if self.multiValueHeaders == None:
response["headers"] = self.headers or {}
else:
response["multiValueHeaders"] = self.multiValueHeaders
# if body is None, remove the key
if response.get("body") == None:
response.pop("body")
if application_load_balancer:
response.update(
{
# note must be HTTP [description] as per:
# https://docs.aws.amazon.com/lambda/latest/dg/services-alb.html
# the value of 200 OK fails:
# https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html#respond-to-load-balancer
"statusDescription": self.status_code_description
or "HTTP " + HTTP_STATUS_CODES[status_code],
"isBase64Encoded": self.isBase64_encoded,
}
)
return response
class ScopeMissing(Exception):
pass
def __float_cast(value):
try:
return float(value)
except Exception:
pass
return value
def __marshall_query_params(value):
try:
value = json.loads(value)
except Exception:
value_cand = value.split(",")
if len(value_cand) > 1:
value = list(map(__float_cast, value_cand))
return value
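# Illustrative examples (assumed inputs): '{"a": 1}' -> {"a": 1},
# "1,2,3" -> [1.0, 2.0, 3.0], "foo,bar" -> ["foo", "bar"], "plain" -> "plain"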
def __json_load_query(query):
query = query or {}
return {key: __marshall_query_params(value) for key, value in query.items()}
def default_error_handler(error, method):
logging_message = "[%s][{status_code}]: {message}" % method
logging.exception(logging_message.format(status_code=500, message=str(error)))
def check_update_and_fill_resource_placeholders(resource, path_parameters):
"""
Prepare resource parameters before routing.
In case when resource defined as /path/to/{placeholder}/resource,
the router can't find a correct handler.
This method inserts path parameters
instead of placeholders and returns the result.
:param resource: Resource path definition
:param path_parameters: Path parameters dict
:return: resource definition with inserted path parameters
"""
base_resource = resource
# prepare resource.
# evaluate from /foo/{key1}/bar/{key2}/{proxy+}
# to /foo/${key1}/bar/${key2}/{proxy+}
if path_parameters is not None:
for path_key in path_parameters:
resource = resource.replace("{%s}" % path_key, "${%s}" % path_key)
else:
return base_resource
# insert path_parameteres by template
# /foo/${key1}/bar/${key2}/{proxy+} -> /foo/value1/bar/value2/{proxy+}
template = Template(resource)
try:
resource = template.substitute(**(path_parameters))
return resource
except KeyError:
return base_resource
def create_lambda_handler(
error_handler=default_error_handler,
json_encoder=json.JSONEncoder,
application_load_balancer=False,
):
"""Create a lambda handler function with `handle` decorator as attribute
example:
lambda_handler = create_lambda_handler()
lambda_handler.handle("get")
def my_get_func(event):
pass
Inner_lambda_handler:
is the one you will receive when calling this function. It acts like a
dispatcher calling the registered http handler functions on the basis of the
incoming httpMethod.
All responses are formatted using the lambdarest.Response class.
Inner_handler:
Is the decorator function used to register funtions as handlers of
different http methods.
The inner_handler is also able to validate incoming data using a specified
JSON schema, please see http://json-schema.org for info.
"""
url_maps = Map()
def inner_lambda_handler(event, context=None):
# check if running as "aws lambda proxy"
if (
not isinstance(event, dict)
or not all(key in event for key in __required_keys)
or not any(key in event for key in __either_keys)
):
message = "Bad request, maybe not using Lambda Proxy?"
logging.error(message)
return Response(message, 500).to_json(
application_load_balancer=application_load_balancer
)
# Save context within event for easy access
event["context"] = context
# for application load balancers, no api definition is used hence no resource is set so just use path
if "resource" not in event:
resource = event["path"]
else:
resource = event["resource"]
# Fill placeholders in resource path
if "pathParameters" in event:
resource = check_update_and_fill_resource_placeholders(
resource, event["pathParameters"]
)
path = resource
# Check if a path is set, if so, check if the base path is the same as
# the resource. If not, this is an api with a custom domainname.
# if so, the path will contain the actual request, but it will be
# prefixed with the basepath, which needs to be removed. Api Gateway
# only supports single level basepaths
# eg:
# path: /v2/foo/foobar
# resource: /foo/{name}
# the /v2 needs to be removed
if "path" in event and event["path"].split("/")[1] != resource.split("/")[1]:
path = "/%s" % "/".join(event["path"].split("/")[2:])
# proxy is a bit weird. We just replace the value in the uri with the
# actual value provided by apigw, and use that
if "{proxy+}" in resource:
path = resource.replace("{proxy+}", event["pathParameters"]["proxy"])
method_name = event["httpMethod"].lower()
func = None
kwargs = {}
error_tuple = ("Internal server error", 500)
logging_message = "[%s][{status_code}]: {message}" % method_name
try:
# bind the mapping to an empty server name
mapping = url_maps.bind("")
rule, kwargs = mapping.match(path, method=method_name, return_rule=True)
func = rule.endpoint
# if this is a catch-all rule, don't send any kwargs
if rule.rule == "/<path:path>":
kwargs = {}
except NotFound as e:
logging.warning(logging_message.format(status_code=404, message=str(e)))
error_tuple = (str(e), 404)
if func:
try:
response = func(event, **kwargs)
if not isinstance(response, Response):
# Set defaults
status_code = headers = multiValueHeaders = None
if isinstance(response, tuple):
response_len = len(response)
if response_len > 3:
raise ValueError("Response tuple has more than 3 items")
# Unpack the tuple, missing items will be defaulted
body, status_code, headers, multiValueHeaders = response + (
None,
) * (4 - response_len)
elif isinstance(response, dict) and all(
key in ["body", "statusCode", "headers", "multiValueHeaders"]
for key in response.keys()
):
body = response.get("body")
status_code = response.get("statusCode") or status_code
headers = response.get("headers") or headers
multiValueHeaders = (
response.get("multiValueHeaders") or multiValueHeaders
)
else: # if response is string, int, etc.
body = response
response = Response(body, status_code, headers, multiValueHeaders)
return response.to_json(
encoder=json_encoder,
application_load_balancer=application_load_balancer,
)
except ValidationError as error:
error_description = "Schema[{}] with value {}".format(
"][".join(str(error.absolute_schema_path)), error.message
)
logging.warning(
logging_message.format(status_code=400, message=error_description)
)
error_tuple = ("Validation Error", 400)
except ScopeMissing as error:
error_description = "Permission denied"
logging.warning(
logging_message.format(status_code=403, message=error_description)
)
error_tuple = (error_description, 403)
except Exception as error:
if error_handler:
error_handler(error, method_name)
else:
raise
body, status_code = error_tuple
return Response(body, status_code).to_json(
application_load_balancer=application_load_balancer
)
def inner_handler(method_name, path="/", schema=None, load_json=True, scopes=None):
if schema and not load_json:
raise ValueError("if schema is supplied, load_json needs to be true")
def wrapper(func):
@wraps(func)
def inner(event, *args, **kwargs):
if load_json:
json_data = {
"body": json.loads(event.get("body") or "{}"),
"query": __json_load_query(event.get("queryStringParameters")),
}
event["json"] = json_data
if schema:
# jsonschema.validate using given schema
validate(json_data, schema, **__validate_kwargs)
try:
provided_scopes = json.loads(
event["requestContext"]["authorizer"]["scopes"]
)
except KeyError:
provided_scopes = []
except json.decoder.JSONDecodeError:
# Ignore passed scopes if it isn't properly json encoded
provided_scopes = []
for scope in scopes or []:
if scope not in provided_scopes:
raise ScopeMissing("Scope: '{}' is missing".format(scope))
return func(event, *args, **kwargs)
# if this is a catch all url, make sure that it's setup correctly
if path == "*":
target_path = "/*"
else:
target_path = path
# replace the * with the werkzeug catch all path
if "*" in target_path:
target_path = target_path.replace("*", "<path:path>")
# make sure the path starts with /
if not target_path.startswith("/"):
raise ValueError("Please configure path with starting slash")
# register http handler function
rule = Rule(target_path, endpoint=inner, methods=[method_name.lower()])
url_maps.add(rule)
return inner
return wrapper
lambda_handler = inner_lambda_handler
lambda_handler.handle = inner_handler
return lambda_handler
# singleton
lambda_handler = create_lambda_handler()
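# Minimal usage sketch (assumed handler names; mirrors the docstring of
# create_lambda_handler):
#   @lambda_handler.handle("get", path="/items/<id>")
#   def get_item(event, id):
#       return {"id": id}   # serialized to a JSON body with statusCode 200
#   # API Gateway then invokes lambda_handler(event, context) as the entry point.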
| mit | -7,489,941,035,686,895,000 | 36.247887 | 138 | 0.571807 | false |
zayamatias/retrotool | retroclasses.py | 1 | 6684 | class sprite:
# Sprite class, to make it easier to manipulate afterwards
spriteCount = 0
def __init__ (self,pattern,colors,ored,x,y):
self.pattern=pattern #binary pattern of the sprite
self.colors=colors #colors of the sprite
self.ored = ored #does this sprite come from an ored sprite (for palette purposes)
self.number = sprite.spriteCount #Sprite index
self.x = x #X location of the sprite according to the original image
self.y = y #y location of the sprite according to the original image
sprite.spriteCount = sprite.spriteCount+1 #add one to the index for next sprite
def displayPattern (self):
#for testing purposes, show the pattern on console
rows = self.pattern
for row in rows:
print (row)
def displayColors (self):
#for testing purposes, show the color on console
rows = self.colors
for row in rows:
print (row)
def getPattern (self):
        #returns the pattern of a sprite
line = ""
rows = self.pattern
for row in rows:
line = line + str(row) + "\n"
return line
def getColors (self,ysize):
#returns the colors of a sprite
line = ""
count = 1
rows = self.colors
for row in rows:
line = line + str(row)
if count < ysize :
count = count + 1
line = line + ","
return line
def getAsmPattern (self,width):
#get the pattern of a sprite in ASM mode (db %xxxxxxxxxxxxxxxx)
        #attention: for 16x16 sprites, msx splits them into two 8x16 patterns (left half, then right half)
line = ""
rows = self.pattern
pat1 =""
pat2 =""
for row in rows:
pat1=pat1+"\tdb %"+str(row)[:8]+"\n"
pat2=pat2+"\tdb %"+str(row)[8:]+"\n"
line = pat1
if width > 8:
line = line + pat2
return line
def getBasicPattern (self,width):
        #get the pattern of a sprite in BASIC mode (DATA xxxxxxxx lines)
        #attention: for 16x16 sprites, msx splits them into two 8x16 patterns (left half, then right half)
linel = []
liner = []
rows = self.pattern
for row in rows:
linel.append(" DATA "+str(row)[:8]+"\n")
liner.append(" DATA "+str(row)[8:]+"\n")
return linel+liner
def getAsmColors (self,ysize):
#get the colors of a sprite in ASM mode (db 1,2,3....) each byte represents the # of the color in the palette
        #for OR-ed colors, the CC bit (bit 6) must be set, hence the +64
line = "\tdb "
rows = self.colors
count = 1
for row in rows:
if self.ored :
if (row!=0):
row = row + 64
line = line + str(row)
if count < ysize :
count = count + 1
line = line + ","
line = line + "\n"
return line
def getBASICColors (self,ysize):
        #get the colors of a sprite in BASIC mode (1,2,3....) each byte represents the # of the color in the palette
        #for OR-ed colors, the CC bit (bit 6) must be set, hence the +64
line = ""
rows = self.colors
count = 1
for row in rows:
if self.ored :
if (row!=0):
row = row + 64
line = line + str(row)
if count < ysize :
count = count + 1
line = line + ","
line = line + "\n"
return line
class character:
# defines a character that wil contains a matrix of sprites
def __init__ (self,rows,cols):
self.rows = rows
self.cols = cols
self.sprites = [[0 for x in range(cols)] for y in range(rows)]
def insertSprite (self,sprite,row,col):
self.sprites[row][col]=sprite
class animation:
# defines a animation, which is a list of characters to be shown one after the other
def __init__ (self):
self.characters = []
def addCharacter(self,character):
self.characters.append(character)
def numFrames(self):
return (len(self.characters))
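# Hedged usage sketch (assumed workflow, not from the original project):
# a character is a rows x cols grid of sprite objects and an animation is an
# ordered list of such characters (frames), e.g.
#   body = character(1, 2)          # 1 row x 2 columns of sprite objects
#   body.insertSprite(spr, 0, 0)    # 'spr' as built in the sprite sketch above
#   walk = animation()
#   walk.addCharacter(body)
#   walk.numFrames()                # -> 1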
class tile:
# Tile class, to make it easier to manipulate afterwards
tileCount = 0
def __init__ (self,pattern,colors):
self.pattern=pattern #binary pattern of the sprite
self.number = tile.tileCount #Sprite index
self.colors=colors #colors of the sprite
tile.tileCount = tile.tileCount+1 #add one to the index for next sprite
def displayPattern (self):
#for testing purposes, show the pattern on console
rows = self.pattern
for row in rows:
print (row)
def displayColors (self):
#for testing purposes, show the color on console
rows = self.colors
for row in rows:
print (row)
def getPattern (self):
		#returns the pattern of a sprite
line = ""
rows = self.pattern
for row in rows:
line = line + str(row) + "\n"
return line
def getColors (self,ysize):
#returns the colors of a sprite
line = ""
count = 1
rows = self.colors
for row in rows:
line = line + str(row)
if count < ysize :
count = count + 1
line = line + ","
return line
def getAsmPattern (self,width):
#get the pattern of a tile in ASM mode (db %xxxxxxxxxxxxxxxx)
#Normally width is always 8, but let's keep it system agnostic
line = ""
rows = self.pattern
line =""
for row in rows:
line = line + "\tdb %"+str(row)+"\n"
return line
def getAsmColors (self,ysize):
#get the colors of a tile in ASM mode
#things get tricky, 2 colors are saved on a single byte
#bg color is stored in 4 less significant bits (0000XXXX)
#fg color is stored in 4 most significant bits (XXXX0000)
#so final byte is $fgbg
#I could have done it by simply sticking the values together, but shifting is fun!
rows = self.colors
line = "\tdb "
count = 0
for row in rows:
line = line + "$"+"{0:02x}".format(row)
count = count +1
if count < len(rows):
line = line + ","
return line | gpl-3.0 | -4,547,728,878,825,318,000 | 31.939086 | 117 | 0.524087 | false |
akash1808/cachetools | cachetools/func.py | 1 | 3382 | import collections
import functools
import random
import time
from .lfu import LFUCache
from .lru import LRUCache
from .rr import RRCache
from .ttl import TTLCache
try:
from threading import RLock
except ImportError:
from dummy_threading import RLock
_CacheInfo = collections.namedtuple('CacheInfo', [
'hits', 'misses', 'maxsize', 'currsize'
])
class _NullContext:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
_nullcontext = _NullContext()
def _makekey_untyped(args, kwargs):
return (args, tuple(sorted(kwargs.items())))
def _makekey_typed(args, kwargs):
key = _makekey_untyped(args, kwargs)
key += tuple(type(v) for v in args)
key += tuple(type(v) for _, v in sorted(kwargs.items()))
return key
def _cachedfunc(cache, typed=False, lock=None):
makekey = _makekey_typed if typed else _makekey_untyped
context = lock() if lock else _nullcontext
def decorator(func):
stats = [0, 0]
def wrapper(*args, **kwargs):
key = makekey(args, kwargs)
with context:
try:
result = cache[key]
stats[0] += 1
return result
except KeyError:
stats[1] += 1
result = func(*args, **kwargs)
with context:
try:
cache[key] = result
except ValueError:
pass # value too large
return result
def cache_info():
with context:
hits, misses = stats
maxsize = cache.maxsize
currsize = cache.currsize
return _CacheInfo(hits, misses, maxsize, currsize)
def cache_clear():
with context:
stats[:] = [0, 0]
cache.clear()
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return functools.update_wrapper(wrapper, func)
return decorator
def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Frequently Used (LFU)
algorithm.
"""
return _cachedfunc(LFUCache(maxsize, getsizeof), typed, lock)
def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm.
"""
return _cachedfunc(LRUCache(maxsize, getsizeof), typed, lock)
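# Hedged usage sketch (illustrative only): the decorators in this module mirror
# functools.lru_cache but are backed by the cachetools cache classes; with
# typed=True, f(3) and f(3.0) are cached under different keys.
#   @lru_cache(maxsize=32)
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#   fib(30)
#   fib.cache_info()   # -> CacheInfo(hits=28, misses=31, maxsize=32, currsize=31)
#   fib.cache_clear()  # empties the cache and resets the counters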
def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None,
lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Random Replacement (RR)
algorithm.
"""
return _cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock)
def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False,
getsizeof=None, lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm with a per-item time-to-live (TTL) value.
"""
return _cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock)
| mit | -298,878,277,565,286,600 | 26.950413 | 77 | 0.611473 | false |
cernops/cloudbase-init | cloudbaseinit/tests/plugins/windows/test_localscripts.py | 1 | 2254 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import unittest
from cloudbaseinit.plugins import base
from cloudbaseinit.plugins.windows import localscripts
from oslo.config import cfg
CONF = cfg.CONF
class LocalScriptsPluginTests(unittest.TestCase):
def setUp(self):
self._localscripts = localscripts.LocalScriptsPlugin()
@mock.patch('os.listdir')
@mock.patch('os.path.isfile')
def test_get_files_in_dir(self, mock_isfile, mock_listdir):
fake_path = os.path.join('fake', 'path')
fake_file_list = ['second', 'first', 'third', 'last']
mock_isfile.return_value = True
mock_listdir.return_value = fake_file_list
response = self._localscripts._get_files_in_dir(fake_path)
mock_listdir.assert_called_once_with(fake_path)
self.assertEqual(
sorted(os.path.join(fake_path, f) for f in fake_file_list),
response)
@mock.patch('cloudbaseinit.plugins.windows.localscripts'
'.LocalScriptsPlugin._get_files_in_dir')
@mock.patch('cloudbaseinit.plugins.windows.fileexecutils.exec_file')
def test_execute(self, mock_exec_file, mock_get_files_in_dir):
mock_service = mock.MagicMock()
fake_path = os.path.join('fake', 'path')
CONF.set_override('local_scripts_path', True)
mock_get_files_in_dir.return_value = [fake_path]
response = self._localscripts.execute(mock_service, shared_data=None)
mock_get_files_in_dir.assert_called_once_with(CONF.local_scripts_path)
mock_exec_file.assert_called_once_with(fake_path)
self.assertEqual((base.PLUGIN_EXECUTION_DONE, False), response)
| apache-2.0 | -5,743,922,069,370,638,000 | 37.862069 | 78 | 0.692103 | false |
HuaweiSwitch/CloudEngine-Ansible | library/ce_vrrp.py | 1 | 55373 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_vrrp
version_added: "2.4"
short_description: Manages VRRP interfaces on HUAWEI CloudEngine devices.
description:
- Manages VRRP interface attributes on HUAWEI CloudEngine devices.
author:
- Li Yanfeng (@CloudEngine-Ansible)
options:
interface:
description:
- Name of an interface. The value is a string of 1 to 63 characters.
required: false
default: null
vrid:
description:
- VRRP backup group ID.
The value is an integer ranging from 1 to 255.
required: false
        default: null
virtual_ip :
description:
- Virtual IP address. The value is a string of 0 to 255 characters.
required: false
default: null
vrrp_type:
description:
- Type of a VRRP backup group.
required: false
choices: ['normal', 'member', 'admin']
default: null
admin_ignore_if_down:
description:
- mVRRP ignores an interface Down event.
required: false
default: False
admin_vrid:
description:
- Tracked mVRRP ID. The value is an integer ranging from 1 to 255.
required: false
default: null
admin_interface:
description:
- Tracked mVRRP interface name. The value is a string of 1 to 63 characters.
required: false
default: null
admin_flowdown:
description:
- Disable the flowdown function for service VRRP.
required: false
default: False
priority:
description:
- Configured VRRP priority.
The value ranges from 1 to 254. The default value is 100. A larger value indicates a higher priority.
required: false
default: null
version:
description:
- VRRP version. The default version is v2.
required: false
choices: ['v2','v3']
default: null
advertise_interval:
description:
- Configured interval between sending advertisements, in milliseconds.
Only the master router sends VRRP advertisements. The default value is 1000 milliseconds.
required: false
default: null
preempt_timer_delay:
description:
- Preemption delay.
The value is an integer ranging from 0 to 3600. The default value is 0.
required: false
default: null
gratuitous_arp_interval:
description:
- Interval at which gratuitous ARP packets are sent, in seconds.
The value ranges from 30 to 1200.The default value is 300.
required: false
default: null
recover_delay:
description:
- Delay in recovering after an interface goes Up.
The delay is used for interface flapping suppression.
The value is an integer ranging from 0 to 3600.
The default value is 0 seconds.
required: false
default: null
holding_multiplier:
description:
- The configured holdMultiplier.The value is an integer ranging from 3 to 10. The default value is 3.
required: false
default: null
auth_mode:
description:
- Authentication type used for VRRP packet exchanges between virtual routers.
The values are noAuthentication, simpleTextPassword, md5Authentication.
The default value is noAuthentication.
required: false
choices: ['simple','md5','none']
default: null
is_plain:
description:
- Select the display mode of an authentication key.
By default, an authentication key is displayed in ciphertext.
required: false
default: False
auth_key:
description:
- This object is set based on the authentication type.
When noAuthentication is specified, the value is empty.
When simpleTextPassword or md5Authentication is specified, the value is a string of 1 to 8 characters
in plaintext and displayed as a blank text for security.
required: false
default: null
fast_resume:
description:
- mVRRP's fast resume mode.
required: false
choices: ['enable','disable']
default: null
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: vrrp module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Set vrrp version
ce_vrrp:
version: v3
provider: "{{ cli }}"
- name: Set vrrp gratuitous-arp interval
ce_vrrp:
gratuitous_arp_interval: 40
provider: "{{ cli }}"
- name: Set vrrp recover-delay
ce_vrrp:
recover_delay: 10
provider: "{{ cli }}"
- name: Set vrrp vrid virtual-ip
ce_vrrp:
interface: 40GE2/0/8
vrid: 1
virtual_ip: 10.14.2.7
provider: "{{ cli }}"
- name: Set vrrp vrid admin
ce_vrrp:
interface: 40GE2/0/8
vrid: 1
vrrp_type: admin
provider: "{{ cli }}"
- name: Set vrrp vrid fast_resume
ce_vrrp:
interface: 40GE2/0/8
vrid: 1
fast_resume: enable
provider: "{{ cli }}"
- name: Set vrrp vrid holding-multiplier
ce_vrrp:
interface: 40GE2/0/8
vrid: 1
holding_multiplier: 4
provider: "{{ cli }}"
- name: Set vrrp vrid preempt timer delay
ce_vrrp:
interface: 40GE2/0/8
vrid: 1
preempt_timer_delay: 10
provider: "{{ cli }}"
- name: Set vrrp vrid admin-vrrp
ce_vrrp:
interface: 40GE2/0/8
vrid: 1
admin_interface: 40GE2/0/9
admin_vrid: 2
vrrp_type: member
provider: "{{ cli }}"
- name: Set vrrp vrid authentication-mode
ce_vrrp:
interface: 40GE2/0/8
vrid: 1
is_plain: true
auth_mode: simple
auth_key: aaa
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"auth_key": "aaa",
"auth_mode": "simple",
"interface": "40GE2/0/8",
"is_plain": true,
"state": "present",
"vrid": "1"
}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {
"auth_mode": "none",
"interface": "40GE2/0/8",
"is_plain": "false",
"vrid": "1",
"vrrp_type": "normal"
}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {
"auth_mode": "simple",
"interface": "40GE2/0/8",
"is_plain": "true",
"vrid": "1",
"vrrp_type": "normal"
}
updates:
description: command sent to the device
returned: always
type: list
sample: { "interface 40GE2/0/8",
"vrrp vrid 1 authentication-mode simple plain aaa"}
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_VRRP_GROUP_INFO = """
<filter type="subtree">
<vrrp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vrrpGroups>
<vrrpGroup>
<ifName>%s</ifName>
<vrrpId>%s</vrrpId>
</vrrpGroup>
</vrrpGroups>
</vrrp>
</filter>
"""
CE_NC_SET_VRRP_GROUP_INFO_HEAD = """
<config>
<vrrp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vrrpGroups>
<vrrpGroup operation="merge">
<ifName>%s</ifName>
<vrrpId>%s</vrrpId>
"""
CE_NC_SET_VRRP_GROUP_INFO_TAIL = """
</vrrpGroup>
</vrrpGroups>
</vrrp>
</config>
"""
CE_NC_GET_VRRP_GLOBAL_INFO = """
<filter type="subtree">
<vrrp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vrrpGlobalCfg>
<gratuitousArpFlag></gratuitousArpFlag>
<gratuitousArpTimeOut></gratuitousArpTimeOut>
<recoverDelay></recoverDelay>
<version></version>
</vrrpGlobalCfg>
</vrrp>
</filter>
"""
CE_NC_SET_VRRP_GLOBAL_HEAD = """
<config>
<vrrp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vrrpGlobalCfg operation="merge">
"""
CE_NC_SET_VRRP_GLOBAL_TAIL = """
</vrrpGlobalCfg>
</vrrp>
</config>
"""
CE_NC_GET_VRRP_VIRTUAL_IP_INFO = """
<filter type="subtree">
<vrrp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vrrpGroups>
<vrrpGroup>
<vrrpId>%s</vrrpId>
<ifName>%s</ifName>
<virtualIps>
<virtualIp>
<virtualIpAddress></virtualIpAddress>
</virtualIp>
</virtualIps>
</vrrpGroup>
</vrrpGroups>
</vrrp>
</filter>
"""
CE_NC_CREATE_VRRP_VIRTUAL_IP_INFO = """
<config>
<vrrp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vrrpGroups>
<vrrpGroup>
<vrrpId>%s</vrrpId>
<ifName>%s</ifName>
<vrrpType>normal</vrrpType>
<virtualIps>
<virtualIp operation="create">
<virtualIpAddress>%s</virtualIpAddress>
</virtualIp>
</virtualIps>
</vrrpGroup>
</vrrpGroups>
</vrrp>
</config>
"""
CE_NC_DELETE_VRRP_VIRTUAL_IP_INFO = """
<config>
<vrrp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vrrpGroups>
<vrrpGroup>
<vrrpId>%s</vrrpId>
<ifName>%s</ifName>
<virtualIps>
<virtualIp operation="delete">
<virtualIpAddress>%s</virtualIpAddress>
</virtualIp>
</virtualIps>
</vrrpGroup>
</vrrpGroups>
</vrrp>
</config>
"""
def is_valid_address(address):
"""check ip-address is valid"""
if address.find('.') != -1:
addr_list = address.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('NULL'):
iftype = 'null'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
else:
return None
return iftype.lower()
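# Illustrative results of the two helpers above (example values assumed):
#   is_valid_address("10.14.2.7")    -> True
#   is_valid_address("10.14.2.300")  -> False   (octet out of range)
#   get_interface_type("40GE2/0/8")  -> "40ge"
#   get_interface_type("Vlanif100")  -> "vlanif"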
class Vrrp(object):
"""
Manages Manages vrrp information.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.interface = self.module.params['interface']
self.vrid = self.module.params['vrid']
self.virtual_ip = self.module.params['virtual_ip']
self.vrrp_type = self.module.params['vrrp_type']
self.admin_ignore_if_down = self.module.params['admin_ignore_if_down']
self.admin_vrid = self.module.params['admin_vrid']
self.admin_interface = self.module.params['admin_interface']
self.admin_flowdown = self.module.params['admin_flowdown']
self.priority = self.module.params['priority']
self.version = self.module.params['version']
self.advertise_interval = self.module.params['advertise_interval']
self.preempt_timer_delay = self.module.params['preempt_timer_delay']
self.gratuitous_arp_interval = self.module.params[
'gratuitous_arp_interval']
self.recover_delay = self.module.params['recover_delay']
self.holding_multiplier = self.module.params['holding_multiplier']
self.auth_mode = self.module.params['auth_mode']
self.is_plain = self.module.params['is_plain']
self.auth_key = self.module.params['auth_key']
self.fast_resume = self.module.params['fast_resume']
self.state = self.module.params['state']
# vrrp info
self.vrrp_global_info = None
self.virtual_ip_info = None
self.vrrp_group_info = None
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.existing = dict()
self.proposed = dict()
self.end_state = dict()
def init_module(self):
""" init module """
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def get_virtual_ip_info(self):
""" get vrrp virtual ip info."""
virtual_ip_info = dict()
conf_str = CE_NC_GET_VRRP_VIRTUAL_IP_INFO % (self.vrid, self.interface)
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return virtual_ip_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
virtual_ip_info["vrrpVirtualIpInfos"] = list()
root = ElementTree.fromstring(xml_str)
vrrp_virtual_ip_infos = root.findall(
"data/vrrp/vrrpGroups/vrrpGroup/virtualIps/virtualIp")
if vrrp_virtual_ip_infos:
for vrrp_virtual_ip_info in vrrp_virtual_ip_infos:
virtual_ip_dict = dict()
for ele in vrrp_virtual_ip_info:
if ele.tag in ["virtualIpAddress"]:
virtual_ip_dict[ele.tag] = ele.text
virtual_ip_info["vrrpVirtualIpInfos"].append(
virtual_ip_dict)
return virtual_ip_info
def get_vrrp_global_info(self):
""" get vrrp global info."""
vrrp_global_info = dict()
conf_str = CE_NC_GET_VRRP_GLOBAL_INFO
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return vrrp_global_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
global_info = root.findall(
"data/vrrp/vrrpGlobalCfg")
if global_info:
for tmp in global_info:
for site in tmp:
if site.tag in ["gratuitousArpTimeOut", "gratuitousArpFlag", "recoverDelay", "version"]:
vrrp_global_info[site.tag] = site.text
return vrrp_global_info
def get_vrrp_group_info(self):
""" get vrrp group info."""
vrrp_group_info = dict()
conf_str = CE_NC_GET_VRRP_GROUP_INFO % (self.interface, self.vrid)
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return vrrp_group_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
global_info = root.findall(
"data/vrrp/vrrpGroups/vrrpGroup")
if global_info:
for tmp in global_info:
for site in tmp:
if site.tag in ["ifName", "vrrpId", "priority", "advertiseInterval", "preemptMode", "delayTime",
"authenticationMode", "authenticationKey", "vrrpType", "adminVrrpId",
"adminIfName", "adminIgnoreIfDown", "isPlain", "unflowdown", "fastResume",
"holdMultiplier"]:
vrrp_group_info[site.tag] = site.text
return vrrp_group_info
def check_params(self):
"""Check all input params"""
# interface check
if self.interface:
intf_type = get_interface_type(self.interface)
if not intf_type:
self.module.fail_json(
msg='Error: Interface name of %s '
                        'is invalid.' % self.interface)
# vrid check
if self.vrid:
if not self.vrid.isdigit():
self.module.fail_json(
msg='Error: The value of vrid is an integer.')
if int(self.vrid) < 1 or int(self.vrid) > 255:
self.module.fail_json(
msg='Error: The value of vrid ranges from 1 to 255.')
# virtual_ip check
if self.virtual_ip:
if not is_valid_address(self.virtual_ip):
self.module.fail_json(
msg='Error: The %s is not a valid ip address.' % self.virtual_ip)
# admin_vrid check
if self.admin_vrid:
if not self.admin_vrid.isdigit():
self.module.fail_json(
msg='Error: The value of admin_vrid is an integer.')
if int(self.admin_vrid) < 1 or int(self.admin_vrid) > 255:
self.module.fail_json(
msg='Error: The value of admin_vrid ranges from 1 to 255.')
# admin_interface check
if self.admin_interface:
intf_type = get_interface_type(self.admin_interface)
if not intf_type:
self.module.fail_json(
msg='Error: Admin interface name of %s '
                        'is invalid.' % self.admin_interface)
# priority check
if self.priority:
if not self.priority.isdigit():
self.module.fail_json(
msg='Error: The value of priority is an integer.')
if int(self.priority) < 1 or int(self.priority) > 254:
self.module.fail_json(
msg='Error: The value of priority ranges from 1 to 254. The default value is 100.')
# advertise_interval check
if self.advertise_interval:
if not self.advertise_interval.isdigit():
self.module.fail_json(
msg='Error: The value of advertise_interval is an integer.')
if int(self.advertise_interval) < 1 or int(self.advertise_interval) > 255000:
self.module.fail_json(
msg='Error: The value of advertise_interval ranges from 1 to 255000. The default value is 1000.')
# preempt_timer_delay check
if self.preempt_timer_delay:
if not self.preempt_timer_delay.isdigit():
self.module.fail_json(
msg='Error: The value of preempt_timer_delay is an integer.')
if int(self.preempt_timer_delay) < 1 or int(self.preempt_timer_delay) > 3600:
self.module.fail_json(
msg='Error: The value of preempt_timer_delay ranges from 1 to 3600. The default value is 0.')
# holding_multiplier check
if self.holding_multiplier:
if not self.holding_multiplier.isdigit():
self.module.fail_json(
msg='Error: The value of holding_multiplier is an integer.')
if int(self.holding_multiplier) < 3 or int(self.holding_multiplier) > 10:
self.module.fail_json(
msg='Error: The value of holding_multiplier ranges from 3 to 10. The default value is 3.')
# auth_key check
if self.auth_key:
if len(self.auth_key) > 16 \
or len(self.auth_key.replace(' ', '')) < 1:
self.module.fail_json(
msg='Error: The length of auth_key is not in the range from 1 to 16.')
def is_virtual_ip_change(self):
"""whether virtual ip change"""
if not self.virtual_ip_info:
return True
for info in self.virtual_ip_info["vrrpVirtualIpInfos"]:
if info["virtualIpAddress"] == self.virtual_ip:
return False
return True
def is_virtual_ip_exist(self):
"""whether virtual ip info exist"""
if not self.virtual_ip_info:
return False
for info in self.virtual_ip_info["vrrpVirtualIpInfos"]:
if info["virtualIpAddress"] == self.virtual_ip:
return True
return False
def is_vrrp_global_info_change(self):
"""whether vrrp global attribute info change"""
if not self.vrrp_global_info:
return True
if self.gratuitous_arp_interval:
if self.vrrp_global_info["gratuitousArpFlag"] == "false":
self.module.fail_json(msg="Error: gratuitousArpFlag is false.")
if self.vrrp_global_info["gratuitousArpTimeOut"] != self.gratuitous_arp_interval:
return True
if self.recover_delay:
if self.vrrp_global_info["recoverDelay"] != self.recover_delay:
return True
if self.version:
if self.vrrp_global_info["version"] != self.version:
return True
return False
def is_vrrp_global_info_exist(self):
"""whether vrrp global attribute info exist"""
if self.gratuitous_arp_interval or self.recover_delay or self.version:
if self.gratuitous_arp_interval:
if self.vrrp_global_info["gratuitousArpFlag"] == "false":
self.module.fail_json(
msg="Error: gratuitousArpFlag is false.")
if self.vrrp_global_info["gratuitousArpTimeOut"] != self.gratuitous_arp_interval:
return False
if self.recover_delay:
if self.vrrp_global_info["recoverDelay"] != self.recover_delay:
return False
if self.version:
if self.vrrp_global_info["version"] != self.version:
return False
return True
return False
def is_vrrp_group_info_change(self):
"""whether vrrp group attribute info change"""
if self.vrrp_type:
if self.vrrp_group_info["vrrpType"] != self.vrrp_type:
return True
if self.admin_ignore_if_down:
if self.vrrp_group_info["adminIgnoreIfDown"] != self.admin_ignore_if_down:
return True
if self.admin_vrid:
if self.vrrp_group_info["adminVrrpId"] != self.admin_vrid:
return True
if self.admin_interface:
if self.vrrp_group_info["adminIfName"] != self.admin_interface:
return True
if self.admin_flowdown:
if self.vrrp_group_info["unflowdown"] != self.admin_flowdown:
return True
if self.priority:
if self.vrrp_group_info["priority"] != self.priority:
return True
if self.fast_resume:
fast_resume = "false"
if self.fast_resume == "enable":
fast_resume = "true"
if self.vrrp_group_info["fastResume"] != fast_resume:
return True
if self.advertise_interval:
if self.vrrp_group_info["advertiseInterval"] != self.advertise_interval:
return True
if self.preempt_timer_delay:
if self.vrrp_group_info["delayTime"] != self.preempt_timer_delay:
return True
if self.holding_multiplier:
if self.vrrp_group_info["holdMultiplier"] != self.holding_multiplier:
return True
if self.auth_mode:
if self.vrrp_group_info["authenticationMode"] != self.auth_mode:
return True
if self.auth_key:
return True
if self.is_plain:
if self.vrrp_group_info["isPlain"] != self.is_plain:
return True
return False
def is_vrrp_group_info_exist(self):
"""whether vrrp group attribute info exist"""
if self.vrrp_type:
if self.vrrp_group_info["vrrpType"] != self.vrrp_type:
return False
if self.admin_ignore_if_down:
if self.vrrp_group_info["adminIgnoreIfDown"] != self.admin_ignore_if_down:
return False
if self.admin_vrid:
if self.vrrp_group_info["adminVrrpId"] != self.admin_vrid:
return False
if self.admin_interface:
if self.vrrp_group_info["adminIfName"] != self.admin_interface:
return False
if self.admin_flowdown:
if self.vrrp_group_info["unflowdown"] != self.admin_flowdown:
return False
if self.priority:
if self.vrrp_group_info["priority"] != self.priority:
return False
if self.fast_resume:
fast_resume = "false"
if self.fast_resume == "enable":
fast_resume = "true"
if self.vrrp_group_info["fastResume"] != fast_resume:
return False
if self.advertise_interval:
if self.vrrp_group_info["advertiseInterval"] != self.advertise_interval:
return False
if self.preempt_timer_delay:
if self.vrrp_group_info["delayTime"] != self.preempt_timer_delay:
return False
if self.holding_multiplier:
if self.vrrp_group_info["holdMultiplier"] != self.holding_multiplier:
return False
if self.auth_mode:
if self.vrrp_group_info["authenticationMode"] != self.auth_mode:
return False
if self.is_plain:
if self.vrrp_group_info["isPlain"] != self.is_plain:
return False
return True
def create_virtual_ip(self):
"""create virtual ip info"""
if self.is_virtual_ip_change():
conf_str = CE_NC_CREATE_VRRP_VIRTUAL_IP_INFO % (
self.vrid, self.interface, self.virtual_ip)
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: create virtual ip info failed.')
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"vrrp vrid %s virtual-ip %s" % (self.vrid, self.virtual_ip))
self.changed = True
def delete_virtual_ip(self):
"""delete virtual ip info"""
if self.is_virtual_ip_exist():
conf_str = CE_NC_DELETE_VRRP_VIRTUAL_IP_INFO % (
self.vrid, self.interface, self.virtual_ip)
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: delete virtual ip info failed.')
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"undo vrrp vrid %s virtual-ip %s " % (self.vrid, self.virtual_ip))
self.changed = True
def set_vrrp_global(self):
"""set vrrp global attribute info"""
if self.is_vrrp_global_info_change():
conf_str = CE_NC_SET_VRRP_GLOBAL_HEAD
if self.gratuitous_arp_interval:
conf_str += "<gratuitousArpTimeOut>%s</gratuitousArpTimeOut>" % self.gratuitous_arp_interval
if self.recover_delay:
conf_str += "<recoverDelay>%s</recoverDelay>" % self.recover_delay
if self.version:
conf_str += "<version>%s</version>" % self.version
conf_str += CE_NC_SET_VRRP_GLOBAL_TAIL
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
                    msg='Error: set vrrp global attribute info failed.')
if self.gratuitous_arp_interval:
self.updates_cmd.append(
"vrrp gratuitous-arp interval %s" % self.gratuitous_arp_interval)
if self.recover_delay:
self.updates_cmd.append(
"vrrp recover-delay %s" % self.recover_delay)
if self.version:
version = "3"
if self.version == "v2":
version = "2"
self.updates_cmd.append("vrrp version %s" % version)
self.changed = True
def delete_vrrp_global(self):
"""delete vrrp global attribute info"""
if self.is_vrrp_global_info_exist():
conf_str = CE_NC_SET_VRRP_GLOBAL_HEAD
if self.gratuitous_arp_interval:
if self.gratuitous_arp_interval == "120":
self.module.fail_json(
msg='Error: The default value of gratuitous_arp_interval is 120.')
gratuitous_arp_interval = "120"
conf_str += "<gratuitousArpTimeOut>%s</gratuitousArpTimeOut>" % gratuitous_arp_interval
if self.recover_delay:
if self.recover_delay == "0":
self.module.fail_json(
msg='Error: The default value of recover_delay is 0.')
recover_delay = "0"
conf_str += "<recoverDelay>%s</recoverDelay>" % recover_delay
if self.version:
if self.version == "v2":
self.module.fail_json(
msg='Error: The default value of version is v2.')
version = "v2"
conf_str += "<version>%s</version>" % version
conf_str += CE_NC_SET_VRRP_GLOBAL_TAIL
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
                    msg='Error: set vrrp global attribute info failed.')
if self.gratuitous_arp_interval:
self.updates_cmd.append("undo vrrp gratuitous-arp interval")
if self.recover_delay:
self.updates_cmd.append("undo vrrp recover-delay")
if self.version == "v3":
self.updates_cmd.append("undo vrrp version")
self.changed = True
def set_vrrp_group(self):
"""set vrrp group attribute info"""
if self.is_vrrp_group_info_change():
conf_str = CE_NC_SET_VRRP_GROUP_INFO_HEAD % (
self.interface, self.vrid)
if self.vrrp_type:
conf_str += "<vrrpType>%s</vrrpType>" % self.vrrp_type
if self.admin_vrid:
conf_str += "<adminVrrpId>%s</adminVrrpId>" % self.admin_vrid
if self.admin_interface:
conf_str += "<adminIfName>%s</adminIfName>" % self.admin_interface
if self.admin_flowdown is True or self.admin_flowdown is False:
admin_flowdown = "false"
if self.admin_flowdown is True:
admin_flowdown = "true"
conf_str += "<unflowdown>%s</unflowdown>" % admin_flowdown
if self.priority:
conf_str += "<priority>%s</priority>" % self.priority
if self.vrrp_type == "admin":
if self.admin_ignore_if_down is True or self.admin_ignore_if_down is False:
admin_ignore_if_down = "false"
if self.admin_ignore_if_down is True:
admin_ignore_if_down = "true"
conf_str += "<adminIgnoreIfDown>%s</adminIgnoreIfDown>" % admin_ignore_if_down
if self.fast_resume:
fast_resume = "false"
if self.fast_resume == "enable":
fast_resume = "true"
conf_str += "<fastResume>%s</fastResume>" % fast_resume
if self.advertise_interval:
conf_str += "<advertiseInterval>%s</advertiseInterval>" % self.advertise_interval
if self.preempt_timer_delay:
conf_str += "<delayTime>%s</delayTime>" % self.preempt_timer_delay
if self.holding_multiplier:
conf_str += "<holdMultiplier>%s</holdMultiplier>" % self.holding_multiplier
if self.auth_mode:
conf_str += "<authenticationMode>%s</authenticationMode>" % self.auth_mode
if self.auth_key:
conf_str += "<authenticationKey>%s</authenticationKey>" % self.auth_key
if self.auth_mode == "simple":
is_plain = "false"
if self.is_plain is True:
is_plain = "true"
conf_str += "<isPlain>%s</isPlain>" % is_plain
conf_str += CE_NC_SET_VRRP_GROUP_INFO_TAIL
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
                    msg='Error: set vrrp group attribute info failed.')
if self.interface and self.vrid:
if self.vrrp_type == "admin":
if self.admin_ignore_if_down is True:
self.updates_cmd.append(
"interface %s" % self.interface)
self.updates_cmd.append(
"vrrp vrid %s admin ignore-if-down" % self.vrid)
else:
self.updates_cmd.append(
"interface %s" % self.interface)
self.updates_cmd.append(
"vrrp vrid %s admin" % self.vrid)
if self.priority:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"vrrp vrid %s priority %s" % (self.vrid, self.priority))
if self.fast_resume == "enable":
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"vrrp vrid %s fast-resume" % self.vrid)
if self.fast_resume == "disable":
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"undo vrrp vrid %s fast-resume" % self.vrid)
if self.advertise_interval:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append("vrrp vrid %s timer advertise %s" % (
self.vrid, self.advertise_interval))
if self.preempt_timer_delay:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append("vrrp vrid %s preempt timer delay %s" % (self.vrid,
self.preempt_timer_delay))
if self.holding_multiplier:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"vrrp vrid %s holding-multiplier %s" % (self.vrid, self.holding_multiplier))
if self.admin_vrid and self.admin_interface:
if self.admin_flowdown is True:
self.updates_cmd.append(
"interface %s" % self.interface)
self.updates_cmd.append("vrrp vrid %s track admin-vrrp interface %s vrid %s unflowdown" %
(self.vrid, self.admin_interface, self.admin_vrid))
else:
self.updates_cmd.append(
"interface %s" % self.interface)
self.updates_cmd.append("vrrp vrid %s track admin-vrrp interface %s vrid %s" %
(self.vrid, self.admin_interface, self.admin_vrid))
if self.auth_mode and self.auth_key:
if self.auth_mode == "simple":
if self.is_plain is True:
self.updates_cmd.append(
"interface %s" % self.interface)
self.updates_cmd.append("vrrp vrid %s authentication-mode simple plain %s" %
(self.vrid, self.auth_key))
else:
self.updates_cmd.append(
"interface %s" % self.interface)
self.updates_cmd.append("vrrp vrid %s authentication-mode simple cipher %s" %
(self.vrid, self.auth_key))
if self.auth_mode == "md5":
self.updates_cmd.append(
"interface %s" % self.interface)
self.updates_cmd.append(
"vrrp vrid %s authentication-mode md5 %s" % (self.vrid, self.auth_key))
self.changed = True
def delete_vrrp_group(self):
"""delete vrrp group attribute info"""
if self.is_vrrp_group_info_exist():
conf_str = CE_NC_SET_VRRP_GROUP_INFO_HEAD % (
self.interface, self.vrid)
if self.vrrp_type:
vrrp_type = self.vrrp_type
if self.vrrp_type == "admin":
vrrp_type = "normal"
if self.vrrp_type == "member" and self.admin_vrid and self.admin_interface:
vrrp_type = "normal"
conf_str += "<vrrpType>%s</vrrpType>" % vrrp_type
if self.priority:
if self.priority == "100":
self.module.fail_json(
msg='Error: The default value of priority is 100.')
priority = "100"
conf_str += "<priority>%s</priority>" % priority
if self.fast_resume:
fast_resume = "false"
if self.fast_resume == "enable":
fast_resume = "true"
conf_str += "<fastResume>%s</fastResume>" % fast_resume
if self.advertise_interval:
if self.advertise_interval == "1000":
self.module.fail_json(
msg='Error: The default value of advertise_interval is 1000.')
advertise_interval = "1000"
conf_str += "<advertiseInterval>%s</advertiseInterval>" % advertise_interval
if self.preempt_timer_delay:
if self.preempt_timer_delay == "0":
self.module.fail_json(
msg='Error: The default value of preempt_timer_delay is 0.')
preempt_timer_delay = "0"
conf_str += "<delayTime>%s</delayTime>" % preempt_timer_delay
if self.holding_multiplier:
if self.holding_multiplier == "0":
self.module.fail_json(
msg='Error: The default value of holding_multiplier is 3.')
holding_multiplier = "3"
conf_str += "<holdMultiplier>%s</holdMultiplier>" % holding_multiplier
if self.auth_mode:
auth_mode = self.auth_mode
if self.auth_mode == "md5" or self.auth_mode == "simple":
auth_mode = "none"
conf_str += "<authenticationMode>%s</authenticationMode>" % auth_mode
conf_str += CE_NC_SET_VRRP_GROUP_INFO_TAIL
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
                    msg='Error: set vrrp group attribute info failed.')
if self.interface and self.vrid:
if self.vrrp_type == "admin":
self.updates_cmd.append(
"undo vrrp vrid %s admin" % self.vrid)
if self.priority:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"undo vrrp vrid %s priority" % self.vrid)
if self.fast_resume:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"undo vrrp vrid %s fast-resume" % self.vrid)
if self.advertise_interval:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"undo vrrp vrid %s timer advertise" % self.vrid)
if self.preempt_timer_delay:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"undo vrrp vrid %s preempt timer delay" % self.vrid)
if self.holding_multiplier:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"undo vrrp vrid %s holding-multiplier" % self.vrid)
if self.admin_vrid and self.admin_interface:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"undo vrrp vrid %s track admin-vrrp" % self.vrid)
if self.auth_mode:
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append(
"undo vrrp vrid %s authentication-mode" % self.vrid)
self.changed = True
def get_proposed(self):
"""get proposed info"""
if self.interface:
self.proposed["interface"] = self.interface
if self.vrid:
self.proposed["vrid"] = self.vrid
if self.virtual_ip:
self.proposed["virtual_ip"] = self.virtual_ip
if self.vrrp_type:
self.proposed["vrrp_type"] = self.vrrp_type
if self.admin_vrid:
self.proposed["admin_vrid"] = self.admin_vrid
if self.admin_interface:
self.proposed["admin_interface"] = self.admin_interface
if self.admin_flowdown:
self.proposed["unflowdown"] = self.admin_flowdown
if self.admin_ignore_if_down:
self.proposed["admin_ignore_if_down"] = self.admin_ignore_if_down
if self.priority:
self.proposed["priority"] = self.priority
if self.version:
self.proposed["version"] = self.version
if self.advertise_interval:
self.proposed["advertise_interval"] = self.advertise_interval
if self.preempt_timer_delay:
self.proposed["preempt_timer_delay"] = self.preempt_timer_delay
if self.gratuitous_arp_interval:
self.proposed[
"gratuitous_arp_interval"] = self.gratuitous_arp_interval
if self.recover_delay:
self.proposed["recover_delay"] = self.recover_delay
if self.holding_multiplier:
self.proposed["holding_multiplier"] = self.holding_multiplier
if self.auth_mode:
self.proposed["auth_mode"] = self.auth_mode
if self.is_plain:
self.proposed["is_plain"] = self.is_plain
if self.auth_key:
self.proposed["auth_key"] = self.auth_key
if self.fast_resume:
self.proposed["fast_resume"] = self.fast_resume
if self.state:
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if self.gratuitous_arp_interval:
self.existing["gratuitous_arp_interval"] = self.vrrp_global_info[
"gratuitousArpTimeOut"]
if self.version:
self.existing["version"] = self.vrrp_global_info["version"]
if self.recover_delay:
self.existing["recover_delay"] = self.vrrp_global_info[
"recoverDelay"]
if self.virtual_ip:
if self.virtual_ip_info:
self.existing["interface"] = self.interface
self.existing["vrid"] = self.vrid
self.existing["virtual_ip_info"] = self.virtual_ip_info[
"vrrpVirtualIpInfos"]
if self.vrrp_group_info:
self.existing["interface"] = self.vrrp_group_info["ifName"]
self.existing["vrid"] = self.vrrp_group_info["vrrpId"]
self.existing["vrrp_type"] = self.vrrp_group_info["vrrpType"]
if self.vrrp_type == "admin":
self.existing["admin_ignore_if_down"] = self.vrrp_group_info[
"authenticationMode"]
if self.admin_vrid and self.admin_interface:
self.existing["admin_vrid"] = self.vrrp_group_info[
"adminVrrpId"]
self.existing["admin_interface"] = self.vrrp_group_info[
"adminIfName"]
self.existing["admin_flowdown"] = self.vrrp_group_info[
"unflowdown"]
if self.priority:
self.existing["priority"] = self.vrrp_group_info["priority"]
if self.advertise_interval:
self.existing["advertise_interval"] = self.vrrp_group_info[
"advertiseInterval"]
if self.preempt_timer_delay:
self.existing["preempt_timer_delay"] = self.vrrp_group_info[
"delayTime"]
if self.holding_multiplier:
self.existing["holding_multiplier"] = self.vrrp_group_info[
"holdMultiplier"]
if self.fast_resume:
fast_resume_exist = "disable"
fast_resume = self.vrrp_group_info["fastResume"]
if fast_resume == "true":
fast_resume_exist = "enable"
self.existing["fast_resume"] = fast_resume_exist
if self.auth_mode:
self.existing["auth_mode"] = self.vrrp_group_info[
"authenticationMode"]
self.existing["is_plain"] = self.vrrp_group_info["isPlain"]
def get_end_state(self):
"""get end state info"""
if self.gratuitous_arp_interval or self.version or self.recover_delay:
self.vrrp_global_info = self.get_vrrp_global_info()
if self.interface and self.vrid:
if self.virtual_ip:
self.virtual_ip_info = self.get_virtual_ip_info()
if self.virtual_ip_info:
self.vrrp_group_info = self.get_vrrp_group_info()
if self.gratuitous_arp_interval:
self.end_state["gratuitous_arp_interval"] = self.vrrp_global_info[
"gratuitousArpTimeOut"]
if self.version:
self.end_state["version"] = self.vrrp_global_info["version"]
if self.recover_delay:
self.end_state["recover_delay"] = self.vrrp_global_info[
"recoverDelay"]
if self.virtual_ip:
if self.virtual_ip_info:
self.end_state["interface"] = self.interface
self.end_state["vrid"] = self.vrid
self.end_state["virtual_ip_info"] = self.virtual_ip_info[
"vrrpVirtualIpInfos"]
if self.vrrp_group_info:
self.end_state["interface"] = self.vrrp_group_info["ifName"]
self.end_state["vrid"] = self.vrrp_group_info["vrrpId"]
self.end_state["vrrp_type"] = self.vrrp_group_info["vrrpType"]
if self.vrrp_type == "admin":
self.end_state["admin_ignore_if_down"] = self.vrrp_group_info[
"authenticationMode"]
if self.admin_vrid and self.admin_interface:
self.existing["admin_vrid"] = self.vrrp_group_info[
"adminVrrpId"]
self.end_state["admin_interface"] = self.vrrp_group_info[
"adminIfName"]
self.end_state["admin_flowdown"] = self.vrrp_group_info[
"unflowdown"]
if self.priority:
self.end_state["priority"] = self.vrrp_group_info["priority"]
if self.advertise_interval:
self.end_state["advertise_interval"] = self.vrrp_group_info[
"advertiseInterval"]
if self.preempt_timer_delay:
self.end_state["preempt_timer_delay"] = self.vrrp_group_info[
"delayTime"]
if self.holding_multiplier:
self.end_state["holding_multiplier"] = self.vrrp_group_info[
"holdMultiplier"]
if self.fast_resume:
fast_resume_end = "disable"
fast_resume = self.vrrp_group_info["fastResume"]
if fast_resume == "true":
fast_resume_end = "enable"
self.end_state["fast_resume"] = fast_resume_end
if self.auth_mode:
self.end_state["auth_mode"] = self.vrrp_group_info[
"authenticationMode"]
self.end_state["is_plain"] = self.vrrp_group_info["isPlain"]
def work(self):
"""worker"""
self.check_params()
if self.gratuitous_arp_interval or self.version or self.recover_delay:
self.vrrp_global_info = self.get_vrrp_global_info()
if self.interface and self.vrid:
self.virtual_ip_info = self.get_virtual_ip_info()
if self.virtual_ip_info:
self.vrrp_group_info = self.get_vrrp_group_info()
self.get_proposed()
self.get_existing()
if self.gratuitous_arp_interval or self.version or self.recover_delay:
if self.state == "present":
self.set_vrrp_global()
else:
self.delete_vrrp_global()
else:
if not self.interface or not self.vrid:
self.module.fail_json(
msg='Error: interface, vrid must be config at the same time.')
if self.interface and self.vrid:
if self.virtual_ip:
if self.state == "present":
self.create_virtual_ip()
else:
self.delete_virtual_ip()
else:
if not self.vrrp_group_info:
self.module.fail_json(
msg='Error: The VRRP group does not exist.')
if self.admin_ignore_if_down is True:
if self.vrrp_type != "admin":
self.module.fail_json(
msg='Error: vrrpType must be admin when admin_ignore_if_down is true.')
if self.admin_interface or self.admin_vrid:
if self.vrrp_type != "member":
self.module.fail_json(
msg='Error: it binds a VRRP group to an mVRRP group, vrrp_type must be "member".')
if not self.vrrp_type or not self.interface or not self.vrid:
self.module.fail_json(
msg='Error: admin_interface admin_vrid vrrp_type interface vrid must '
'be config at the same time.')
if self.auth_mode == "md5" and self.is_plain is True:
self.module.fail_json(
msg='Error: is_plain can not be True when auth_mode is md5.')
if self.state == "present":
self.set_vrrp_group()
else:
self.delete_vrrp_group()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
interface=dict(type='str'),
vrid=dict(type='str'),
virtual_ip=dict(type='str'),
vrrp_type=dict(type='str', choices=['normal', 'member', 'admin']),
admin_ignore_if_down=dict(type='bool', default=False),
admin_vrid=dict(type='str'),
admin_interface=dict(type='str'),
admin_flowdown=dict(type='bool', default=False),
priority=dict(type='str'),
version=dict(type='str', choices=['v2', 'v3']),
advertise_interval=dict(type='str'),
preempt_timer_delay=dict(type='str'),
gratuitous_arp_interval=dict(type='str'),
recover_delay=dict(type='str'),
holding_multiplier=dict(type='str'),
auth_mode=dict(type='str', choices=['simple', 'md5', 'none']),
is_plain=dict(type='bool', default=False),
auth_key=dict(type='str'),
fast_resume=dict(type='str', choices=['enable', 'disable']),
state=dict(type='str', default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = Vrrp(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 | -4,655,661,425,479,502,000 | 38.467569 | 120 | 0.54299 | false |
MalkmusT/coala-quickstart | coala_quickstart/interaction/Logo.py | 2 | 2373 | import textwrap
from coala_quickstart.Strings import COALA_BEAR_LOGO, WELCOME_MESSAGES
def print_side_by_side(printer,
left=[],
right=[],
left_color='white',
right_color='blue',
limit=80):
"""
    Prints the given lines side by side. Example usage:
>>> from pyprint.ConsolePrinter import ConsolePrinter
>>> printer = ConsolePrinter()
>>> print_side_by_side(
... printer,
... ["Text content on the left",
... "side of the text."],
... ["Right side should contain",
... "this."],
... left_color=None,
... right_color=None,
... limit=80)
Text content on the left Right side should contain
side of the text. this.
If either side is longer than the other, empty lines will
be added to the shorter side.
:param printer:
A ``ConsolePrinter`` object used for console interaction.
:param left:
The lines for the left portion of the text.
:param right:
The lines for the right portion of the text.
:param left_color:
The color to use for the left side of the text.
    :param right_color:
The color to use for the right side of the text.
:param limit:
The maximum line length limit.
"""
max_left_length = len(max(left, key=len))
for line in range(len(left) - len(right)):
right.append('')
for line in range(len(right) - len(left)):
left.append('')
for left_line, right_line in zip(left, right):
printer.print(left_line, color=left_color, end='')
printer.print(
' ' * (max_left_length - len(left_line) + 1),
end='')
printer.print(right_line, color=right_color)
def print_welcome_message(printer):
"""
Prints the coala bear logo with a welcome message side by side.
:param printer:
A ``ConsolePrinter`` object used for console interaction.
"""
max_length = 80 - len(max(COALA_BEAR_LOGO, key=len))
text_lines = ['']
for welcome_message in WELCOME_MESSAGES:
text_lines += ['']
text_lines += textwrap.wrap(welcome_message, max_length)
print_side_by_side(
printer,
left=COALA_BEAR_LOGO,
right=text_lines,
limit=80)
| agpl-3.0 | 982,964,152,342,786,000 | 30.223684 | 70 | 0.574378 | false |
jimfenton/notif-notifier | clockwatcherd.py | 1 | 5149 | #!/usr/bin/python
# clockwatcherd.py - Daemon to generate test notifs once a minute
#
# Copyright (c) 2015 Jim Fenton
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
__version__="0.1.0"
import sys
import traceback
from datetime import timedelta, datetime, date, time
import time
import daemon
import syslog
import signal
import lockfile
from notify import Signer, Notification
def clockwatcher_main():
syslog.syslog("clockwatcherd: starting clockwatcher_main")
lifetime = timedelta(days=1) #Notif expiration delta
s = Signer("/etc/clockwatcher/shiny.private", "shiny")
addrlist=[]
updates={}
with open("/etc/clockwatcher/clockwatcherd.cfg","r") as cfg:
for line in cfg:
addrlist.append(line[:-1]) #remembering to remove trailing \n
while 1:
# Synchronize to next whole minute
starttime = time.localtime()
time.sleep(60-starttime.tm_sec)
currtime = datetime.now()+ timedelta(seconds=30) # Force rounding in case we're early
timemsg = currtime.strftime("It is now %H:%M")
notif = Notification(4, lifetime, timemsg, timemsg + " and all is well") # Need to add expiration here
notif.prepare(s)
# For now, minimizing the possibility of a collision between this daemon and new authorizations coming in
# by reading the additional authorizations from a separate file and adding them on here. Only the daemon
# touches the main clockwatcherd.cfg file.
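        # Hedged sketch of the expected hand-off (the file name comes from this
        # script, the writing side is an assumption): an enrollment process would
        # append one notification address per line, e.g.
        #   with open("/etc/clockwatcher/newwatchers.cfg", "a") as f:
        #       f.write(new_address + "\n")
        # and the loop below folds those addresses into the in-memory list.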
rewrite = False
try:
with open("/etc/clockwatcher/newwatchers.cfg","r") as cfg:
for line in cfg:
newaddr = line
if newaddr not in addrlist: #Handle unlikely duplicates
addrlist.append(newaddr)
rewrite = True
except IOError:
pass
except:
syslog.syslog("clockwatcherd: Unknown error opening newwatchers file")
quit()
if rewrite:
cfg=open("/etc/clockwatcher/newwatchers.cfg","w") #Clobber newwatchers file
cfg.close()
with open("/etc/clockwatcher/clockwatcherd.cfg","w") as cfg: #Update config with new watchers
for idx in range(len(addrlist)):
if addrlist[idx] != "":
cfg.write(addrlist[idx])
cfg.write("\n")
rewrite = False
for idx in range(len(addrlist)):
notaddr = addrlist[idx]
if notaddr == "":
continue
if notaddr in updates: #update an existing notif if possible
notid = updates[notaddr]
status = notif.update(notid)
if status == 404: #if 404 delete notid from updates
del updates[notaddr]
if notaddr not in updates: #not an else because it could have just been removed
# TODO: Handle exceptions (can't connect, etc.) here
(notid, status) = notif.send(notaddr) #Need to get feedback on send failures, delete notaddr
if status == 404:
addrlist[idx]="" #Don't delete entry from addrlist inside loop, just blank it
rewrite = True #Disk copy of list needs updating
elif status == 200:
updates[notaddr] = notid
if rewrite: #Update disk copy of list, removing any blank addresses
with open("/etc/clockwatcher/clockwatcherd.cfg","w") as cfg:
for idx in range(len(addrlist)):
if addrlist[idx] != "":
cfg.write(addrlist[idx])
cfg.write("\n")
def program_cleanup():
syslog.syslog("clockwatcherd: exiting on signal")
quit()
# Uncomment next 2 lines for non-daemon testing
#clockwatcher_main()
#quit()
context = daemon.DaemonContext(
pidfile=lockfile.FileLock('/var/run/clockwatcherd.pid'),
)
context.signal_map = {
signal.SIGHUP: program_cleanup,
}
with context:
clockwatcher_main()
| mit | 2,108,621,448,660,416,500 | 36.043165 | 110 | 0.629637 | false |
gitpython-developers/GitPython | git/repo/base.py | 1 | 49533 | # repo.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import logging
import os
import re
import warnings
from git.cmd import (
Git,
handle_process_output
)
from git.compat import (
defenc,
safe_decode,
is_win,
)
from git.config import GitConfigParser
from git.db import GitCmdObjectDB
from git.exc import InvalidGitRepositoryError, NoSuchPathError, GitCommandError
from git.index import IndexFile
from git.objects import Submodule, RootModule, Commit
from git.refs import HEAD, Head, Reference, TagReference
from git.remote import Remote, add_progress, to_progress_instance
from git.util import Actor, finalize_process, decygpath, hex_to_bin, expand_path, remove_password_if_present
import os.path as osp
from .fun import rev_parse, is_git_dir, find_submodule_git_dir, touch, find_worktree_git_dir
import gc
import gitdb
# typing ------------------------------------------------------
from git.types import TBD, PathLike
from typing_extensions import Literal
from typing import (Any, BinaryIO, Callable, Dict,
Iterator, List, Mapping, Optional,
TextIO, Tuple, Type, Union,
NamedTuple, cast, TYPE_CHECKING)
if TYPE_CHECKING: # only needed for types
from git.util import IterableList
from git.refs.symbolic import SymbolicReference
from git.objects import TagObject, Blob, Tree # NOQA: F401
Lit_config_levels = Literal['system', 'global', 'user', 'repository']
# -----------------------------------------------------------
log = logging.getLogger(__name__)
__all__ = ('Repo',)
BlameEntry = NamedTuple('BlameEntry', [
('commit', Dict[str, TBD]),
('linenos', range),
('orig_path', Optional[str]),
('orig_linenos', range)]
)
class Repo(object):
"""Represents a git repository and allows you to query references,
gather commit information, generate diffs, create and clone repositories query
the log.
The following attributes are worth using:
'working_dir' is the working directory of the git command, which is the working tree
directory if available or the .git directory in case of bare repositories
'working_tree_dir' is the working tree directory, but will raise AssertionError
if we are a bare repository.
'git_dir' is the .git repository directory, which is always set."""
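    # Minimal usage sketch (the path below is a placeholder, not part of this module):
    #   repo = Repo("/path/to/checkout")
    #   repo.heads                # local branches
    #   repo.head.commit          # commit currently pointed to by HEAD
    #   repo.working_tree_dir     # root of the working tree
    #   repo.git_dir              # the .git directory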
DAEMON_EXPORT_FILE = 'git-daemon-export-ok'
git = cast('Git', None) # Must exist, or __del__ will fail in case we raise on `__init__()`
working_dir = None # type: Optional[PathLike]
_working_tree_dir = None # type: Optional[PathLike]
git_dir = None # type: Optional[PathLike]
_common_dir = None # type: Optional[PathLike]
# precompiled regex
re_whitespace = re.compile(r'\s+')
re_hexsha_only = re.compile('^[0-9A-Fa-f]{40}$')
re_hexsha_shortened = re.compile('^[0-9A-Fa-f]{4,40}$')
re_envvars = re.compile(r'(\$(\{\s?)?[a-zA-Z_]\w*(\}\s?)?|%\s?[a-zA-Z_]\w*\s?%)')
re_author_committer_start = re.compile(r'^(author|committer)')
re_tab_full_line = re.compile(r'^\t(.*)$')
# invariants
# represents the configuration level of a configuration file
config_level = ("system", "user", "global", "repository") # type: Tuple[Lit_config_levels, ...]
# Subclass configuration
# Subclasses may easily bring in their own custom types by placing a constructor or type here
GitCommandWrapperType = Git
def __init__(self, path: Optional[PathLike] = None, odbt: Type[GitCmdObjectDB] = GitCmdObjectDB,
search_parent_directories: bool = False, expand_vars: bool = True) -> None:
"""Create a new Repo instance
:param path:
the path to either the root git directory or the bare git repo::
repo = Repo("/Users/mtrier/Development/git-python")
repo = Repo("/Users/mtrier/Development/git-python.git")
repo = Repo("~/Development/git-python.git")
repo = Repo("$REPOSITORIES/Development/git-python.git")
repo = Repo("C:\\Users\\mtrier\\Development\\git-python\\.git")
- In *Cygwin*, path may be a `'cygdrive/...'` prefixed path.
- If it evaluates to false, :envvar:`GIT_DIR` is used, and if this also evaluates to false,
the current-directory is used.
:param odbt:
Object DataBase type - a type which is constructed by providing
the directory containing the database objects, i.e. .git/objects. It will
be used to access all object data
:param search_parent_directories:
if True, all parent directories will be searched for a valid repo as well.
Please note that this was the default behaviour in older versions of GitPython,
which is considered a bug though.
:raise InvalidGitRepositoryError:
:raise NoSuchPathError:
:return: git.Repo """
epath = path or os.getenv('GIT_DIR')
if not epath:
epath = os.getcwd()
if Git.is_cygwin():
epath = decygpath(epath)
epath = epath or path or os.getcwd()
if not isinstance(epath, str):
epath = str(epath)
if expand_vars and re.search(self.re_envvars, epath):
warnings.warn("The use of environment variables in paths is deprecated" +
"\nfor security reasons and may be removed in the future!!")
epath = expand_path(epath, expand_vars)
if epath is not None:
if not os.path.exists(epath):
raise NoSuchPathError(epath)
## Walk up the path to find the `.git` dir.
#
curpath = epath
while curpath:
# ABOUT osp.NORMPATH
# It's important to normalize the paths, as submodules will otherwise initialize their
# repo instances with paths that depend on path-portions that will not exist after being
# removed. It's just cleaner.
if is_git_dir(curpath):
self.git_dir = curpath
# from man git-config : core.worktree
# Set the path to the root of the working tree. If GIT_COMMON_DIR environment
# variable is set, core.worktree is ignored and not used for determining the
# root of working tree. This can be overridden by the GIT_WORK_TREE environment
# variable. The value can be an absolute path or relative to the path to the .git
# directory, which is either specified by GIT_DIR, or automatically discovered.
# If GIT_DIR is specified but none of GIT_WORK_TREE and core.worktree is specified,
# the current working directory is regarded as the top level of your working tree.
self._working_tree_dir = os.path.dirname(self.git_dir)
if os.environ.get('GIT_COMMON_DIR') is None:
gitconf = self.config_reader("repository")
if gitconf.has_option('core', 'worktree'):
self._working_tree_dir = gitconf.get('core', 'worktree')
if 'GIT_WORK_TREE' in os.environ:
self._working_tree_dir = os.getenv('GIT_WORK_TREE')
break
dotgit = osp.join(curpath, '.git')
sm_gitpath = find_submodule_git_dir(dotgit)
if sm_gitpath is not None:
self.git_dir = osp.normpath(sm_gitpath)
sm_gitpath = find_submodule_git_dir(dotgit)
if sm_gitpath is None:
sm_gitpath = find_worktree_git_dir(dotgit)
if sm_gitpath is not None:
self.git_dir = expand_path(sm_gitpath, expand_vars)
self._working_tree_dir = curpath
break
if not search_parent_directories:
break
curpath, tail = osp.split(curpath)
if not tail:
break
# END while curpath
if self.git_dir is None:
self.git_dir = cast(PathLike, self.git_dir)
raise InvalidGitRepositoryError(epath)
self._bare = False
try:
self._bare = self.config_reader("repository").getboolean('core', 'bare')
except Exception:
# lets not assume the option exists, although it should
pass
try:
common_dir = open(osp.join(self.git_dir, 'commondir'), 'rt').readlines()[0].strip()
self._common_dir = osp.join(self.git_dir, common_dir)
except OSError:
self._common_dir = None
# adjust the wd in case we are actually bare - we didn't know that
# in the first place
if self._bare:
self._working_tree_dir = None
# END working dir handling
self.working_dir = self._working_tree_dir or self.common_dir # type: Optional[PathLike]
self.git = self.GitCommandWrapperType(self.working_dir)
# special handling, in special times
rootpath = osp.join(self.common_dir, 'objects')
if issubclass(odbt, GitCmdObjectDB):
self.odb = odbt(rootpath, self.git)
else:
self.odb = odbt(rootpath)
def __enter__(self) -> 'Repo':
return self
def __exit__(self, exc_type: TBD, exc_value: TBD, traceback: TBD) -> None:
self.close()
def __del__(self) -> None:
try:
self.close()
except Exception:
pass
def close(self) -> None:
if self.git:
self.git.clear_cache()
# Tempfiles objects on Windows are holding references to
# open files until they are collected by the garbage
# collector, thus preventing deletion.
# TODO: Find these references and ensure they are closed
# and deleted synchronously rather than forcing a gc
# collection.
if is_win:
gc.collect()
gitdb.util.mman.collect()
if is_win:
gc.collect()
def __eq__(self, rhs: object) -> bool:
if isinstance(rhs, Repo) and self.git_dir:
return self.git_dir == rhs.git_dir
return False
def __ne__(self, rhs: object) -> bool:
return not self.__eq__(rhs)
def __hash__(self) -> int:
return hash(self.git_dir)
# Description property
def _get_description(self) -> str:
if self.git_dir:
filename = osp.join(self.git_dir, 'description')
with open(filename, 'rb') as fp:
return fp.read().rstrip().decode(defenc)
def _set_description(self, descr: str) -> None:
if self.git_dir:
filename = osp.join(self.git_dir, 'description')
with open(filename, 'wb') as fp:
fp.write((descr + '\n').encode(defenc))
description = property(_get_description, _set_description,
doc="the project's description")
del _get_description
del _set_description
@property
def working_tree_dir(self) -> Optional[PathLike]:
""":return: The working tree directory of our git repository. If this is a bare repository, None is returned.
"""
return self._working_tree_dir
@property
def common_dir(self) -> PathLike:
"""
:return: The git dir that holds everything except possibly HEAD,
FETCH_HEAD, ORIG_HEAD, COMMIT_EDITMSG, index, and logs/."""
if self._common_dir:
return self._common_dir
elif self.git_dir:
return self.git_dir
else:
# or could return ""
raise InvalidGitRepositoryError()
@property
def bare(self) -> bool:
""":return: True if the repository is bare"""
return self._bare
@property
def heads(self) -> 'IterableList':
"""A list of ``Head`` objects representing the branch heads in
this repo
:return: ``git.IterableList(Head, ...)``"""
return Head.list_items(self)
@property
def references(self) -> 'IterableList':
"""A list of Reference objects representing tags, heads and remote references.
:return: IterableList(Reference, ...)"""
return Reference.list_items(self)
# alias for references
refs = references
# alias for heads
branches = heads
@property
def index(self) -> 'IndexFile':
""":return: IndexFile representing this repository's index.
:note: This property can be expensive, as the returned ``IndexFile`` will be
reinitialized. It's recommended to re-use the object."""
return IndexFile(self)
@property
def head(self) -> 'HEAD':
""":return: HEAD Object pointing to the current head reference"""
return HEAD(self, 'HEAD')
@property
def remotes(self) -> 'IterableList':
"""A list of Remote objects allowing to access and manipulate remotes
:return: ``git.IterableList(Remote, ...)``"""
return Remote.list_items(self)
def remote(self, name: str = 'origin') -> 'Remote':
""":return: Remote with the specified name
:raise ValueError: if no remote with such a name exists"""
r = Remote(self, name)
if not r.exists():
raise ValueError("Remote named '%s' didn't exist" % name)
return r
#{ Submodules
@property
def submodules(self) -> 'IterableList':
"""
:return: git.IterableList(Submodule, ...) of direct submodules
available from the current head"""
return Submodule.list_items(self)
def submodule(self, name: str) -> 'IterableList':
""" :return: Submodule with the given name
:raise ValueError: If no such submodule exists"""
try:
return self.submodules[name]
except IndexError as e:
raise ValueError("Didn't find submodule named %r" % name) from e
# END exception handling
def create_submodule(self, *args: Any, **kwargs: Any) -> Submodule:
"""Create a new submodule
:note: See the documentation of Submodule.add for a description of the
applicable parameters
:return: created submodules"""
return Submodule.add(self, *args, **kwargs)
def iter_submodules(self, *args: Any, **kwargs: Any) -> Iterator:
"""An iterator yielding Submodule instances, see Traversable interface
for a description of args and kwargs
:return: Iterator"""
return RootModule(self).traverse(*args, **kwargs)
def submodule_update(self, *args: Any, **kwargs: Any) -> Iterator:
"""Update the submodules, keeping the repository consistent as it will
take the previous state into consideration. For more information, please
see the documentation of RootModule.update"""
return RootModule(self).update(*args, **kwargs)
#}END submodules
@property
def tags(self) -> 'IterableList':
"""A list of ``Tag`` objects that are available in this repo
:return: ``git.IterableList(TagReference, ...)`` """
return TagReference.list_items(self)
def tag(self, path: PathLike) -> TagReference:
""":return: TagReference Object, reference pointing to a Commit or Tag
:param path: path to the tag reference, i.e. 0.1.5 or tags/0.1.5 """
return TagReference(self, path)
def create_head(self, path: PathLike, commit: str = 'HEAD',
force: bool = False, logmsg: Optional[str] = None
) -> 'SymbolicReference':
"""Create a new head within the repository.
For more documentation, please see the Head.create method.
:return: newly created Head Reference"""
return Head.create(self, path, commit, force, logmsg)
def delete_head(self, *heads: 'SymbolicReference', **kwargs: Any) -> None:
"""Delete the given heads
:param kwargs: Additional keyword arguments to be passed to git-branch"""
return Head.delete(self, *heads, **kwargs)
def create_tag(self, path: PathLike, ref: str = 'HEAD',
message: Optional[str] = None, force: bool = False, **kwargs: Any
) -> TagReference:
"""Create a new tag reference.
For more documentation, please see the TagReference.create method.
:return: TagReference object """
return TagReference.create(self, path, ref, message, force, **kwargs)
def delete_tag(self, *tags: TBD) -> None:
"""Delete the given tag references"""
return TagReference.delete(self, *tags)
def create_remote(self, name: str, url: PathLike, **kwargs: Any) -> Remote:
"""Create a new remote.
For more information, please see the documentation of the Remote.create
methods
:return: Remote reference"""
return Remote.create(self, name, url, **kwargs)
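# Usage sketch: a minimal, hedged example of driving the create_* helpers above;
# the repository path, branch/tag names and remote URL are assumptions, not taken
# from this module.
#
#   repo = Repo('/path/to/repo')
#   head = repo.create_head('feature/demo')            # new branch at HEAD
#   tag = repo.create_tag('v0.1.0', message='demo')    # annotated tag
#   remote = repo.create_remote('upstream', 'https://example.com/project.git')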
def delete_remote(self, remote: 'Remote') -> Type['Remote']:
"""Delete the given remote."""
return Remote.remove(self, remote)
def _get_config_path(self, config_level: Lit_config_levels) -> str:
# we do not support an absolute path of the gitconfig on windows ,
# use the global config instead
if is_win and config_level == "system":
config_level = "global"
if config_level == "system":
return "/etc/gitconfig"
elif config_level == "user":
config_home = os.environ.get("XDG_CONFIG_HOME") or osp.join(os.environ.get("HOME", '~'), ".config")
return osp.normpath(osp.expanduser(osp.join(config_home, "git", "config")))
elif config_level == "global":
return osp.normpath(osp.expanduser("~/.gitconfig"))
elif config_level == "repository":
repo_dir = self._common_dir or self.git_dir
if not repo_dir:
raise NotADirectoryError
else:
return osp.normpath(osp.join(repo_dir, "config"))
raise ValueError("Invalid configuration level: %r" % config_level)
def config_reader(self, config_level: Optional[Lit_config_levels] = None) -> GitConfigParser:
"""
:return:
GitConfigParser allowing to read the full git configuration, but not to write it
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see config_writer method
If None, all applicable levels will be used. Specify a level in case
you know which file you wish to read to prevent reading multiple files.
:note: On windows, system configuration cannot currently be read as the path is
unknown, instead the global path will be used."""
files = None
if config_level is None:
files = [self._get_config_path(f) for f in self.config_level]
else:
files = [self._get_config_path(config_level)]
return GitConfigParser(files, read_only=True, repo=self)
def config_writer(self, config_level: Lit_config_levels = "repository") -> GitConfigParser:
"""
:return:
GitConfigParser allowing to write values of the specified configuration file level.
Config writers should be retrieved, used to change the configuration, and written
right away as they will lock the configuration file in question and prevent other's
to write it.
:param config_level:
One of the following values
system = system wide configuration file
global = user level configuration file
repository = configuration file for this repository only"""
return GitConfigParser(self._get_config_path(config_level), read_only=False, repo=self)
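# Usage sketch: reading and writing configuration through config_reader()/config_writer();
# the repository path and option values shown are assumptions.
#
#   repo = Repo('/path/to/repo')
#   email = repo.config_reader().get_value('user', 'email', default='')
#   with repo.config_writer('repository') as cw:
#       cw.set_value('user', 'name', 'Jane Doe')    # lock is released when the block exits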
def commit(self, rev: Optional[TBD] = None
) -> Union['SymbolicReference', Commit, 'TagObject', 'Blob', 'Tree']:
"""The Commit object for the specified revision
:param rev: revision specifier, see git-rev-parse for viable options.
:return: ``git.Commit``
"""
if rev is None:
return self.head.commit
return self.rev_parse(str(rev) + "^0")
def iter_trees(self, *args: Any, **kwargs: Any) -> Iterator['Tree']:
""":return: Iterator yielding Tree objects
:note: Takes all arguments known to iter_commits method"""
return (c.tree for c in self.iter_commits(*args, **kwargs))
def tree(self, rev: Union['Commit', 'Tree', None] = None) -> 'Tree':
"""The Tree object for the given treeish revision
Examples::
repo.tree(repo.heads[0])
:param rev: is a revision pointing to a Treeish ( being a commit or tree )
:return: ``git.Tree``
:note:
If you need a non-root level tree, find it by iterating the root tree. Otherwise
it cannot know about its path relative to the repository root and subsequent
operations might have unexpected results."""
if rev is None:
return self.head.commit.tree
return self.rev_parse(str(rev) + "^{tree}")
def iter_commits(self, rev: Optional[TBD] = None, paths: Union[PathLike, List[PathLike]] = '',
**kwargs: Any) -> Iterator[Commit]:
"""A list of Commit objects representing the history of a given ref/commit
:param rev:
revision specifier, see git-rev-parse for viable options.
If None, the active branch will be used.
:param paths:
is an optional path or a list of paths to limit the returned commits to
Commits that do not contain that path or the paths will not be returned.
:param kwargs:
Arguments to be passed to git-rev-list - common ones are
max_count and skip
:note: to receive only commits between two named revisions, use the
"revA...revB" revision specifier
:return: ``git.Commit[]``"""
if rev is None:
rev = self.head.commit
return Commit.iter_items(self, rev, paths, **kwargs)
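# Usage sketch: iterating history with iter_commits(); the repository path,
# branch name and file path are assumptions.
#
#   repo = Repo('/path/to/repo')
#   for commit in repo.iter_commits('master', paths='README.md', max_count=5):
#       print(commit.hexsha[:8], commit.summary)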
def merge_base(self, *rev: TBD, **kwargs: Any
) -> List[Union['SymbolicReference', Commit, 'TagObject', 'Blob', 'Tree', None]]:
"""Find the closest common ancestor for the given revision (e.g. Commits, Tags, References, etc)
:param rev: At least two revs to find the common ancestor for.
:param kwargs: Additional arguments to be passed to the repo.git.merge_base() command which does all the work.
:return: A list of Commit objects. If --all was not specified as kwarg, the list will have at max one Commit,
or is empty if no common merge base exists.
:raises ValueError: If not at least two revs are provided
"""
if len(rev) < 2:
raise ValueError("Please specify at least two revs, got only %i" % len(rev))
# end handle input
res = [] # type: List[Union['SymbolicReference', Commit, 'TagObject', 'Blob', 'Tree', None]]
try:
lines = self.git.merge_base(*rev, **kwargs).splitlines() # List[str]
except GitCommandError as err:
if err.status == 128:
raise
# end handle invalid rev
# Status code 1 is returned if there is no merge-base
# (see https://github.com/git/git/blob/master/builtin/merge-base.c#L16)
return res
# end exception handling
for line in lines:
res.append(self.commit(line))
# end for each merge-base
return res
def is_ancestor(self, ancestor_rev: 'Commit', rev: 'Commit') -> bool:
"""Check if a commit is an ancestor of another
:param ancestor_rev: Rev which should be an ancestor
:param rev: Rev to test against ancestor_rev
:return: ``True``, ancestor_rev is an ancestor to rev.
"""
try:
self.git.merge_base(ancestor_rev, rev, is_ancestor=True)
except GitCommandError as err:
if err.status == 1:
return False
raise
return True
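# Usage sketch: combining merge_base() and is_ancestor(); the branch names
# 'master' and 'feature' are assumptions.
#
#   repo = Repo('/path/to/repo')
#   base = repo.merge_base('master', 'feature')[0]    # empty list if no merge base exists
#   repo.is_ancestor(base, repo.commit('feature'))    # -> True in the usual case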
def _get_daemon_export(self) -> bool:
if self.git_dir:
filename = osp.join(self.git_dir, self.DAEMON_EXPORT_FILE)
return osp.exists(filename)
def _set_daemon_export(self, value: object) -> None:
if self.git_dir:
filename = osp.join(self.git_dir, self.DAEMON_EXPORT_FILE)
fileexists = osp.exists(filename)
if value and not fileexists:
touch(filename)
elif not value and fileexists:
os.unlink(filename)
daemon_export = property(_get_daemon_export, _set_daemon_export,
doc="If True, git-daemon may export this repository")
del _get_daemon_export
del _set_daemon_export
def _get_alternates(self) -> List[str]:
"""The list of alternates for this repo from which objects can be retrieved
:return: list of strings being pathnames of alternates"""
if self.git_dir:
alternates_path = osp.join(self.git_dir, 'objects', 'info', 'alternates')
if osp.exists(alternates_path):
with open(alternates_path, 'rb') as f:
alts = f.read().decode(defenc)
return alts.strip().splitlines()
return []
def _set_alternates(self, alts: List[str]) -> None:
"""Sets the alternates
:param alts:
is the array of string paths representing the alternates at which
git should look for objects, i.e. /home/user/repo/.git/objects
:raise NoSuchPathError:
:note:
The method does not check for the existence of the paths in alts
as the caller is responsible."""
alternates_path = osp.join(self.common_dir, 'objects', 'info', 'alternates')
if not alts:
if osp.isfile(alternates_path):
os.remove(alternates_path)
else:
with open(alternates_path, 'wb') as f:
f.write("\n".join(alts).encode(defenc))
alternates = property(_get_alternates, _set_alternates,
doc="Retrieve a list of alternates paths or set a list paths to be used as alternates")
def is_dirty(self, index: bool = True, working_tree: bool = True, untracked_files: bool = False,
submodules: bool = True, path: Optional[PathLike] = None) -> bool:
"""
:return:
``True``, the repository is considered dirty. By default it will react
like a git-status without untracked files, hence it is dirty if the
index or the working copy have changes."""
if self._bare:
# Bare repositories with no associated working directory are
# always considered to be clean.
return False
# start from the one which is fastest to evaluate
default_args = ['--abbrev=40', '--full-index', '--raw']
if not submodules:
default_args.append('--ignore-submodules')
if path:
default_args.extend(["--", str(path)])
if index:
# diff index against HEAD
if osp.isfile(self.index.path) and \
len(self.git.diff('--cached', *default_args)):
return True
# END index handling
if working_tree:
# diff index against working tree
if len(self.git.diff(*default_args)):
return True
# END working tree handling
if untracked_files:
if len(self._get_untracked_files(path, ignore_submodules=not submodules)):
return True
# END untracked files
return False
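# Usage sketch: a typical "anything pending?" check built on is_dirty() and
# untracked_files; the repository path is an assumption.
#
#   repo = Repo('/path/to/repo')
#   if repo.is_dirty(untracked_files=True):
#       print('pending changes, untracked:', repo.untracked_files)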
@property
def untracked_files(self) -> List[str]:
"""
:return:
list(str,...)
Files currently untracked as they have not been staged yet. Paths
are relative to the current working directory of the git command.
:note:
ignored files will not appear here, i.e. files mentioned in .gitignore
:note:
This property is expensive, as no cache is involved. To process the result, please
consider caching it yourself."""
return self._get_untracked_files()
def _get_untracked_files(self, *args: Any, **kwargs: Any) -> List[str]:
# make sure we get all files, not only untracked directories
proc = self.git.status(*args,
porcelain=True,
untracked_files=True,
as_process=True,
**kwargs)
# Untracked files prefix in porcelain mode
prefix = "?? "
untracked_files = []
for line in proc.stdout:
line = line.decode(defenc)
if not line.startswith(prefix):
continue
filename = line[len(prefix):].rstrip('\n')
# Special characters are escaped
if filename[0] == filename[-1] == '"':
filename = filename[1:-1]
# Undo the C-style escaping git applies to non-ASCII characters in quoted paths
filename = filename.encode('ascii').decode('unicode_escape').encode('latin1').decode(defenc)
untracked_files.append(filename)
finalize_process(proc)
return untracked_files
def ignored(self, *paths: PathLike) -> List[PathLike]:
"""Checks if paths are ignored via .gitignore
Doing so using the "git check-ignore" method.
:param paths: List of paths to check whether they are ignored or not
:return: subset of those paths which are ignored
"""
try:
proc = self.git.check_ignore(*paths)
except GitCommandError:
return []
return proc.replace("\\\\", "\\").replace('"', "").split("\n")
@property
def active_branch(self) -> 'SymbolicReference':
"""The name of the currently active branch.
:return: Head to the active branch"""
return self.head.reference
def blame_incremental(self, rev: TBD, file: TBD, **kwargs: Any) -> Optional[Iterator['BlameEntry']]:
"""Iterator for blame information for the given file at the given revision.
Unlike .blame(), this does not return the actual file's contents, only
a stream of BlameEntry tuples.
:param rev: revision specifier, see git-rev-parse for viable options.
:return: lazy iterator of BlameEntry tuples, where the commit
indicates the commit to blame for the line, and range
indicates a span of line numbers in the resulting file.
If you combine all line number ranges outputted by this command, you
should get a continuous range spanning all line numbers in the file.
"""
data = self.git.blame(rev, '--', file, p=True, incremental=True, stdout_as_string=False, **kwargs)
commits = {} # type: Dict[str, TBD]
stream = (line for line in data.split(b'\n') if line)
while True:
try:
line = next(stream) # when exhausted, causes a StopIteration, terminating this function
except StopIteration:
return
split_line = line.split() # type: Tuple[str, str, str, str]
hexsha, orig_lineno_str, lineno_str, num_lines_str = split_line
lineno = int(lineno_str)
num_lines = int(num_lines_str)
orig_lineno = int(orig_lineno_str)
if hexsha not in commits:
# Now read the next few lines and build up a dict of properties
# for this commit
props = {}
while True:
try:
line = next(stream)
except StopIteration:
return
if line == b'boundary':
# "boundary" indicates a root commit and occurs
# instead of the "previous" tag
continue
tag, value = line.split(b' ', 1)
props[tag] = value
if tag == b'filename':
# "filename" formally terminates the entry for --incremental
orig_filename = value
break
c = Commit(self, hex_to_bin(hexsha),
author=Actor(safe_decode(props[b'author']),
safe_decode(props[b'author-mail'].lstrip(b'<').rstrip(b'>'))),
authored_date=int(props[b'author-time']),
committer=Actor(safe_decode(props[b'committer']),
safe_decode(props[b'committer-mail'].lstrip(b'<').rstrip(b'>'))),
committed_date=int(props[b'committer-time']))
commits[hexsha] = c
else:
# Discard all lines until we find "filename" which is
# guaranteed to be the last line
while True:
try:
line = next(stream) # will fail if we reach the EOF unexpectedly
except StopIteration:
return
tag, value = line.split(b' ', 1)
if tag == b'filename':
orig_filename = value
break
yield BlameEntry(commits[hexsha],
range(lineno, lineno + num_lines),
safe_decode(orig_filename),
range(orig_lineno, orig_lineno + num_lines))
def blame(self, rev: TBD, file: TBD, incremental: bool = False, **kwargs: Any
) -> Union[List[List[Union[Optional['Commit'], List[str]]]], Optional[Iterator[BlameEntry]]]:
"""The blame information for the given file at the given revision.
:param rev: revision specifier, see git-rev-parse for viable options.
:return:
list: [git.Commit, list: [<line>]]
A list of lists associating a Commit object with a list of lines that
changed within the given commit. The Commit objects will be given in order
of appearance."""
if incremental:
return self.blame_incremental(rev, file, **kwargs)
data = self.git.blame(rev, '--', file, p=True, stdout_as_string=False, **kwargs)
commits = {} # type: Dict[str, Any]
blames = [] # type: List[List[Union[Optional['Commit'], List[str]]]]
info = {} # type: Dict[str, Any] # use Any until TypedDict available
keepends = True
for line in data.splitlines(keepends):
try:
line = line.rstrip().decode(defenc)
except UnicodeDecodeError:
firstpart = ''
is_binary = True
else:
# We cannot tell in advance where binary data ends, as it may contain multiple newlines
# in the process. So we rely on whether the line decodes to tell us what it is.
# This can absolutely fail even on text files, but even if it does, we should be fine treating it
# as binary instead
parts = self.re_whitespace.split(line, 1)
firstpart = parts[0]
is_binary = False
# end handle decode of line
if self.re_hexsha_only.search(firstpart):
# handles
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 1 1 7 - indicates blame-data start
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 2 2 - indicates
# another line of blame with the same data
digits = parts[-1].split(" ")
if len(digits) == 3:
info = {'id': firstpart}
blames.append([None, []])
elif info['id'] != firstpart:
info = {'id': firstpart}
blames.append([commits.get(firstpart), []])
# END blame data initialization
else:
m = self.re_author_committer_start.search(firstpart)
if m:
# handles:
# author Tom Preston-Werner
# author-mail <[email protected]>
# author-time 1192271832
# author-tz -0700
# committer Tom Preston-Werner
# committer-mail <[email protected]>
# committer-time 1192271832
# committer-tz -0700 - IGNORED BY US
role = m.group(0)
if firstpart.endswith('-mail'):
info["%s_email" % role] = parts[-1]
elif firstpart.endswith('-time'):
info["%s_date" % role] = int(parts[-1])
elif role == firstpart:
info[role] = parts[-1]
# END distinguish mail,time,name
else:
# handle
# filename lib/grit.rb
# summary add Blob
# <and rest>
if firstpart.startswith('filename'):
info['filename'] = parts[-1]
elif firstpart.startswith('summary'):
info['summary'] = parts[-1]
elif firstpart == '':
if info:
sha = info['id']
c = commits.get(sha)
if c is None:
c = Commit(self, hex_to_bin(sha),
author=Actor._from_string(info['author'] + ' ' + info['author_email']),
authored_date=info['author_date'],
committer=Actor._from_string(
info['committer'] + ' ' + info['committer_email']),
committed_date=info['committer_date'])
commits[sha] = c
# END if commit objects needs initial creation
if not is_binary:
if line and line[0] == '\t':
line = line[1:]
else:
# NOTE: We are actually parsing lines out of binary data, which can lead to the
# binary being split up along the newline separator. We will append this to the blame
# we are currently looking at, even though it should be concatenated with the last line
# we have seen.
pass
# end handle line contents
blames[-1][0] = c
if blames[-1][1] is not None:
blames[-1][1].append(line)
info = {'id': sha}
# END if we collected commit info
# END distinguish filename,summary,rest
# END distinguish author|committer vs filename,summary,rest
# END distinguish hexsha vs other information
return blames
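# Usage sketch: consuming the structure returned by blame(); the repository path
# and the file name 'setup.py' are assumptions.
#
#   repo = Repo('/path/to/repo')
#   for commit, lines in repo.blame('HEAD', 'setup.py'):
#       print(commit.hexsha[:8], len(lines), 'lines')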
@classmethod
def init(cls, path: PathLike = None, mkdir: bool = True, odbt: Type[GitCmdObjectDB] = GitCmdObjectDB,
expand_vars: bool = True, **kwargs: Any) -> 'Repo':
"""Initialize a git repository at the given path if specified
:param path:
is the full path to the repo (traditionally ends with /<name>.git)
or None in which case the repository will be created in the current
working directory
:param mkdir:
if specified will create the repository directory if it doesn't
already exist. Creates the directory with a mode=0755.
Only effective if a path is explicitly given
:param odbt:
Object DataBase type - a type which is constructed by providing
the directory containing the database objects, i.e. .git/objects.
It will be used to access all object data
:param expand_vars:
if specified, environment variables will not be escaped. This
can lead to information disclosure, allowing attackers to
access the contents of environment variables
:param kwargs:
keyword arguments serving as additional options to the git-init command
:return: ``git.Repo`` (the newly created repo)"""
if path:
path = expand_path(path, expand_vars)
if mkdir and path and not osp.exists(path):
os.makedirs(path, 0o755)
# git command automatically chdir into the directory
git = Git(path)
git.init(**kwargs)
return cls(path, odbt=odbt)
@classmethod
def _clone(cls, git: 'Git', url: PathLike, path: PathLike, odb_default_type: Type[GitCmdObjectDB],
progress: Optional[Callable], multi_options: Optional[List[str]] = None, **kwargs: Any
) -> 'Repo':
progress_checked = to_progress_instance(progress)
odbt = kwargs.pop('odbt', odb_default_type)
# when pathlib.Path or other classbased path is passed
if not isinstance(path, str):
path = str(path)
## A bug in Cygwin's Git: when `--bare` or `--separate-git-dir` is used,
# it prepends the cwd or(?) the `url` into the `path`, so::
# git clone --bare /cygwin/d/foo.git C:\\Work
# becomes::
# git clone --bare /cygwin/d/foo.git /cygwin/d/C:\\Work
#
clone_path = (Git.polish_url(path)
if Git.is_cygwin() and 'bare' in kwargs
else path)
sep_dir = kwargs.get('separate_git_dir')
if sep_dir:
kwargs['separate_git_dir'] = Git.polish_url(sep_dir)
multi = None
if multi_options:
multi = ' '.join(multi_options).split(' ')
proc = git.clone(multi, Git.polish_url(url), clone_path, with_extended_output=True, as_process=True,
v=True, universal_newlines=True, **add_progress(kwargs, git, progress_checked))
if progress_checked:
handle_process_output(proc, None, progress_checked.new_message_handler(),
finalize_process, decode_streams=False)
else:
(stdout, stderr) = proc.communicate()
cmdline = getattr(proc, 'args', '')
cmdline = remove_password_if_present(cmdline)
log.debug("Cmd(%s)'s unused stdout: %s", cmdline, stdout)
finalize_process(proc, stderr=stderr)
# our git command could have a different working dir than our actual
# environment, hence we prepend its working dir if required
if not osp.isabs(path):
path = osp.join(git._working_dir, path) if git._working_dir is not None else path
repo = cls(path, odbt=odbt)
# retain env values that were passed to _clone()
repo.git.update_environment(**git.environment())
# adjust remotes - some operating systems use backslashes in paths. These might be
# given as initial paths, but when git handles the config file that contains the
# remote from which we were cloned, it escapes the backslashes. Hence we undo the
# escaping just to be sure.
if repo.remotes:
with repo.remotes[0].config_writer as writer:
writer.set_value('url', Git.polish_url(repo.remotes[0].url))
# END handle remote repo
return repo
def clone(self, path: PathLike, progress: Optional[Callable] = None,
multi_options: Optional[List[str]] = None, **kwargs: Any) -> 'Repo':
"""Create a clone from this repository.
:param path: is the full path of the new repo (traditionally ends with ./<name>.git).
:param progress: See 'git.remote.Remote.push'.
:param multi_options: A list of Clone options that can be provided multiple times. One
option per list item which is passed exactly as specified to clone.
For example ['--config core.filemode=false', '--config core.ignorecase',
'--recurse-submodule=repo1_path', '--recurse-submodule=repo2_path']
:param kwargs:
* odbt = ObjectDatabase Type, allowing to determine the object database
implementation used by the returned Repo instance
* All remaining keyword arguments are given to the git-clone command
:return: ``git.Repo`` (the newly cloned repo)"""
return self._clone(self.git, self.common_dir, path, type(self.odb), progress, multi_options, **kwargs)
@classmethod
def clone_from(cls, url: PathLike, to_path: PathLike, progress: Optional[Callable] = None,
env: Optional[Mapping[str, Any]] = None,
multi_options: Optional[List[str]] = None, **kwargs: Any) -> 'Repo':
"""Create a clone from the given URL
:param url: valid git url, see http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
:param to_path: Path to which the repository should be cloned to
:param progress: See 'git.remote.Remote.push'.
:param env: Optional dictionary containing the desired environment variables.
Note: Provided variables will be used to update the execution
environment for `git`. If some variable is not specified in `env`
and is defined in `os.environ`, value from `os.environ` will be used.
If you want to unset some variable, consider providing empty string
as its value.
:param multi_options: See ``clone`` method
:param kwargs: see the ``clone`` method
:return: Repo instance pointing to the cloned directory"""
git = Git(os.getcwd())
if env is not None:
git.update_environment(**env)
return cls._clone(git, url, to_path, GitCmdObjectDB, progress, multi_options, **kwargs)
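# Usage sketch: a shallow clone via the classmethod above; the URL, target
# directory and branch name are assumptions.
#
#   repo = Repo.clone_from('https://example.com/project.git', '/tmp/project',
#                          branch='master', depth=1)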
def archive(self, ostream: Union[TextIO, BinaryIO], treeish: Optional[str] = None,
prefix: Optional[str] = None, **kwargs: Any) -> 'Repo':
"""Archive the tree at the given revision.
:param ostream: file compatible stream object to which the archive will be written as bytes
:param treeish: is the treeish name/id, defaults to active branch
:param prefix: is the optional prefix to prepend to each filename in the archive
:param kwargs: Additional arguments passed to git-archive
* Use the 'format' argument to define the kind of format. Use
specialized ostreams to write any format supported by python.
* You may specify the special **path** keyword, which may either be a repository-relative
path to a directory or file to place into the archive, or a list or tuple of multiple paths.
:raise GitCommandError: in case something went wrong
:return: self"""
if treeish is None:
treeish = self.head.commit
if prefix and 'prefix' not in kwargs:
kwargs['prefix'] = prefix
kwargs['output_stream'] = ostream
path = kwargs.pop('path', [])
path = cast(Union[PathLike, List[PathLike], Tuple[PathLike, ...]], path)
if not isinstance(path, (tuple, list)):
path = [path]
# end assure paths is list
self.git.archive(treeish, *path, **kwargs)
return self
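# Usage sketch: writing the current HEAD tree to a tar archive (git-archive's
# default format); the repository and output paths are assumptions.
#
#   repo = Repo('/path/to/repo')
#   with open('/tmp/project.tar', 'wb') as fp:
#       repo.archive(fp, prefix='project/')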
def has_separate_working_tree(self) -> bool:
"""
:return: True if our git_dir is not at the root of our working_tree_dir, but a .git file with a
platform agnostic symbolic link. Our git_dir will be wherever the .git file points to
:note: bare repositories will always return False here
"""
if self.bare:
return False
if self.working_tree_dir:
return osp.isfile(osp.join(self.working_tree_dir, '.git'))
else:
return False # or raise Error?
rev_parse = rev_parse
def __repr__(self) -> str:
clazz = self.__class__
return '<%s.%s %r>' % (clazz.__module__, clazz.__name__, self.git_dir)
def currently_rebasing_on(self) -> Union['SymbolicReference', Commit, 'TagObject', 'Blob', 'Tree', None]:
"""
:return: The commit which is currently being replayed while rebasing.
None if we are not currently rebasing.
"""
if self.git_dir:
rebase_head_file = osp.join(self.git_dir, "REBASE_HEAD")
if not osp.isfile(rebase_head_file):
return None
return self.commit(open(rebase_head_file, "rt").readline().strip())
| bsd-3-clause | -8,987,056,800,874,823,000 | 42.18483 | 119 | 0.577978 | false |
anandka/SEWA | project/user/forms.py | 1 | 2007 | # project/user/forms.py
from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired, Email, Length, EqualTo
from project.models import User
class LoginForm(Form):
email = TextField('email', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
class RegisterForm(Form):
email = TextField(
'email',
validators=[DataRequired(), Email(message=None), Length(min=6, max=40)])
password = PasswordField(
'password',
validators=[DataRequired(), Length(min=6, max=25)]
)
confirm = PasswordField(
'Repeat password',
validators=[
DataRequired(),
EqualTo('password', message='Passwords must match.')
]
)
def validate(self):
initial_validation = super(RegisterForm, self).validate()
if not initial_validation:
return False
user = User.query.filter_by(email=self.email.data).first()
if user:
self.email.errors.append("Email already registered")
return False
return True
class ChangePasswordForm(Form):
password = PasswordField(
'password',
validators=[DataRequired(), Length(min=6, max=25)]
)
confirm = PasswordField(
'Repeat password',
validators=[
DataRequired(),
EqualTo('password', message='Passwords must match.')
]
)
class InputForm(Form):
region = TextField('region', validators=[DataRequired()])
name = TextField('nm', validators=[DataRequired()])
class ProviderForm(Form):
Snm = TextField(
'Snm',
validators=[DataRequired(), Length(min=2, max=40)])
Rnm = TextField(
'Rnm',
validators=[DataRequired(), Length(min=6, max=40)])
Anm = TextField(
'Anm',
validators=[DataRequired(), Length(min=6, max=40)])
Cnm = TextField(
'Cnm',
validators=[DataRequired(), Length(min=3, max=40)])
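# Usage sketch: how these forms are typically consumed in a Flask view; the
# blueprint, route, template name and surrounding imports are assumptions.
#
#   @user_blueprint.route('/register', methods=['GET', 'POST'])
#   def register():
#       form = RegisterForm(request.form)
#       if form.validate_on_submit():    # also runs the custom validate() above
#           ...  # create the user, log them in, redirect
#       return render_template('user/register.html', form=form)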
| mit | -8,264,770,392,331,245,000 | 26.121622 | 80 | 0.623817 | false |
Abhino/GamifiedTodoList | app.py | 1 | 1583 | import logging
import logging.config
import sys
from flask import Flask,render_template
from werkzeug.contrib.fixers import ProxyFix
from datetime import datetime
from apis import api, db
import os
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
DATE_FORMAT="%Y-%m-%d %H:%M:%S"
FORMAT = '%(asctime)s - %(filename)s - %(levelname)s:%(lineno)d: %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,format=FORMAT,datefmt=DATE_FORMAT)
LOG = logging.getLogger('app')
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
database_url = str(os.environ['DATABASE_URL'])
database_url.replace("postgre", "postgresql")
app.config['SQLALCHEMY_DATABASE_URI'] = database_url
app.logger_name = "flask.app"
api.init_app(app)
db.init_app(app)
@app.after_request
def after_request(response):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
response.headers['Access-Control-Allow-Methods'] = 'GET,PUT,POST,DELETE'
response.headers['Last-Modified'] = datetime.now()
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
app.wsgi_app = ProxyFix(app.wsgi_app)
@app.route('/todo')
def index():
LOG.info("Rendering Template")
return render_template('index.html')
#Create schema for database
with app.app_context():
db.create_all()
if __name__ == '__main__':
app.run(debug=True) | mit | -6,565,310,337,275,284,000 | 30.68 | 113 | 0.725205 | false |
python-xlib/python-xlib | Xlib/xobject/drawable.py | 1 | 34416 | # Xlib.xobject.drawable -- drawable objects (window and pixmap)
#
# Copyright (C) 2000 Peter Liljenberg <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from Xlib import X, Xatom, Xutil
from Xlib.protocol import request, rq
# Other X resource objects
from . import resource
from . import colormap
from . import cursor
from . import fontable
# Inter-client communication conventions
from . import icccm
class Drawable(resource.Resource):
__drawable__ = resource.Resource.__resource__
def get_geometry(self):
return request.GetGeometry(display = self.display,
drawable = self)
def create_pixmap(self, width, height, depth):
pid = self.display.allocate_resource_id()
request.CreatePixmap(display = self.display,
depth = depth,
pid = pid,
drawable = self.id,
width = width,
height = height)
cls = self.display.get_resource_class('pixmap', Pixmap)
return cls(self.display, pid, owner = 1)
def create_gc(self, **keys):
cid = self.display.allocate_resource_id()
request.CreateGC(display = self.display,
cid = cid,
drawable = self.id,
attrs = keys)
cls = self.display.get_resource_class('gc', fontable.GC)
return cls(self.display, cid, owner = 1)
def copy_area(self, gc, src_drawable, src_x, src_y, width, height, dst_x, dst_y, onerror = None):
request.CopyArea(display = self.display,
onerror = onerror,
src_drawable = src_drawable,
dst_drawable = self.id,
gc = gc,
src_x = src_x,
src_y = src_y,
dst_x = dst_x,
dst_y = dst_y,
width = width,
height = height)
def copy_plane(self, gc, src_drawable, src_x, src_y, width, height,
dst_x, dst_y, bit_plane, onerror = None):
request.CopyPlane(display = self.display,
onerror = onerror,
src_drawable = src_drawable,
dst_drawable = self.id,
gc = gc,
src_x = src_x,
src_y = src_y,
dst_x = dst_x,
dst_y = dst_y,
width = width,
height = height,
bit_plane = bit_plane)
def poly_point(self, gc, coord_mode, points, onerror = None):
request.PolyPoint(display = self.display,
onerror = onerror,
coord_mode = coord_mode,
drawable = self.id,
gc = gc,
points = points)
def point(self, gc, x, y, onerror = None):
request.PolyPoint(display = self.display,
onerror = onerror,
coord_mode = X.CoordModeOrigin,
drawable = self.id,
gc = gc,
points = [(x, y)])
def poly_line(self, gc, coord_mode, points, onerror = None):
request.PolyLine(display = self.display,
onerror = onerror,
coord_mode = coord_mode,
drawable = self.id,
gc = gc,
points = points)
def line(self, gc, x1, y1, x2, y2, onerror = None):
request.PolySegment(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
segments = [(x1, y1, x2, y2)])
def poly_segment(self, gc, segments, onerror = None):
request.PolySegment(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
segments = segments)
def poly_rectangle(self, gc, rectangles, onerror = None):
request.PolyRectangle(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
rectangles = rectangles)
def rectangle(self, gc, x, y, width, height, onerror = None):
request.PolyRectangle(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
rectangles = [(x, y, width, height)])
def poly_arc(self, gc, arcs, onerror = None):
request.PolyArc(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
arcs = arcs)
def arc(self, gc, x, y, width, height, angle1, angle2, onerror = None):
request.PolyArc(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
arcs = [(x, y, width, height, angle1, angle2)])
def fill_poly(self, gc, shape, coord_mode, points, onerror = None):
request.FillPoly(display = self.display,
onerror = onerror,
shape = shape,
coord_mode = coord_mode,
drawable = self.id,
gc = gc,
points = points)
def poly_fill_rectangle(self, gc, rectangles, onerror = None):
request.PolyFillRectangle(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
rectangles = rectangles)
def fill_rectangle(self, gc, x, y, width, height, onerror = None):
request.PolyFillRectangle(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
rectangles = [(x, y, width, height)])
def poly_fill_arc(self, gc, arcs, onerror = None):
request.PolyFillArc(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
arcs = arcs)
def fill_arc(self, gc, x, y, width, height, angle1, angle2, onerror = None):
request.PolyFillArc(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
arcs = [(x, y, width, height, angle1, angle2)])
def put_image(self, gc, x, y, width, height, format,
depth, left_pad, data, onerror = None):
request.PutImage(display = self.display,
onerror = onerror,
format = format,
drawable = self.id,
gc = gc,
width = width,
height = height,
dst_x = x,
dst_y = y,
left_pad = left_pad,
depth = depth,
data = data)
# Convenience method for putting PIL images. Only mode '1' (depth 1) and
# 'RGB' (depth 24) images are supported; anything else raises ValueError.
def put_pil_image(self, gc, x, y, image, onerror = None):
width, height = image.size
if image.mode == '1':
format = X.XYBitmap
depth = 1
if self.display.info.bitmap_format_bit_order == 0:
rawmode = '1;R'
else:
rawmode = '1'
pad = self.display.info.bitmap_format_scanline_pad
stride = roundup(width, pad) >> 3
elif image.mode == 'RGB':
format = X.ZPixmap
depth = 24
if self.display.info.image_byte_order == 0:
rawmode = 'BGRX'
else:
rawmode = 'RGBX'
pad = self.display.info.bitmap_format_scanline_pad
unit = self.display.info.bitmap_format_scanline_unit
stride = roundup(width * unit, pad) >> 3
else:
raise ValueError('Unknown data format')
maxlen = (self.display.info.max_request_length << 2) \
- request.PutImage._request.static_size
split = maxlen // stride
x1 = 0
x2 = width
y1 = 0
while y1 < height:
h = min(height, split)
if h < height:
subimage = image.crop((x1, y1, x2, y1 + h))
else:
subimage = image
w, h = subimage.size
data = subimage.tobytes("raw", rawmode, stride, 0)
self.put_image(gc, x, y, w, h, format, depth, 0, data)
y1 = y1 + h
y = y + h
def get_image(self, x, y, width, height, format, plane_mask):
return request.GetImage(display = self.display,
format = format,
drawable = self.id,
x = x,
y = y,
width = width,
height = height,
plane_mask = plane_mask)
def draw_text(self, gc, x, y, text, onerror = None):
request.PolyText8(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
items = [text])
def poly_text(self, gc, x, y, items, onerror = None):
request.PolyText8(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
items = items)
def poly_text_16(self, gc, x, y, items, onerror = None):
request.PolyText16(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
items = items)
def image_text(self, gc, x, y, string, onerror = None):
request.ImageText8(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
string = string)
def image_text_16(self, gc, x, y, string, onerror = None):
request.ImageText16(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
string = string)
def query_best_size(self, item_class, width, height):
return request.QueryBestSize(display = self.display,
item_class = item_class,
drawable = self.id,
width = width,
height = height)
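# Usage sketch: exercising the generic Drawable drawing calls on a pixmap;
# the sizes and colours used are assumptions.
#
#   from Xlib import display
#   d = display.Display()
#   screen = d.screen()
#   pm = screen.root.create_pixmap(64, 64, screen.root_depth)
#   gc = pm.create_gc(foreground=screen.white_pixel)
#   pm.fill_rectangle(gc, 0, 0, 64, 64)       # clear to white
#   gc.change(foreground=screen.black_pixel)
#   pm.line(gc, 0, 0, 63, 63)
#   d.flush()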
class Window(Drawable):
__window__ = resource.Resource.__resource__
_STRING_ENCODING = 'ISO-8859-1'
_UTF8_STRING_ENCODING = 'UTF-8'
def create_window(self, x, y, width, height, border_width, depth,
window_class = X.CopyFromParent,
visual = X.CopyFromParent,
onerror = None,
**keys):
wid = self.display.allocate_resource_id()
request.CreateWindow(display = self.display,
onerror = onerror,
depth = depth,
wid = wid,
parent = self.id,
x = x,
y = y,
width = width,
height = height,
border_width = border_width,
window_class = window_class,
visual = visual,
attrs = keys)
cls = self.display.get_resource_class('window', Window)
return cls(self.display, wid, owner = 1)
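# Usage sketch: creating and mapping a top-level window via create_window();
# the geometry and selected event mask are assumptions.
#
#   from Xlib import display, X
#   d = display.Display()
#   screen = d.screen()
#   win = screen.root.create_window(
#       10, 10, 320, 200, 2, screen.root_depth,
#       background_pixel=screen.white_pixel,
#       event_mask=X.ExposureMask | X.KeyPressMask)
#   win.map()
#   d.flush()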
def change_attributes(self, onerror = None, **keys):
request.ChangeWindowAttributes(display = self.display,
onerror = onerror,
window = self.id,
attrs = keys)
def get_attributes(self):
return request.GetWindowAttributes(display = self.display,
window = self.id)
def destroy(self, onerror = None):
request.DestroyWindow(display = self.display,
onerror = onerror,
window = self.id)
self.display.free_resource_id(self.id)
def destroy_sub_windows(self, onerror = None):
request.DestroySubWindows(display = self.display,
onerror = onerror,
window = self.id)
def change_save_set(self, mode, onerror = None):
request.ChangeSaveSet(display = self.display,
onerror = onerror,
mode = mode,
window = self.id)
def reparent(self, parent, x, y, onerror = None):
request.ReparentWindow(display = self.display,
onerror = onerror,
window = self.id,
parent = parent,
x = x,
y = y)
def map(self, onerror = None):
request.MapWindow(display = self.display,
onerror = onerror,
window = self.id)
def map_sub_windows(self, onerror = None):
request.MapSubwindows(display = self.display,
onerror = onerror,
window = self.id)
def unmap(self, onerror = None):
request.UnmapWindow(display = self.display,
onerror = onerror,
window = self.id)
def unmap_sub_windows(self, onerror = None):
request.UnmapSubwindows(display = self.display,
onerror = onerror,
window = self.id)
def configure(self, onerror = None, **keys):
request.ConfigureWindow(display = self.display,
onerror = onerror,
window = self.id,
attrs = keys)
def circulate(self, direction, onerror = None):
request.CirculateWindow(display = self.display,
onerror = onerror,
direction = direction,
window = self.id)
def raise_window(self, onerror = None):
"""alias for raising the window to the top - as in XRaiseWindow"""
self.configure(onerror, stack_mode = X.Above)
def query_tree(self):
return request.QueryTree(display = self.display,
window = self.id)
def change_property(self, property, property_type, format, data,
mode = X.PropModeReplace, onerror = None):
request.ChangeProperty(display = self.display,
onerror = onerror,
mode = mode,
window = self.id,
property = property,
type = property_type,
data = (format, data))
def change_text_property(self, property, property_type, data,
mode = X.PropModeReplace, onerror = None):
if not isinstance(data, bytes):
if property_type == Xatom.STRING:
data = data.encode(self._STRING_ENCODING)
elif property_type == self.display.get_atom('UTF8_STRING'):
data = data.encode(self._UTF8_STRING_ENCODING)
self.change_property(property, property_type, 8, data,
mode=mode, onerror=onerror)
def delete_property(self, property, onerror = None):
request.DeleteProperty(display = self.display,
onerror = onerror,
window = self.id,
property = property)
def get_property(self, property, property_type, offset, length, delete = 0):
r = request.GetProperty(display = self.display,
delete = delete,
window = self.id,
property = property,
type = property_type,
long_offset = offset,
long_length = length)
if r.property_type:
fmt, value = r.value
r.format = fmt
r.value = value
return r
else:
return None
def get_full_property(self, property, property_type, sizehint = 10):
prop = self.get_property(property, property_type, 0, sizehint)
if prop:
val = prop.value
if prop.bytes_after:
prop = self.get_property(property, property_type, sizehint,
prop.bytes_after // 4 + 1)
val = val + prop.value
prop.value = val
return prop
else:
return None
def get_full_text_property(self, property, property_type=X.AnyPropertyType, sizehint = 10):
prop = self.get_full_property(property, property_type,
sizehint=sizehint)
if prop is None or prop.format != 8:
return None
if prop.property_type == Xatom.STRING:
prop.value = prop.value.decode(self._STRING_ENCODING)
elif prop.property_type == self.display.get_atom('UTF8_STRING'):
prop.value = prop.value.decode(self._UTF8_STRING_ENCODING)
# FIXME: at least basic support for compound text would be nice.
# elif prop.property_type == self.display.get_atom('COMPOUND_TEXT'):
return prop.value
def list_properties(self):
r = request.ListProperties(display = self.display,
window = self.id)
return r.atoms
def set_selection_owner(self, selection, time, onerror = None):
request.SetSelectionOwner(display = self.display,
onerror = onerror,
window = self.id,
selection = selection,
time = time)
def convert_selection(self, selection, target, property, time, onerror = None):
request.ConvertSelection(display = self.display,
onerror = onerror,
requestor = self.id,
selection = selection,
target = target,
property = property,
time = time)
def send_event(self, event, event_mask = 0, propagate = 0, onerror = None):
request.SendEvent(display = self.display,
onerror = onerror,
propagate = propagate,
destination = self.id,
event_mask = event_mask,
event = event)
def grab_pointer(self, owner_events, event_mask,
pointer_mode, keyboard_mode,
confine_to, cursor, time):
r = request.GrabPointer(display = self.display,
owner_events = owner_events,
grab_window = self.id,
event_mask = event_mask,
pointer_mode = pointer_mode,
keyboard_mode = keyboard_mode,
confine_to = confine_to,
cursor = cursor,
time = time)
return r.status
def grab_button(self, button, modifiers, owner_events, event_mask,
pointer_mode, keyboard_mode,
confine_to, cursor, onerror = None):
request.GrabButton(display = self.display,
onerror = onerror,
owner_events = owner_events,
grab_window = self.id,
event_mask = event_mask,
pointer_mode = pointer_mode,
keyboard_mode = keyboard_mode,
confine_to = confine_to,
cursor = cursor,
button = button,
modifiers = modifiers)
def ungrab_button(self, button, modifiers, onerror = None):
request.UngrabButton(display = self.display,
onerror = onerror,
button = button,
grab_window = self.id,
modifiers = modifiers)
def grab_keyboard(self, owner_events, pointer_mode, keyboard_mode, time):
r = request.GrabKeyboard(display = self.display,
owner_events = owner_events,
grab_window = self.id,
time = time,
pointer_mode = pointer_mode,
keyboard_mode = keyboard_mode)
return r.status
def grab_key(self, key, modifiers, owner_events, pointer_mode, keyboard_mode, onerror = None):
request.GrabKey(display = self.display,
onerror = onerror,
owner_events = owner_events,
grab_window = self.id,
modifiers = modifiers,
key = key,
pointer_mode = pointer_mode,
keyboard_mode = keyboard_mode)
def ungrab_key(self, key, modifiers, onerror = None):
request.UngrabKey(display = self.display,
onerror = onerror,
key = key,
grab_window = self.id,
modifiers = modifiers)
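# Usage sketch: registering a global hotkey with grab_key(); the chosen
# combination (Alt+F9) is an assumption.
#
#   from Xlib import display, X, XK
#   d = display.Display()
#   root = d.screen().root
#   keycode = d.keysym_to_keycode(XK.string_to_keysym('F9'))
#   root.grab_key(keycode, X.Mod1Mask, 1, X.GrabModeAsync, X.GrabModeAsync)
#   d.flush()
#   event = d.next_event()    # blocks until the grabbed combination is pressed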
def query_pointer(self):
return request.QueryPointer(display = self.display,
window = self.id)
def get_motion_events(self, start, stop):
r = request.GetMotionEvents(display = self.display,
window = self.id,
start = start,
stop = stop)
return r.events
def translate_coords(self, src_window, src_x, src_y):
return request.TranslateCoords(display = self.display,
src_wid = src_window,
dst_wid = self.id,
src_x = src_x,
src_y = src_y)
def warp_pointer(self, x, y, src_window = 0, src_x = 0, src_y = 0,
src_width = 0, src_height = 0, onerror = None):
request.WarpPointer(display = self.display,
onerror = onerror,
src_window = src_window,
dst_window = self.id,
src_x = src_x,
src_y = src_y,
src_width = src_width,
src_height = src_height,
dst_x = x,
dst_y = y)
def set_input_focus(self, revert_to, time, onerror = None):
request.SetInputFocus(display = self.display,
onerror = onerror,
revert_to = revert_to,
focus = self.id,
time = time)
def clear_area(self, x = 0, y = 0, width = 0, height = 0, exposures = 0, onerror = None):
request.ClearArea(display = self.display,
onerror = onerror,
exposures = exposures,
window = self.id,
x = x,
y = y,
width = width,
height = height)
def create_colormap(self, visual, alloc):
mid = self.display.allocate_resource_id()
request.CreateColormap(display = self.display,
alloc = alloc,
mid = mid,
window = self.id,
visual = visual)
cls = self.display.get_resource_class('colormap', colormap.Colormap)
return cls(self.display, mid, owner = 1)
def list_installed_colormaps(self):
r = request.ListInstalledColormaps(display = self.display,
window = self.id)
return r.cmaps
def rotate_properties(self, properties, delta, onerror = None):
request.RotateProperties(display = self.display,
onerror = onerror,
window = self.id,
delta = delta,
properties = properties)
def set_wm_name(self, name, onerror = None):
self.change_text_property(Xatom.WM_NAME, Xatom.STRING, name,
onerror = onerror)
def get_wm_name(self):
return self.get_full_text_property(Xatom.WM_NAME, Xatom.STRING)
def set_wm_icon_name(self, name, onerror = None):
self.change_text_property(Xatom.WM_ICON_NAME, Xatom.STRING, name,
onerror = onerror)
def get_wm_icon_name(self):
return self.get_full_text_property(Xatom.WM_ICON_NAME, Xatom.STRING)
def set_wm_class(self, inst, cls, onerror = None):
self.change_text_property(Xatom.WM_CLASS, Xatom.STRING,
'%s\0%s\0' % (inst, cls),
onerror = onerror)
def get_wm_class(self):
value = self.get_full_text_property(Xatom.WM_CLASS, Xatom.STRING)
if value is None:
return None
parts = value.split('\0')
if len(parts) < 2:
return None
else:
return parts[0], parts[1]
def set_wm_transient_for(self, window, onerror = None):
self.change_property(Xatom.WM_TRANSIENT_FOR, Xatom.WINDOW,
32, [window.id],
onerror = onerror)
def get_wm_transient_for(self):
d = self.get_property(Xatom.WM_TRANSIENT_FOR, Xatom.WINDOW, 0, 1)
if d is None or d.format != 32 or len(d.value) < 1:
return None
else:
cls = self.display.get_resource_class('window', Window)
return cls(self.display, d.value[0])
def set_wm_protocols(self, protocols, onerror = None):
self.change_property(self.display.get_atom('WM_PROTOCOLS'),
Xatom.ATOM, 32, protocols,
onerror = onerror)
def get_wm_protocols(self):
d = self.get_full_property(self.display.get_atom('WM_PROTOCOLS'), Xatom.ATOM)
if d is None or d.format != 32:
return []
else:
return d.value
def set_wm_colormap_windows(self, windows, onerror = None):
self.change_property(self.display.get_atom('WM_COLORMAP_WINDOWS'),
Xatom.WINDOW, 32,
map(lambda w: w.id, windows),
onerror = onerror)
def get_wm_colormap_windows(self):
d = self.get_full_property(self.display.get_atom('WM_COLORMAP_WINDOWS'),
Xatom.WINDOW)
if d is None or d.format != 32:
return []
else:
cls = self.display.get_resource_class('window', Window)
return map(lambda i, d = self.display, c = cls: c(d, i),
d.value)
def set_wm_client_machine(self, name, onerror = None):
self.change_text_property(Xatom.WM_CLIENT_MACHINE, Xatom.STRING, name,
onerror = onerror)
def get_wm_client_machine(self):
return self.get_full_text_property(Xatom.WM_CLIENT_MACHINE, Xatom.STRING)
def set_wm_normal_hints(self, hints = {}, onerror = None, **keys):
self._set_struct_prop(Xatom.WM_NORMAL_HINTS, Xatom.WM_SIZE_HINTS,
icccm.WMNormalHints, hints, keys, onerror)
def get_wm_normal_hints(self):
return self._get_struct_prop(Xatom.WM_NORMAL_HINTS, Xatom.WM_SIZE_HINTS,
icccm.WMNormalHints)
def set_wm_hints(self, hints = {}, onerror = None, **keys):
self._set_struct_prop(Xatom.WM_HINTS, Xatom.WM_HINTS,
icccm.WMHints, hints, keys, onerror)
def get_wm_hints(self):
return self._get_struct_prop(Xatom.WM_HINTS, Xatom.WM_HINTS,
icccm.WMHints)
def set_wm_state(self, hints = {}, onerror = None, **keys):
atom = self.display.get_atom('WM_STATE')
self._set_struct_prop(atom, atom, icccm.WMState, hints, keys, onerror)
def get_wm_state(self):
atom = self.display.get_atom('WM_STATE')
return self._get_struct_prop(atom, atom, icccm.WMState)
def set_wm_icon_size(self, hints = {}, onerror = None, **keys):
self._set_struct_prop(Xatom.WM_ICON_SIZE, Xatom.WM_ICON_SIZE,
icccm.WMIconSize, hints, keys, onerror)
def get_wm_icon_size(self):
return self._get_struct_prop(Xatom.WM_ICON_SIZE, Xatom.WM_ICON_SIZE,
icccm.WMIconSize)
# Helper function for getting structured properties.
# pname and ptype are atoms, and pstruct is a Struct object.
# Returns a DictWrapper, or None
def _get_struct_prop(self, pname, ptype, pstruct):
r = self.get_property(pname, ptype, 0, pstruct.static_size // 4)
if r and r.format == 32:
value = rq.encode_array(r.value)
if len(value) == pstruct.static_size:
return pstruct.parse_binary(value, self.display)[0]
return None
# Helper function for setting structured properties.
# pname and ptype are atoms, and pstruct is a Struct object.
# hints is a mapping or a DictWrapper, keys is a mapping. keys
# will be modified. onerror is the error handler.
def _set_struct_prop(self, pname, ptype, pstruct, hints, keys, onerror):
if isinstance(hints, rq.DictWrapper):
keys.update(hints._data)
else:
keys.update(hints)
value = pstruct.to_binary(*(), **keys)
self.change_property(pname, ptype, 32, value, onerror = onerror)
class Pixmap(Drawable):
__pixmap__ = resource.Resource.__resource__
def free(self, onerror = None):
request.FreePixmap(display = self.display,
onerror = onerror,
pixmap = self.id)
self.display.free_resource_id(self.id)
def create_cursor(self, mask, foreground, background, x, y):
fore_red, fore_green, fore_blue = foreground
back_red, back_green, back_blue = background
cid = self.display.allocate_resource_id()
request.CreateCursor(display = self.display,
cid = cid,
source = self.id,
mask = mask,
fore_red = fore_red,
fore_green = fore_green,
fore_blue = fore_blue,
back_red = back_red,
back_green = back_green,
back_blue = back_blue,
x = x,
y = y)
cls = self.display.get_resource_class('cursor', cursor.Cursor)
return cls(self.display, cid, owner = 1)
def roundup(value, unit):
return (value + (unit - 1)) & ~(unit - 1)
| lgpl-2.1 | -8,560,134,836,506,674,000 | 40.216766 | 101 | 0.463389 | false |
GiovanniConserva/TestDeploy | venv/Lib/site-packages/binstar_client/inspect_package/tests/test_pypi.py | 1 | 8799 | from __future__ import print_function, unicode_literals
import unittest
from os import path
from binstar_client.inspect_package import pypi
from pprint import pprint
import os
import shutil
import tempfile
def data_path(filename):
return path.join(path.dirname(__file__), 'data', filename)
expected_package_data = {'name': 'test-package34',
'license': 'custom',
'summary': 'Python test package for binstar client'}
expected_version_data = {'home_page': 'http://github.com/binstar/binstar_pypi',
'version': '0.3.1',
'description':'longer description of the package'}
expected_dependencies = {'depends': [ {'name': u'python-dateutil', 'specs': []},
{'name': u'pytz', 'specs': []},
{'name': u'pyyaml', 'specs': []},
{'name': u'requests', 'specs': [(u'>=', u'2.0'), (u'<=', u'3.0')]}, ],
'extras': [{'depends': [{'name': u'argparse',
'specs': []}],
'name': u':python_version=="2.6"'},
{'depends': [{'name': u'reportlab',
'specs': [(u'>=', u'1.2')]},
{'name': u'rxp', 'specs': []}],
'name': u'PDF'},
{'depends': [{'name': u'docutils',
'specs': [(u'>=', u'0.3')]}],
'name': u'reST'}],
'has_dep_errors': False}
expected_whl_dependencies = {u'depends': [{u'name': u'python-dateutil', u'specs': []},
{u'name': u'pytz', u'specs': []},
{u'name': u'pyyaml', u'specs': []},
{u'name': u'requests',
u'specs': [(u'>=', u'2.0'),
(u'<=', u'3.0')]}],
u'environments': [{u'depends': [{u'name': u'argparse',
u'specs': []}],
u'name': u'python_version=="2.6"'}],
u'extras': [{u'depends': [{u'name': u'RXP',
u'specs': []},
{u'name': u'reportlab',
u'specs': [(u'>=', u'1.2')]}],
u'name': u'PDF'},
{u'depends': [{u'name': u'docutils',
u'specs': [(u'>=', u'0.3')]}],
u'name': u'reST'}],
u'has_dep_errors': False}
expected_egg_file_data = {'attrs': {'packagetype': 'bdist_egg', 'python_version': 'source'},
'basename': 'test_package34-0.3.1-py2.7.egg',
'dependencies': expected_dependencies,
'platform': None}
class Test(unittest.TestCase):
def test_sdist(self):
filename = data_path('test_package34-0.3.1.tar.gz')
with open(filename, 'rb') as fd:
package_data, version_data, file_data = pypi.inspect_pypi_package(filename, fd)
expected_file_data = {'attrs': {'packagetype': 'sdist', 'python_version': 'source'},
'basename': 'test_package34-0.3.1.tar.gz',
'dependencies': expected_dependencies}
self.assertEqual(expected_package_data, package_data)
self.assertEqual(expected_version_data, version_data)
self.assertEqual(set(expected_file_data), set(file_data))
for key in expected_file_data:
self.assertEqual(expected_file_data[key], file_data[key])
def test_bdist_wheel(self):
filename = data_path('test_package34-0.3.1-py2-none-any.whl')
with open(filename, 'rb') as fd:
package_data, version_data, file_data = pypi.inspect_pypi_package(filename, fd)
expected_file_data = {'attrs': {'abi': None, 'build_no': 0,
'packagetype': 'bdist_wheel',
'python_version': 'py2'},
'basename': 'test_package34-0.3.1-py2-none-any.whl',
'dependencies': expected_whl_dependencies,
'platform': None}
self.assertEqual(expected_package_data, package_data)
self.assertEqual(expected_version_data, version_data)
self.assertEqual(set(expected_file_data), set(file_data))
for key in expected_file_data:
self.assertEqual(expected_file_data[key], file_data[key])
def test_bdist_egg(self):
filename = data_path('test_package34-0.3.1-py2.7.egg')
with open(filename, 'rb') as fd:
package_data, version_data, file_data = pypi.inspect_pypi_package(filename, fd)
self.assertEqual(expected_package_data, package_data)
self.assertEqual(expected_version_data, version_data)
self.assertEqual(set(expected_egg_file_data), set(file_data))
for key in expected_egg_file_data:
self.assertEqual(expected_egg_file_data[key], file_data[key])
def test_bdist_egg_dashed_path(self):
filename = data_path('test_package34-0.3.1-py2.7.egg')
tmpdir = tempfile.gettempdir()
dash_count = tmpdir.count('-')
if dash_count == 0:
tmpdir = path.join(tmpdir, 'has-dash')
try:
os.mkdir(tmpdir)
except (IOError, OSError):
raise unittest.SkipTest('Cannot create temporary directory %r' % tmpdir)
elif dash_count > 1:
raise unittest.SkipTest('Too many dashes in temporary directory path %r' % tmpdir)
try:
shutil.copy(filename, tmpdir)
except (IOError, OSError):
raise unittest.SkipTest('Cannot copy package to temporary directory')
tmpfilename = path.join(tmpdir, 'test_package34-0.3.1-py2.7.egg')
with open(tmpfilename, 'rb') as fd:
package_data, version_data, file_data = pypi.inspect_pypi_package(tmpfilename, fd)
# If we could create this file, we ought to be able to delete it
os.remove(tmpfilename)
if dash_count == 0:
# We created a temporary directory like /tmp/has-dash, delete it
os.rmdir(tmpdir)
self.assertEqual(expected_package_data, package_data)
self.assertEqual(expected_version_data, version_data)
self.assertEqual(set(expected_egg_file_data), set(file_data))
self.assertEqual(expected_egg_file_data['platform'], file_data['platform'])
self.assertEqual(expected_egg_file_data['attrs']['python_version'],
file_data['attrs']['python_version'])
def test_sdist_distutils(self):
filename = data_path('test_package34-distutils-0.3.1.tar.gz')
with open(filename, 'rb') as fd:
package_data, version_data, file_data = pypi.inspect_pypi_package(filename, fd)
expected_file_data = {'attrs': {'packagetype': 'sdist', 'python_version': 'source'},
'basename': 'test_package34-distutils-0.3.1.tar.gz',
'dependencies': {'depends': [{'name': 'requests',
'specs': [('>=', '2.0'), ('<=', '3.0')]},
{'name': 'pyyaml', 'specs': [('==', '2.0')]},
{'name': 'pytz', 'specs': []}],
'extras': [],
'has_dep_errors': False}}
dexpected_package_data = expected_package_data.copy()
dexpected_package_data['name'] = dexpected_package_data['name'].replace('-', '_')
self.assertEqual(dexpected_package_data, package_data)
self.assertEqual(expected_version_data, version_data)
self.assertEqual(set(expected_file_data), set(file_data))
for key in expected_file_data:
self.assertEqual(expected_file_data[key], file_data[key])
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| bsd-3-clause | -6,492,083,558,679,514,000 | 46.820652 | 108 | 0.472895 | false |
kiyoad/twimg2rss | url_db.py | 1 | 1450 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import os
import sqlite3
from common import conf
class UrlDb:
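    """sqlite3-backed store of already-seen URLs with time-based expiry."""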
def __init__(self):
self.conn = None
self.c = None
def _open_sqlite3(self, url_db_file):
self.conn = sqlite3.connect(
url_db_file,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
self.c = self.conn.cursor()
def _clear_old_db_data(self):
reference = datetime.datetime.now() - datetime.timedelta(
seconds=conf.url_db_period())
self.c.execute('DELETE FROM urls WHERE created < ?', (reference,))
def open(self):
url_db_file = conf.url_db_file()
db_exist = os.path.isfile(url_db_file)
self._open_sqlite3(url_db_file)
if not db_exist:
self.c.execute('CREATE TABLE urls (url TEXT, created TIMESTAMP)')
self.c.execute('CREATE INDEX url_index ON urls(url, created)')
def close(self):
self._clear_old_db_data()
self.conn.commit()
self.c.close()
self.c = None
self.conn.close()
self.conn = None
def url_in_db(self, url):
self.c.execute('SELECT * FROM urls WHERE url == ?', (url,))
return self.c.fetchone() is not None
def add_url(self, url, created):
self.c.execute('INSERT INTO urls(url, created) VALUES (?, ?)',
(url, created))
url_db = UrlDb()
| mit | 1,711,929,013,310,290,000 | 27.431373 | 77 | 0.576552 | false |
SCUEvals/scuevals-api | db/alembic/versions/20170927093446_fix_update_courses.py | 1 | 4925 | """Fix update_courses
Revision ID: 7004250e3ef5
Revises: 8a786f9bf241
Create Date: 2017-09-27 09:34:46.069174
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7004250e3ef5'
down_revision = '8a786f9bf241'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute(sa.text("""create or replace function update_courses(_university_id numeric, _json jsonb)
returns numeric as $func$
declare
_d_id numeric;
_c_id numeric;
_p_id numeric;
_quarter numeric;
_latest_quarter numeric;
_s_id numeric;
_department varchar;
_number varchar;
_title varchar;
_professor1 varchar[];
_professor2 varchar[];
_professor3 varchar[];
_professors varchar[][];
_professor varchar[];
_count numeric := 0;
_new_course boolean := false;
begin
for
_quarter,
_department,
_number,
_title,
_professor1,
_professor2,
_professor3
in
select
(course ->> 'term')::int as _quarter,
course ->> 'subject' as _department,
course ->> 'catalog_nbr' as _number,
course ->> 'class_descr' as _title,
-- prof #1
case
when (course ->> 'instr_1') like '%, %' then
array[
split_part(course ->> 'instr_1', ', ', 1),
split_part(course ->> 'instr_1', ', ', 2)
]
when (course ->> 'instr_1') = '' then
null
end as _professor1,
-- prof #2
case
when (course ->> 'instr_2') like '%, %' then
array[
split_part(course ->> 'instr_2', ', ', 1),
split_part(course ->> 'instr_2', ', ', 2)
]
when (course ->> 'instr_2') = '' then
null
end as _professor2,
-- prof #3
case
when (course ->> 'instr_3') like '%, %' then
array[
split_part(course ->> 'instr_3', ', ', 1),
split_part(course ->> 'instr_3', ', ', 2)
]
when (course ->> 'instr_3') = '' then
null
end as _professor3
from jsonb_array_elements(_json -> 'courses') course
loop
if _professor1 is null then continue; end if;
-- get the department id (assume it exists)
select departments.id into _d_id
from departments
where abbreviation = _department
order by school_id limit 1;
-- get the course id if it exists
select id into _c_id
from courses
where department_id = _d_id and number = _number;
-- if the course does not exist, create it
if _c_id is null then
insert into courses (department_id, number, title) values (_d_id, _number, _title)
returning id into _c_id;
_new_course = true;
end if;
-- get the section id if it exists
select id into _s_id
from sections
where quarter_id = _quarter and course_id = _c_id;
-- if the section does not exist, create it
if _s_id is null then
insert into sections (quarter_id, course_id) values (_quarter, _c_id)
returning id into _s_id;
end if;
_professors = array[_professor1];
if _professor2 is not null then _professors = array_cat(_professors, _professor2); end if;
if _professor3 is not null then _professors = array_cat(_professors, _professor3); end if;
foreach _professor slice 1 in array _professors
loop
if _professor[1] is null then continue; end if;
-- get the professor id if it exists
select id into _p_id
from professors
where last_name = _professor[2] and first_name = _professor[1];
-- if the professor does not exist, create it
if _p_id is null then
insert into professors (first_name, last_name, university_id)
values (_professor[1], _professor[2], _university_id)
returning id into _p_id;
end if;
-- check if the professer is listed under this section
if not exists(select 1
from section_professor sp
where sp.section_id = _s_id and sp.professor_id = _p_id)
then
insert into section_professor (section_id, professor_id) values (_s_id, _p_id);
end if;
end loop;
-- if the course existed, make sure the title is up to date
if not _new_course then
-- get the latest quarter which the course was offered in
select q.id into _latest_quarter
from quarters q
join sections s on q.id = s.quarter_id
join courses c on s.course_id = c.id
where c.id = _c_id and q.university_id = _university_id
order by lower(period) desc
limit 1;
-- if this course info is for the latest quarter, update the title
if _quarter = _latest_quarter then
update courses
set title = _title
where id = _c_id;
end if;
end if;
_count = _count + 1;
end loop;
return _count;
end;
$func$ language plpgsql;"""))
def downgrade():
pass
| agpl-3.0 | -5,163,401,731,827,070,000 | 25.196809 | 106 | 0.591269 | false |
leonardr/botfriend | bots.sample/podcast/__init__.py | 1 | 2324 | from dateutil import parser
from pdb import set_trace
import random
from olipy.ia import Audio
from botfriend.bot import BasicBot
from botfriend.publish.podcast import PodcastPublisher
class PodcastBot(BasicBot):
COLLECTION = "podcasts"
def update_state(self):
# Grab the 100 most recently posted podcasts.
query = Audio.recent("collection:%s" % self.COLLECTION)
max_count = 100
choices = []
a = 0
for audio in query:
choices.append(audio.identifier)
a += 1
if a >= max_count:
break
self.model.json_state = choices
def file(self, item, format_name):
"""Find a file in a specific format."""
for f in item.files:
if f.format == format_name:
return f
return None
def make_post(self, podcast):
"""Convert an Audio object into a post compatible with
the PodcastPublisher.
"""
meta = podcast.metadata
mp3 = self.file(podcast, "VBR MP3")
if not mp3:
# This isn't really a podcast.
return None
title = meta.get('title')
date = parser.parse(
meta.get('date') or meta.get('publicdate')
).strftime("%d %b %Y")
description = meta.get('description', '')
creator = meta.get('creator')
if creator:
byline = " by %s" % creator
else:
byline = ""
detail_url = 'https://archive.org/details/%s' % meta['identifier']
detail_link='<p>Archived at <a href="%s">%s</a>' % (detail_url, detail_url)
template = '<p>Originally published%(byline)s on %(date)s.</p>\n\n%(description)s\n\n%(details)s'
description = template % dict(
details=detail_link,
title=title,
description=description,
date=date,
byline=byline
)
# Create a post compatible with the PodcastPublisher.
return PodcastPublisher.make_post(
self.model, title, mp3.url, description,
media_size=mp3.size, guid=detail_url
)
def new_post(self):
podcast = random.choice(self.model.json_state)
post, is_new = self.make_post(Audio(podcast))
return post
Bot = PodcastBot
| mit | -460,678,980,916,038,300 | 29.986667 | 105 | 0.567986 | false |
mkhutornenko/incubator-aurora | src/test/python/apache/aurora/client/api/test_scheduler_client.py | 1 | 11221 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import time
import unittest
import mock
import pytest
from mox import IgnoreArg, IsA, Mox
from thrift.transport import THttpClient, TTransport
from twitter.common.quantity import Amount, Time
from twitter.common.zookeeper.kazoo_client import TwitterKazooClient
from twitter.common.zookeeper.serverset.endpoint import ServiceInstance
import apache.aurora.client.api.scheduler_client as scheduler_client
from apache.aurora.common.cluster import Cluster
from apache.aurora.common.transport import TRequestsTransport
import gen.apache.aurora.api.AuroraAdmin as AuroraAdmin
import gen.apache.aurora.api.AuroraSchedulerManager as AuroraSchedulerManager
from gen.apache.aurora.api.constants import CURRENT_API_VERSION, DEFAULT_ENVIRONMENT
from gen.apache.aurora.api.ttypes import (
Hosts,
JobConfiguration,
JobKey,
Lock,
LockValidation,
ResourceAggregate,
Response,
ResponseCode,
Result,
RewriteConfigsRequest,
ScheduleStatus,
SessionKey,
TaskQuery
)
ROLE = 'foorole'
JOB_NAME = 'barjobname'
JOB_KEY = JobKey(role=ROLE, environment=DEFAULT_ENVIRONMENT, name=JOB_NAME)
def test_coverage():
"""Make sure a new thrift RPC doesn't get added without minimal test coverage."""
for name, klass in inspect.getmembers(AuroraAdmin) + inspect.getmembers(AuroraSchedulerManager):
if name.endswith('_args'):
rpc_name = name[:-len('_args')]
assert hasattr(TestSchedulerProxyAdminInjection, 'test_%s' % rpc_name), (
'No test defined for RPC %s' % rpc_name)
class TestSchedulerProxy(scheduler_client.SchedulerProxy):
"""In testing we shouldn't use the real SSHAgentAuthenticator."""
def session_key(self):
return self.create_session('SOME_USER')
@classmethod
def create_session(cls, user):
return SessionKey(mechanism='test', data='test')
class TestSchedulerProxyInjection(unittest.TestCase):
def setUp(self):
self.mox = Mox()
self.mox.StubOutClassWithMocks(AuroraAdmin, 'Client')
self.mox.StubOutClassWithMocks(scheduler_client, 'SchedulerClient')
self.mock_scheduler_client = self.mox.CreateMock(scheduler_client.SchedulerClient)
self.mock_thrift_client = self.mox.CreateMock(AuroraAdmin.Client)
scheduler_client.SchedulerClient.get(IgnoreArg(), verbose=IgnoreArg()).AndReturn(
self.mock_scheduler_client)
self.mock_scheduler_client.get_thrift_client().AndReturn(self.mock_thrift_client)
version_resp = Response(responseCode=ResponseCode.OK)
version_resp.result = Result(getVersionResult=CURRENT_API_VERSION)
self.mock_thrift_client.getVersion().AndReturn(version_resp)
def tearDown(self):
self.mox.UnsetStubs()
self.mox.VerifyAll()
def make_scheduler_proxy(self):
return TestSchedulerProxy('local')
def test_startCronJob(self):
self.mock_thrift_client.startCronJob(IsA(JobKey), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().startCronJob(JOB_KEY)
def test_createJob(self):
self.mock_thrift_client.createJob(IsA(JobConfiguration), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().createJob(JobConfiguration())
def test_replaceCronTemplate(self):
self.mock_thrift_client.replaceCronTemplate(IsA(JobConfiguration), IsA(Lock), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().replaceCronTemplate(JobConfiguration(), Lock())
def test_scheduleCronJob(self):
self.mock_thrift_client.scheduleCronJob(IsA(JobConfiguration), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().scheduleCronJob(JobConfiguration())
def test_descheduleCronJob(self):
self.mock_thrift_client.descheduleCronJob(IsA(JobKey), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().descheduleCronJob(JOB_KEY)
def test_populateJobConfig(self):
self.mock_thrift_client.populateJobConfig(IsA(JobConfiguration))
self.mox.ReplayAll()
self.make_scheduler_proxy().populateJobConfig(JobConfiguration())
def test_restartShards(self):
self.mock_thrift_client.restartShards(IsA(JobKey), IgnoreArg(), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().restartShards(JOB_KEY, set([0]))
def test_getTasksStatus(self):
self.mock_thrift_client.getTasksStatus(IsA(TaskQuery))
self.mox.ReplayAll()
self.make_scheduler_proxy().getTasksStatus(TaskQuery())
def test_getJobs(self):
self.mock_thrift_client.getJobs(IgnoreArg())
self.mox.ReplayAll()
self.make_scheduler_proxy().getJobs(ROLE)
def test_killTasks(self):
self.mock_thrift_client.killTasks(IsA(TaskQuery), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().killTasks(TaskQuery())
def test_getQuota(self):
self.mock_thrift_client.getQuota(IgnoreArg())
self.mox.ReplayAll()
self.make_scheduler_proxy().getQuota(ROLE)
def test_startMaintenance(self):
self.mock_thrift_client.startMaintenance(IsA(Hosts), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().startMaintenance(Hosts())
def test_drainHosts(self):
self.mock_thrift_client.drainHosts(IsA(Hosts), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().drainHosts(Hosts())
def test_maintenanceStatus(self):
self.mock_thrift_client.maintenanceStatus(IsA(Hosts), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().maintenanceStatus(Hosts())
def test_endMaintenance(self):
self.mock_thrift_client.endMaintenance(IsA(Hosts), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().endMaintenance(Hosts())
def test_getVersion(self):
self.mock_thrift_client.getVersion()
self.mox.ReplayAll()
self.make_scheduler_proxy().getVersion()
def test_addInstances(self):
self.mock_thrift_client.addInstances(IsA(JobKey), IgnoreArg(), IsA(Lock), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().addInstances(JobKey(), {}, Lock())
def test_acquireLock(self):
self.mock_thrift_client.acquireLock(IsA(Lock), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().acquireLock(Lock())
def test_releaseLock(self):
self.mock_thrift_client.releaseLock(IsA(Lock), IsA(LockValidation), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().releaseLock(Lock(), LockValidation())
class TestSchedulerProxyAdminInjection(TestSchedulerProxyInjection):
def test_setQuota(self):
self.mock_thrift_client.setQuota(IgnoreArg(), IsA(ResourceAggregate), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().setQuota(ROLE, ResourceAggregate())
def test_forceTaskState(self):
self.mock_thrift_client.forceTaskState(IgnoreArg(), IgnoreArg(), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().forceTaskState('taskid', ScheduleStatus.LOST)
def test_performBackup(self):
self.mock_thrift_client.performBackup(IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().performBackup()
def test_listBackups(self):
self.mock_thrift_client.listBackups(IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().listBackups()
def test_stageRecovery(self):
self.mock_thrift_client.stageRecovery(IsA(TaskQuery), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().stageRecovery(TaskQuery())
def test_queryRecovery(self):
self.mock_thrift_client.queryRecovery(IsA(TaskQuery), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().queryRecovery(TaskQuery())
def test_deleteRecoveryTasks(self):
self.mock_thrift_client.deleteRecoveryTasks(IsA(TaskQuery), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().deleteRecoveryTasks(TaskQuery())
def test_commitRecovery(self):
self.mock_thrift_client.commitRecovery(IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().commitRecovery()
def test_unloadRecovery(self):
self.mock_thrift_client.unloadRecovery(IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().unloadRecovery()
def test_snapshot(self):
self.mock_thrift_client.snapshot(IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().snapshot()
def test_rewriteConfigs(self):
self.mock_thrift_client.rewriteConfigs(IsA(RewriteConfigsRequest), IsA(SessionKey))
self.mox.ReplayAll()
self.make_scheduler_proxy().rewriteConfigs(RewriteConfigsRequest())
@pytest.mark.parametrize('scheme', ('http', 'https'))
def test_url_when_not_connected_and_cluster_has_no_proxy_url(scheme):
host = 'some-host.example.com'
port = 31181
mock_zk = mock.MagicMock(spec=TwitterKazooClient)
service_json = '''{
"additionalEndpoints": {
"%(scheme)s": {
"host": "%(host)s",
"port": %(port)d
}
},
"serviceEndpoint": {
"host": "%(host)s",
"port": %(port)d
},
"shard": 0,
"status": "ALIVE"
}''' % dict(host=host, port=port, scheme=scheme)
service_endpoints = [ServiceInstance.unpack(service_json)]
def make_mock_client(proxy_url):
client = scheduler_client.ZookeeperSchedulerClient(Cluster(proxy_url=proxy_url))
client.get_scheduler_serverset = mock.MagicMock(return_value=(mock_zk, service_endpoints))
client.SERVERSET_TIMEOUT = Amount(0, Time.SECONDS)
client._connect_scheduler = mock.MagicMock()
return client
client = make_mock_client(proxy_url=None)
assert client.url == '%s://%s:%d' % (scheme, host, port)
client._connect_scheduler.assert_has_calls([])
client = make_mock_client(proxy_url='https://scheduler.proxy')
assert client.url == 'https://scheduler.proxy'
client._connect_scheduler.assert_has_calls([])
client = make_mock_client(proxy_url=None)
client.get_thrift_client()
assert client.url == '%s://%s:%d' % (scheme, host, port)
client._connect_scheduler.assert_has_calls([mock.call('%s://%s:%d/api' % (scheme, host, port))])
client._connect_scheduler.reset_mock()
client.get_thrift_client()
client._connect_scheduler.assert_has_calls([])
@mock.patch('apache.aurora.client.api.scheduler_client.TRequestsTransport', spec=TRequestsTransport)
def test_connect_scheduler(mock_client):
mock_client.return_value.open.side_effect = [TTransport.TTransportException, True]
mock_time = mock.Mock(spec=time)
scheduler_client.SchedulerClient._connect_scheduler(
'https://scheduler.example.com:1337',
mock_time)
assert mock_client.return_value.open.call_count == 2
mock_time.sleep.assert_called_once_with(
scheduler_client.SchedulerClient.RETRY_TIMEOUT.as_(Time.SECONDS))
| apache-2.0 | 3,804,141,466,639,658,500 | 35.080386 | 100 | 0.733268 | false |
jbteixeir/Openflow-DC-Framework | pox/host_tracker/host_tracker.old.py | 1 | 12063 | # Copyright 2011 Dorgival Guedes
#
# This file is part of POX.
# Some of the arp/openflow-related code was borrowed from dumb_l3_switch.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Keep track of hosts in the network, where they are and how they are
configured (at least MAC/IP addresses)
For the time being, it keeps tables with the information; later, it should
transfer that information to Topology and handle just the actual
discovery/update of host information.
Timer configuration can be changed when needed (e.g., for debugging) using
the launch facility (check timeoutSec dict and PingCtrl.pingLim).
"""
from pox.core import core
import pox
log = core.getLogger()
#import logging
#log.setLevel(logging.WARN)
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.recoco.recoco import Timer
from pox.lib.revent.revent import EventMixin
from ext.Structures.ercs_host import HostJoin, HostTimeout, HostMove
import time
import string
import pox.openflow.libopenflow_01 as of
import pox.openflow.discovery as discovery
# Times (in seconds) to use for different timeouts:
timeoutSec = dict(
arpAware=60*2, # Quiet ARP-responding entries are pinged after this
  arpSilent=60*20, # This is for quiet entries not known to answer ARP
arpReply=4, # Time to wait for an ARP reply before retrial
timerInterval=5, # Seconds between timer routine activations
entryMove=60 # Minimum expected time to move a physical entry
)
# Good values for testing:
# --arpAware=15 --arpSilent=45 --arpReply=1 --entryMove=4
# Another parameter that may be used:
# --pingLim=2
class Alive (object):
""" Holds liveliness information for MAC and IP entries
"""
def __init__ (self, livelinessInterval=timeoutSec['arpAware']):
self.lastTimeSeen = time.time()
self.interval=livelinessInterval
def expired (self):
return time.time() > self.lastTimeSeen + self.interval
def refresh (self):
self.lastTimeSeen = time.time()
class PingCtrl (Alive):
""" Holds information for handling ARP pings for hosts
"""
# Number of ARP ping attemps before deciding it failed
pingLim=3
def __init__ (self):
Alive.__init__(self, timeoutSec['arpReply'])
self.pending = 0
def sent (self):
self.refresh()
self.pending += 1
def failed (self):
return self.pending > PingCtrl.pingLim
def received (self):
# Clear any pending timeouts related to ARP pings
self.pending = 0
class IpEntry (Alive):
"""
This entry keeps track of IP addresses seen from each MAC entry and will
be kept in the macEntry object's ipAddrs dictionary. At least for now,
there is no need to refer to the original macEntry as the code is organized.
"""
def __init__ (self, hasARP):
if hasARP:
Alive.__init__(self,timeoutSec['arpAware'])
else:
Alive.__init__(self,timeoutSec['arpSilent'])
self.hasARP = hasARP
self.pings = PingCtrl()
def setHasARP (self):
if not self.hasARP:
self.hasARP = True
self.interval = timeoutSec['arpAware']
class MacEntry (Alive):
"""
Not strictly an ARP entry.
When it gets moved to Topology, may include other host info, like
services, and it may replace dpid by a general switch object reference
We use the port to determine which port to forward traffic out of.
"""
def __init__ (self, dpid, port, macaddr):
Alive.__init__(self)
self.dpid = dpid
self.port = port
self.macaddr = macaddr
self.ipAddrs = {}
def __str__(self):
return string.join([str(self.dpid), str(self.port), str(self.macaddr)],' ')
def __eq__ (self, other):
if type(other) == type(None):
return type(self) == type(None)
elif type(other) == tuple:
return (self.dpid,self.port,self.macaddr)==other
else:
return (self.dpid,self.port,self.macaddr) \
==(other.dpid,other.port,other.macaddr)
def __ne__ (self, other):
return not self.__eq__(other)
class host_tracker (EventMixin):
_eventMixin_events = set([
HostJoin,
HostTimeout,
HostMove,
])
_core_name = "host_tracker" # we want to be core.host_tracker
def __init__ (self):
# The following tables should go to Topology later
self.entryByMAC = {}
self._t = Timer(timeoutSec['timerInterval'],
self._check_timeouts, recurring=True)
self.listenTo(core)
log.info("host_tracker ready")
# The following two functions should go to Topology also
def getMacEntry(self, macaddr):
try:
result = self.entryByMAC[macaddr]
except KeyError as e:
result = None
return result
def sendPing(self, macEntry, ipAddr):
r = arp() # Builds an "ETH/IP any-to-any ARP packet
r.opcode = arp.REQUEST
r.hwdst = macEntry.macaddr
r.protodst = ipAddr
# src is ETHER_ANY, IP_ANY
e = ethernet(type=ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst)
e.set_payload(r)
log.debug("%i %i sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
msg = of.ofp_packet_out(data = e.pack(),
action = of.ofp_action_output(port = macEntry.port))
if core.openflow.sendToDPID(macEntry.dpid, msg.pack()):
ipEntry = macEntry.ipAddrs[ipAddr]
ipEntry.pings.sent()
else:
# macEntry is stale, remove it.
log.debug("%i %i ERROR sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
del macEntry.ipAddrs[ipAddr]
return
def getSrcIPandARP(self, packet):
"""
This auxiliary function returns the source IPv4 address for packets that
have one (IPv4, ARPv4). Returns None otherwise.
"""
if isinstance(packet, ipv4):
log.debug("IP %s => %s",str(packet.srcip),str(packet.dstip))
return ( packet.srcip, False )
elif isinstance(packet, arp):
log.debug("ARP %s %s => %s",
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(packet.opcode,
'op:%i' % (packet.opcode,)),
str(packet.protosrc), str(packet.protodst))
if packet.hwtype == arp.HW_TYPE_ETHERNET and \
packet.prototype == arp.PROTO_TYPE_IP and \
packet.protosrc != 0:
return ( packet.protosrc, True )
return ( None, False )
def updateIPInfo(self, pckt_srcip, macEntry, hasARP):
""" If there is IP info in the incoming packet, update the macEntry
accordingly. In the past we assumed a 1:1 mapping between MAC and IP
    addresses, but removed that restriction later to accommodate cases
like virtual interfaces (1:n) and distributed packet rewriting (n:1)
"""
if pckt_srcip in macEntry.ipAddrs:
# that entry already has that IP
ipEntry = macEntry.ipAddrs[pckt_srcip]
ipEntry.refresh()
log.debug("%s already has IP %s, refreshing",
str(macEntry), str(pckt_srcip) )
else:
# new mapping
ipEntry = IpEntry(hasARP)
macEntry.ipAddrs[pckt_srcip] = ipEntry
log.info("Learned %s got IP %s", str(macEntry), str(pckt_srcip) )
if hasARP:
ipEntry.pings.received()
def _handle_GoingUpEvent (self, event):
self.listenTo(core.openflow)
log.debug("Up...")
def _handle_PacketIn (self, event):
"""
Populate MAC and IP tables based on incoming packets.
Handles only packets from ports identified as not switch-only.
If a MAC was not seen before, insert it in the MAC table;
    otherwise, update the table and the entry.
    If the packet has a source IP, update that info for the macEntry (may require
    removing the info from another entry previously holding that IP address).
    It does not forward any packets, just extracts info from them.
"""
dpid = event.connection.dpid
inport = event.port
packet = event.parse()
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if packet.type == ethernet.LLDP_TYPE: # Ignore LLDP packets
return
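    # 34525 == 0x86DD (the IPv6 ethertype); IPv6 packets are ignored as well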
if packet.type == 34525:
return
# This should use Topology later
if core.openflow_discovery.isSwitchOnlyPort(dpid, inport):
# No host should be right behind a switch-only port
log.debug("%i %i ignoring packetIn at switch-only port", dpid, inport)
return
log.debug("PacketIn: %i %i ETH %s => %s",
dpid, inport, str(packet.src), str(packet.dst))
# Learn or update dpid/port/MAC info
macEntry = self.getMacEntry(packet.src)
if macEntry == None:
# there is no known host by that MAC
# should we raise a NewHostFound event (at the end)?
macEntry = MacEntry(dpid,inport,packet.src)
self.entryByMAC[packet.src] = macEntry
log.info("Learned %s", str(macEntry))
#/begin FOR ERCS Purpose
(srcip, hasARP) = self.getSrcIPandARP(packet.next)
self.raiseEvent(HostJoin, packet.src, srcip, dpid, inport)
#/end FOR ERCS Purpose
elif macEntry != (dpid, inport, packet.src):
# there is already an entry of host with that MAC, but host has moved
# should we raise a HostMoved event (at the end)?
log.info("Learned %s moved to %i %i", str(macEntry), dpid, inport)
try:
        # if it has not been long since we last heard from it...
if time.time() - macEntry.lastTimeSeen < timeoutSec['entryMove']:
log.warning("Possible duplicate: %s at time %i, now (%i %i), time %i",
                    str(macEntry), macEntry.lastTimeSeen,
dpid, inport, time.time())
# should we create a whole new entry, or keep the previous host info?
# for now, we keep it: IP info, answers pings, etc.
macEntry.dpid = dpid
        macEntry.port = inport
except Exception, e:
#TODO: Fix this later
pass
#/begin FOR ERCS Purpose
#TODO: Should we check for duplicates?
self.raiseEvent(HostMove, packet.src, dpid, inport)
#/end FOR ERCS Purpose
macEntry.refresh()
(pckt_srcip, hasARP) = self.getSrcIPandARP(packet.next)
if pckt_srcip != None:
self.updateIPInfo(pckt_srcip,macEntry,hasARP)
return
def _check_timeouts(self):
for macEntry in self.entryByMAC.values():
entryPinged = False
for ip_addr, ipEntry in macEntry.ipAddrs.items():
ipa = ip_addr
if ipEntry.expired():
if ipEntry.pings.failed():
del macEntry.ipAddrs[ip_addr]
log.info("Entry %s: IP address %s expired",
str(macEntry), str(ip_addr) )
#/begin FOR ERCS Purpose
self.raiseEvent(HostTimeout, macEntry.macaddr, ipa)
#/end FOR ERCS Purpose
else:
self.sendPing(macEntry,ip_addr)
ipEntry.pings.sent()
entryPinged = True
if macEntry.expired() and not entryPinged:
log.info("Entry %s expired", str(macEntry))
#/begin FOR ERCS Purpose
self.raiseEvent(HostTimeout, macEntry.macaddr, ip_addr)
#/end FOR ERCS Purpose
# sanity check: there should be no IP addresses left
if len(macEntry.ipAddrs) > 0:
          for ip_addr in macEntry.ipAddrs.keys():
log.warning("Entry %s expired but still had IP address %s",
str(macEntry), str(ip_addr) )
del macEntry.ipAddrs[ip_addr]
del self.entryByMAC[macEntry.macaddr]
def launch():
core.registerNew(host_tracker)
| gpl-3.0 | -1,858,211,703,470,429,200 | 33.663793 | 80 | 0.655558 | false |
jonaslu/thatswhatsup | python/bytecode/bytecode.py | 1 | 2290 | method_add = {
"code": [
# func add(x,y):
# return x + y
# STORE_NAME 0
# STORE_NAME 1
# LOAD_NAME 0
# LOAD_NAME 1
# ADD_TWO_VALUES
# RET
("STORE_NAME", 0),
("STORE_NAME", 1),
("LOAD_NAME", 0),
("LOAD_NAME", 1),
("ADD_TWO_VALUES", None),
("RET", None)
],
"constants": [],
"names": ["x", "y"],
"args": 2
}
method_main = {
"code": [
# a = 3
# b = 4
# print(add(a, b))
("LOAD_VALUE", 0),
("STORE_NAME", 0),
("LOAD_VALUE", 1),
("STORE_NAME", 1),
("LOAD_NAME", 0),
("LOAD_NAME", 1),
("CALL", 2),
("PRINT", None)
],
"constants": [3, 4, method_add],
"names": ["a", "b"],
"args": 0
}
class Frame:
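    """One frame of the toy interpreter: runs a code block against its own
    value stack and name environment; CALL hands the top two values to a
    child Frame and pushes back its result."""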
def __init__(self, code_block):
self.code_block = code_block
self.stack = []
self.environment = {}
def run(self):
for step in self.code_block["code"]:
instruction, value = step
if instruction == "LOAD_VALUE":
num = self.code_block["constants"][value]
self.stack.append(num)
elif instruction == "LOAD_NAME":
var_name = self.code_block["names"][value]
var_value = self.environment[var_name]
self.stack.append(var_value)
elif instruction == "STORE_NAME":
var_name = self.code_block["names"][value]
self.environment[var_name] = self.stack.pop(0)
elif instruction == "ADD_TWO_VALUES":
op1, op2 = self.stack.pop(0), self.stack.pop(0)
self.stack.append(op1 + op2)
elif instruction == "PRINT":
print(self.stack.pop(0))
elif instruction == "CALL":
code_block = self.code_block["constants"][value]
next_frame = Frame(code_block)
next_frame.stack = self.stack[-2:]
self.stack = self.stack[:-2]
next_frame.run()
if len(next_frame.stack) > 0:
self.stack.append(next_frame.stack[0])
elif instruction == "RET":
break
main_frame = Frame(method_main)
main_frame.run()
| mit | -5,269,336,586,440,504,000 | 26.590361 | 64 | 0.460262 | false |
googleinterns/e2e-convrec | trainer/constants.py | 1 | 1779 | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared Constants."""
import os
INPUT_LENGTH = 512 #1033 - longest training input for redial
TARGET_LENGTH = 128 #159 - longest training target for redial
BASE_DIR = "gs://e2e_central"
DATA_DIR = os.path.join(BASE_DIR, "data")
MODELS_DIR = os.path.join(BASE_DIR, "models")
BASE_PRETRAINED_DIR = "gs://t5-data/pretrained_models"
RD_JSONL_DIR = "gs://e2e_central/data/redial/"
RD_JSONL_PATH = {
"train": os.path.join(RD_JSONL_DIR, "rd-train-formatted.jsonl"),
"validation": os.path.join(RD_JSONL_DIR, "rd-test-formatted.jsonl")
}
RD_COUNTS_PATH = os.path.join(DATA_DIR, "rd-counts.json")
RD_TSV_PATH = {
"train": os.path.join(DATA_DIR, "rd-train.tsv"),
"validation": os.path.join(DATA_DIR, "rd-validation.tsv")
}
ML_SEQ_TSV_PATH = {
"train": os.path.join(DATA_DIR, "ml-sequences-train.tsv"),
"validation": os.path.join(DATA_DIR, "ml-sequences-validation.tsv")
}
ML_TAGS_TSV_PATH = {
"train": os.path.join(DATA_DIR, "ml-tags-train.tsv"),
"validation": os.path.join(DATA_DIR, "ml-tags-validation.tsv")
}
ML_TAGS_MASKED_TSV_PATH = {
"train": os.path.join(DATA_DIR, "ml-tags-train-masked-3.tsv"),
"validation": os.path.join(DATA_DIR, "ml-tags-validation-masked-3.tsv")
}
| apache-2.0 | 3,666,765,548,474,841,000 | 39.431818 | 75 | 0.704328 | false |
MWisBest/PyBot | Commands/xbox/xbox.py | 1 | 2126 | ###########################################################################
## PyBot ##
## Copyright (C) 2015, Kyle Repinski ##
## Copyright (C) 2015, Andres Preciado (Glitch) ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
###########################################################################
import __main__, requests
from pybotutils import fixHTMLChars, strbetween
info = { "names" : [ "xbox", "xb" ], "access" : 0, "version" : 1 }
def command( message, user, recvfrom ):
txt = requests.get( "https://live.xbox.com/en-US/Profile?gamertag=" + message ).text
gamerscore = fixHTMLChars( strbetween( txt, "<div class=\"gamerscore\">", "</div>" ) )
lastseen = fixHTMLChars( strbetween( txt, "<div class=\"presence\">", "</div>" ) )
gamertag = fixHTMLChars( strbetween( txt, "<title>", "'s Profile" ) ) #get proper case of gamertag
if gamerscore != "":
__main__.sendMessage( gamertag + " :: Status: " + lastseen + " :: Gamerscore: " + gamerscore, recvfrom )
else:
__main__.sendMessage( message + " was not found.", recvfrom )
return True
| gpl-3.0 | -3,655,683,782,739,206,700 | 63.424242 | 106 | 0.489652 | false |
rancher/validation-tests | tests/v2_validation/cattlevalidationtest/core/test_services_lb_ssl_with_cert_from_volume.py | 1 | 27225 | from common_fixtures import * # NOQA
from test_storage_nfs_driver import check_for_nfs_driver
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
if_certs_available = pytest.mark.skipif(
not os.path.isdir(SSLCERT_SUBDIR),
reason='ssl cert file directory not found')
dom_list = readDataFile(SSLCERT_SUBDIR, "certlist.txt").rstrip().split(",")
test_cert_con = {}
cert_change_interval = os.environ.get('CATTLE_CERT_CHANGE_INTERVAL',
'45')
if_rancher_nfs_enabled = pytest.mark.skipif(
not os.environ.get('RANCHER_NFS_ENABLED'),
reason='Rancher NFS test not enabled')
volume_driver = "rancher-nfs"
service_names_list = ["lb-withselectorlinks", "s1", "s2"]
shared_vol_name = "mytestcerts" + "-" + random_str()
@pytest.fixture(scope='session')
def test_cert_container(client, request):
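    """Session-scoped fixture backing the cert-from-volume tests.

    Creates the shared "rancher-nfs" volume and starts a helper
    "testclient" stack that mounts it; the cert helper functions below
    ssh into that container (root@<agent-ip>:7890) to create, edit and
    delete certificate files on the volume.
    """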
assert check_for_nfs_driver(client)
    volume = client.create_volume(driver=volume_driver,
                                  name=shared_vol_name)
volume = wait_for_condition(client,
volume,
lambda x: x.state == "inactive",
lambda x: 'Volume state is ' + x.state)
assert volume.state == "inactive"
stack_name = \
random_str().replace("-", "") + "-lb-vol-client"
dc_yml = readDataFile(SSLCERT_SUBDIR, "haproxycert_testclient_dc.yml")
dc_yml = dc_yml.replace("$volname", shared_vol_name)
with open(os.path.join(SSLCERT_SUBDIR, "m_haproxycert_testclient_dc.yml"),
"wt") as fout:
fout.write(dc_yml)
fout.close()
stack, services = create_stack_with_multiple_service_using_rancher_cli(
client, stack_name, ["testclient"],
SSLCERT_SUBDIR,
"m_haproxycert_testclient_dc.yml")
assert services["testclient"].state == "active"
service_cons = client.list_service(
uuid=services["testclient"].uuid,
include="instances",
).data
assert len(service_cons) == 1
assert len(service_cons[0].instances) == 1
con_info = client.list_container(
uuid=service_cons[0].instances[0].uuid,
include="hosts").data
assert len(con_info) == 1
test_cert_con["con"] = con_info[0]
test_cert_con["host"] = con_info[0].hosts[0].agentIpAddress
test_cert_con["port"] = "7890"
def remove_test_cert_client():
delete_all(client, [stack])
request.addfinalizer(remove_test_cert_client)
def create_lb_services_ssl_with_cert(client,
stack_name, service_names,
lb_port, label,
dc_yml_file, rc_yml_file,
default_domain=None, domains=None):
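    """Upload the initial certs to the shared volume, deploy the LB and
    target services from the cert-from-volume compose templates, then
    verify HTTPS access through the LB for the default domain and every
    additional domain.

    Returns (stack, target_services, lb_service, test_ssl_client_con).
    """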
upload_initial_certs(domains, default_domain)
client_port = lb_port + "0"
dc_yml = readDataFile(SSLCERT_SUBDIR, dc_yml_file)
rc_yml = readDataFile(SSLCERT_SUBDIR, rc_yml_file)
dc_yml = dc_yml.replace("$lbimage", get_lb_image_version(client))
dc_yml = dc_yml.replace("$label", label)
dc_yml = dc_yml.replace("$port", lb_port)
dc_yml = dc_yml.replace("$volname", shared_vol_name)
rc_yml = rc_yml.replace("$label", label)
rc_yml = rc_yml.replace("$port", lb_port)
modified_dc_yml_file = "lb_cert_dc.yml"
modified_rc_yml_file = "lb_cert_rc.yml"
with open(os.path.join(SSLCERT_SUBDIR, modified_dc_yml_file),
"wt") as fout:
fout.write(dc_yml)
fout.close()
with open(os.path.join(SSLCERT_SUBDIR, modified_rc_yml_file),
"wt") as fout:
fout.write(rc_yml)
fout.close()
stack, services = create_stack_with_multiple_service_using_rancher_cli(
client, stack_name, service_names,
SSLCERT_SUBDIR,
modified_dc_yml_file,
modified_rc_yml_file)
lb_service = services[service_names[0]]
target_services = [services[service_names[1]], services[service_names[2]]]
test_ssl_client_con = create_client_container_for_ssh(client_port)
validate_lb_services_ssl(client, test_ssl_client_con,
target_services, lb_service,
lb_port, default_domain, domains)
return stack, target_services, lb_service, test_ssl_client_con
def upload_initial_certs(cert_list, default_cert=None):
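    """Recreate /certs/mycerts and /certs/default.com on the cert volume,
    adding one certificate directory per domain in cert_list plus the
    optional default cert.
    """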
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
test_cert_con["host"], username="root",
password="root", port=int(test_cert_con["port"]))
cmd = "mkdir -p /certs/mycerts;"
cmd += "cd /certs/mycerts;rm -rf *;"
cmd += "mkdir -p /certs/default.com;"
cmd += "cd /certs/default.com;rm -rf *;"
for domain_name in cert_list:
cmd += "cd /certs/mycerts;"
cmd += cmd_for_cert_creation(domain_name)
cmd += "cd /certs/default.com;"
if default_cert is not None:
cmd += cmd_for_cert_creation(default_cert)
print(cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
logger.info(response)
def upload_additional_certs(cert_list=None, default_cert=None):
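    """Add certificate directories for extra domains and/or a new default
    cert without wiping the existing ones.
    """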
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
test_cert_con["host"], username="root",
password="root", port=int(test_cert_con["port"]))
    # accumulate all cert commands so they run in a single ssh session
    cmd = ""
    if cert_list is not None:
        for domain_name in cert_list:
            cmd += "cd /certs/mycerts;"
            cmd += cmd_for_cert_creation(domain_name)
    if default_cert is not None:
        cmd += "cd /certs/default.com;"
        cmd += cmd_for_cert_creation(default_cert)
print(cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
logger.info(response)
def edit_existing_certs(existing_cert, modified_cert, is_default_cert=False):
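    """Overwrite the cert/key files of an existing certificate directory
    with the material of another domain, simulating an in-place
    certificate update.
    """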
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
test_cert_con["host"], username="root",
password="root", port=int(test_cert_con["port"]))
cert, key, certChain = get_cert_for_domain(modified_cert)
cert_file = existing_cert + ".crt"
key_file = existing_cert + ".key"
if is_default_cert:
cmd = "cd /certs/default.com/" + existing_cert + ";"
else:
cmd = "cd /certs/mycerts/" + existing_cert + ";"
cmd += 'echo "' + cert + '" > ' + cert_file + ";"
cmd += 'echo "' + key + '" > ' + key_file + ";"
print(cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
logger.info(response)
def delete_existing_certs(cert_list=None, default_cert=None):
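    """Remove certificate directories for the given domains and/or the
    default cert from the shared volume.
    """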
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
test_cert_con["host"], username="root",
password="root", port=int(test_cert_con["port"]))
    # accumulate all delete commands so they run in a single ssh session
    cmd = ""
    if cert_list is not None:
        for domain_name in cert_list:
            cmd += "cd /certs/mycerts;"
            cmd += "rm -rf " + domain_name + ";"
    if default_cert is not None:
        cmd += "cd /certs/default.com;"
        cmd += "rm -rf " + default_cert + ";"
print(cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
logger.info(response)
def cmd_for_cert_creation(domain_name):
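    """Return the shell snippet that creates a <domain> directory holding
    <domain>.crt, <domain>.key and the fullchain.pem/privkey.pem symlinks.
    """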
cert, key, certChain = get_cert_for_domain(domain_name)
cmd = "mkdir " + domain_name + ";"
cmd += "cd " + domain_name + ";"
cert_file = domain_name + ".crt"
key_file = domain_name + ".key"
cmd += 'echo "' + cert + '" > ' + cert_file + ";"
cmd += 'echo "' + key + '" > ' + key_file + ";"
cmd += "ln -s " + key_file + " fullchain.pem;"
cmd += "ln -s " + cert_file + " privkey.pem;"
return cmd
def validate_lb_services_ssl(client, test_ssl_client_con,
services, lb_service, ssl_port,
default_domain=None, domains=None):
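    """Wait for the LB to become active, then verify both target services
    are reachable over SSL for the default domain and every extra domain.
    """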
wait_for_lb_service_to_become_active(client,
services, lb_service)
supported_domains = []
if default_domain is not None:
supported_domains.append(default_domain)
if domains:
supported_domains.extend(domains)
for domain in supported_domains:
validate_lb_service(client,
lb_service, ssl_port,
[services[0]],
"test1.com", "/service1.html", domain,
test_ssl_client_con)
validate_lb_service(client,
lb_service, ssl_port, [services[1]],
"test2.com", "/service2.html", domain,
test_ssl_client_con)
@if_rancher_nfs_enabled
def test_lb_ssl_with_certs_and_default_cert(client,
socat_containers,
rancher_cli_container,
test_cert_container):
default_domain = dom_list[0]
domain_list = [dom_list[1], dom_list[2]]
port = "5656"
label = "test1"
stack_name = \
random_str().replace("-", "") + "-withcertanddefaultcert"
env, services, lb_service, test_ssl_client_con = \
create_lb_services_ssl_with_cert(client,
stack_name,
service_names_list,
port, label,
"haproxycert_dc.yml",
"haproxycert_rc.yml",
default_domain, domain_list)
    # Attempting to access LB rules with a cert other than the supported
    # default cert/cert list should return a certificate error
cert = dom_list[3]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
delete_all(client, [env, test_ssl_client_con["container"]])
@if_rancher_nfs_enabled
def test_lb_ssl_with_certs_and_default_cert_scaleup_lb(client,
socat_containers,
rancher_cli_container,
test_cert_container):
default_domain = dom_list[0]
domain_list = [dom_list[1], dom_list[2]]
port = "5655"
label = "test2"
stack_name = \
random_str().replace("-", "") + "-withcertanddefaultcert"
env, services, lb_service, test_ssl_client_con = \
create_lb_services_ssl_with_cert(client,
stack_name,
service_names_list,
port, label,
"haproxycert_dc.yml",
"haproxycert_rc.yml",
default_domain, domain_list)
    # Attempting to access LB rules with a cert other than the supported
    # default cert/cert list should return a certificate error
cert = dom_list[3]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
final_lb_scale = 2
lb_service = client.update(lb_service, scale=final_lb_scale,
name=lb_service.name)
lb_service = client.wait_success(lb_service, 120)
assert lb_service.state == "active"
assert lb_service.scale == final_lb_scale
validate_lb_services_ssl(client, test_ssl_client_con,
services, lb_service,
port, default_domain, domain_list)
    # Attempting to access LB rules with a cert other than the supported
    # default cert/cert list should return a certificate error
cert = dom_list[3]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
delete_all(client, [env, test_ssl_client_con["container"]])
@if_rancher_nfs_enabled
def test_lb_ssl_with_certs_and_default_cert_scaleup_target(
client, socat_containers,
rancher_cli_container,
test_cert_container):
default_domain = dom_list[0]
domain_list = [dom_list[1], dom_list[2]]
port = "5654"
label = "test3"
stack_name = \
random_str().replace("-", "") + "-withcertanddefaultcert"
env, services, lb_service, test_ssl_client_con = \
create_lb_services_ssl_with_cert(client,
stack_name,
service_names_list,
port, label,
"haproxycert_dc.yml",
"haproxycert_rc.yml",
default_domain, domain_list)
    # Attempting to access LB rules with a cert other than the supported
    # default cert/cert list should return a certificate error
cert = dom_list[3]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
# Scale up target service
final_service_scale = 3
services[0] = client.update(services[0], scale=final_service_scale,
name=services[0].name)
services[0] = client.wait_success(services[0], 120)
assert services[0].state == "active"
assert services[0].scale == final_service_scale
validate_lb_services_ssl(client, test_ssl_client_con,
services, lb_service,
port, default_domain, domain_list)
    # Attempting to access LB rules with a cert other than the supported
    # default cert/cert list should return a certificate error
cert = dom_list[3]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
delete_all(client, [env, test_ssl_client_con["container"]])
@if_rancher_nfs_enabled
def test_lb_ssl_with_default_cert_add(client,
socat_containers,
rancher_cli_container,
test_cert_container):
default_domain = dom_list[0]
domain_list = [dom_list[1], dom_list[2]]
port = "5657"
label = "test3"
stack_name = \
random_str().replace("-", "") + "-withcertanddefaultcert-addcert"
env, services, lb_service, test_ssl_client_con = \
create_lb_services_ssl_with_cert(client,
stack_name,
service_names_list,
port, label,
"haproxycert_dc.yml",
"haproxycert_rc.yml",
default_domain, domain_list)
cert = dom_list[3]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
# add cert
upload_additional_certs(cert_list=[cert], default_cert=None)
time.sleep(int(cert_change_interval))
# Should be able to access LB using the newly added cert
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", cert,
test_ssl_client_con)
delete_all(client, [env, test_ssl_client_con["container"]])
@if_rancher_nfs_enabled
def test_lb_ssl_with_default_cert_delete(client,
socat_containers,
rancher_cli_container,
test_cert_container):
default_domain = dom_list[0]
domain_list = [dom_list[1], dom_list[2]]
port = "5658"
label = "test5"
stack_name = \
random_str().replace("-", "") + "-withcertanddefaultcert-deletecert"
env, services, lb_service, test_ssl_client_con = \
create_lb_services_ssl_with_cert(client,
stack_name,
service_names_list,
port, label,
"haproxycert_dc.yml",
"haproxycert_rc.yml",
default_domain, domain_list)
# Delete cert
cert = dom_list[2]
delete_existing_certs(cert_list=[cert], default_cert=None)
time.sleep(int(cert_change_interval))
# Existing certs should continue to work
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", dom_list[0],
test_ssl_client_con)
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", dom_list[1],
test_ssl_client_con)
    # Attempting to access LB rules using the deleted cert should return
# certificate error
cert = dom_list[2]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
delete_all(client, [env, test_ssl_client_con["container"]])
@if_rancher_nfs_enabled
def test_lb_ssl_with_default_cert_edit(client,
socat_containers,
rancher_cli_container,
test_cert_container):
default_domain = dom_list[0]
domain_list = [dom_list[1], dom_list[2]]
port = "5659"
label = "test6"
stack_name = \
random_str().replace("-", "") + "-withcertanddefaultcert-editcert"
env, services, lb_service, test_ssl_client_con = \
create_lb_services_ssl_with_cert(client,
stack_name,
service_names_list,
port, label,
"haproxycert_dc.yml",
"haproxycert_rc.yml",
default_domain, domain_list)
# Edit cert contents to point to a different domain
existing_cert = dom_list[2]
modified_cert = dom_list[3]
edit_existing_certs(existing_cert, modified_cert, is_default_cert=False)
time.sleep(int(cert_change_interval))
# Existing certs should continue to work
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", dom_list[0],
test_ssl_client_con)
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", dom_list[1],
test_ssl_client_con)
# Attempting to access LB rules using the new value for the modified cert
# should succeed
cert = dom_list[3]
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", cert,
test_ssl_client_con)
# Attempting to access LB rules using the older value of the modified cert
# should fail
cert = dom_list[2]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
delete_all(client, [env, test_ssl_client_con["container"]])
@if_rancher_nfs_enabled
def test_lb_ssl_delete_default_cert(client,
socat_containers,
rancher_cli_container,
test_cert_container):
default_domain = dom_list[0]
domain_list = [dom_list[1], dom_list[2]]
port = "5660"
label = "test7"
stack_name = \
random_str().replace("-", "") + "-withcertanddefaultcert-deletecert"
env, services, lb_service, test_ssl_client_con = \
create_lb_services_ssl_with_cert(client,
stack_name,
service_names_list,
port, label,
"haproxycert_dc.yml",
"haproxycert_rc.yml",
default_domain, domain_list)
# Delete default cert
cert = dom_list[0]
delete_existing_certs(cert_list=None, default_cert=cert)
time.sleep(int(cert_change_interval))
# Existing certs should continue to work
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", dom_list[1],
test_ssl_client_con)
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", dom_list[2],
test_ssl_client_con)
# Attempting to access LB rules using the deleted cert should return
# certificate error (strict sni check)
cert = dom_list[0]
validate_cert_error(client, lb_service, port, cert,
None, cert,
test_ssl_client_con=test_ssl_client_con,
strict_sni_check=True)
# Attempting to access LB rules using certs other than any certs in the
# cert list should return certificate error (strict sni check)
cert = dom_list[3]
validate_cert_error(client, lb_service, port, cert,
None, cert,
test_ssl_client_con=test_ssl_client_con,
strict_sni_check=True)
delete_all(client, [env, test_ssl_client_con["container"]])
@if_rancher_nfs_enabled
def test_lb_ssl_edit_default_cert(client,
socat_containers,
rancher_cli_container,
test_cert_container):
default_domain = dom_list[0]
domain_list = [dom_list[1], dom_list[2]]
port = "5661"
label = "test8"
stack_name = \
random_str().replace("-", "") + "-withcertanddefaultcert-editcert"
env, services, lb_service, test_ssl_client_con = \
create_lb_services_ssl_with_cert(client,
stack_name,
service_names_list,
port, label,
"haproxycert_dc.yml",
"haproxycert_rc.yml",
default_domain, domain_list)
# Edit cert contents to point to a different domain
existing_cert = dom_list[0]
modified_cert = dom_list[3]
edit_existing_certs(existing_cert, modified_cert, is_default_cert=True)
time.sleep(int(cert_change_interval))
# Existing certs should continue to work
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", dom_list[1],
test_ssl_client_con)
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", dom_list[2],
test_ssl_client_con)
# Attempting to access LB rules using the new value for the modified cert
# should succeed
cert = dom_list[3]
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", cert,
test_ssl_client_con)
# Attempting to access LB rules using the older value of the modified cert
# should fail and new default cert should be presented to the user
default_domain = dom_list[3]
cert = dom_list[0]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
cert = dom_list[4]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
delete_all(client, [env, test_ssl_client_con["container"]])
@if_rancher_nfs_enabled
def test_lb_ssl_add_default_cert(client,
socat_containers,
rancher_cli_container,
test_cert_container):
domain_list = [dom_list[1], dom_list[2]]
port = "5662"
label = "test9"
stack_name = \
random_str().replace("-", "") + "-withcert-add-defaultcert"
env, services, lb_service, test_ssl_client_con = \
create_lb_services_ssl_with_cert(client,
stack_name,
service_names_list,
port, label,
"haproxycert_dc.yml",
"haproxycert_rc.yml",
None, domain_list)
# Attempting to access LB rules using any cert other than certs from
# cert list should result in certificate error (strict sni check)
cert = dom_list[0]
validate_cert_error(client, lb_service, port, cert,
None, cert,
test_ssl_client_con=test_ssl_client_con,
strict_sni_check=True)
cert = dom_list[3]
validate_cert_error(client, lb_service, port, cert,
None, cert,
test_ssl_client_con=test_ssl_client_con,
strict_sni_check=True)
default_domain = dom_list[0]
# add default cert
upload_additional_certs(cert_list=[], default_cert=default_domain)
time.sleep(int(cert_change_interval))
# Attempting to access LB rules using the newly added default cert
# should succeed
validate_lb_service(client,
lb_service, port, [services[0]],
"test1.com", "/service1.html", default_domain,
test_ssl_client_con)
# Attempting to access LB rules using any cert other than certs from
# cert list should result in certificate error with default cert
# being presented to the user
cert = dom_list[3]
validate_cert_error(client, lb_service, port, cert,
default_domain, cert,
test_ssl_client_con=test_ssl_client_con)
delete_all(client, [env, test_ssl_client_con["container"]])
| apache-2.0 | 4,385,412,779,574,357,500 | 37.837375 | 78 | 0.53124 | false |
pneff/wsgiservice | tests/test_application.py | 1 | 20489 | import io
from datetime import timedelta
from mox3 import mox
import wsgiservice
import wsgiservice.application
import wsgiservice.exceptions
from webob import Request
def test_getapp():
"""get_app returns a list of resources from the dictionary."""
app = wsgiservice.get_app(globals())
print(app)
print(app._resources)
assert isinstance(app, wsgiservice.application.Application)
assert len(app._resources) == 7
resources = (Resource1, Resource2, Resource3, Resource4, Resource5,
Resource6)
assert app._resources[0] in resources
assert app._resources[1] in resources
assert app._resources[2] in resources
assert app._resources[3] in resources
assert app._resources[4] in resources
assert app._resources[5] in resources
def test_app_handle_404():
"""Application returns a 404 status code if no resource is found."""
app = wsgiservice.get_app(globals())
req = Request.blank('/foo', {'HTTP_ACCEPT': 'text/xml'})
res = app._handle_request(req)
print(res)
assert res.status == '404 Not Found'
assert res.body == b'<response><error>' \
b'The requested resource does not exist.</error></response>'
assert res.headers['Content-Type'] == 'text/xml; charset=UTF-8'
def test_app_handle_method_not_allowed():
"""Application returns 405 for known but unimplemented methods."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'GET'})
res = app._handle_request(req)
print(res)
assert res.status == '405 Method Not Allowed'
assert res.body == b''
assert res._headers['Allow'] == 'OPTIONS, POST, PUT'
def test_app_handle_method_not_known():
"""Application returns 501 for unknown and unimplemented methods."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'PATCH'})
res = app._handle_request(req)
print(res)
assert res.status == '501 Not Implemented'
assert res.body == b''
assert res._headers['Allow'] == 'OPTIONS, POST, PUT'
def test_app_handle_response_201_abs():
"""raise_201 used the location header directly if it is absolute."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(res)
assert res.status == '201 Created'
assert res.body == b''
assert res.location == '/res2/test'
def test_app_handle_response_201_rel():
"""raise_201 adds relative location header to the current request path."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'PUT'})
res = app._handle_request(req)
print(res)
assert res.status == '201 Created'
assert res.body == b''
assert res.location == '/res2/foo'
def test_app_handle_response_201_ext():
"""raise_201 ignores extension in the current path."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2.json', {'REQUEST_METHOD': 'PUT'})
res = app._handle_request(req)
print(res)
assert res.status == '201 Created'
assert res.body == b''
assert res.location == '/res2/foo'
def test_app_handle_options():
"""Resource provides a good default for the OPTIONS method."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'OPTIONS'})
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res._headers['Allow'] == 'OPTIONS, POST, PUT'
def test_app_get_simple():
"""Application handles GET request and ignored POST data in that case."""
app = wsgiservice.get_app(globals())
body = b'foo=42&baz=foobar'
req = Request.blank('/res1/theid', {
'CONTENT_LENGTH': str(len(body)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': io.BytesIO(body)})
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res._headers['Content-MD5'] == '8d5a8ef21b4afff94c937faabfdf11fa'
assert res.body == b"<response>GET was called with id theid, " \
b"foo None</response>"
def test_app_head_revert_to_get_simple():
"""Application converts a HEAD to a GET request but doesn't send body."""
app = wsgiservice.get_app(globals())
body = b'foo=42&baz=foobar'
req = Request.blank('/res1/theid', {
'REQUEST_METHOD': 'HEAD',
'CONTENT_LENGTH': str(len(body)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': io.BytesIO(body)})
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res.body == b''
def test_app_post_simple():
"""Application handles normal POST request."""
app = wsgiservice.get_app(globals())
body = b'foo=42&baz=foobar'
req = Request.blank('/res1/theid', {
'REQUEST_METHOD': 'POST', 'CONTENT_LENGTH': str(len(body)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': io.BytesIO(body)})
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res.body == b"<response>POST was called with id theid, " \
b"foo 42</response>"
def test_app_wsgi():
"""Application instance works as a WSGI application."""
app = wsgiservice.get_app(globals())
env = Request.blank('/res1/theid.json').environ
start_response = mox.MockAnything()
start_response('200 OK', [('Content-Length', '40'),
('Content-Type', 'application/json; charset=UTF-8'),
('Content-MD5', 'd6fe631718727b542d2ecb70dfd41e4b')])
mox.Replay(start_response)
res = app(env, start_response)
print(res)
mox.Verify(start_response)
assert res == [b'"GET was called with id theid, foo None"']
def test_validation_method():
"""Resource validates a method parameter which was set on the method."""
inst = Resource1(None, None, None)
inst.validate_param(inst.POST, 'foo', '9')
def test_validation_class():
"""Resource validates a method parameter which was set on the class."""
inst = Resource1(None, None, None)
inst.validate_param(inst.GET, 'id', 'anyid')
def test_validation_with_re_none_value():
"""Resource rejects empty values if a validation is defined."""
inst = Resource1(None, None, None)
try:
inst.validate_param(inst.GET, 'id', None)
except wsgiservice.exceptions.ValidationException as e:
print(e)
assert str(e) == 'Value for id must not be empty.'
else:
assert False, "Expected an exception!"
def test_validation_with_re_mismatch():
"""Resource rejects invalid values by regular expression."""
inst = Resource1(None, None, None)
try:
inst.validate_param(inst.GET, 'id', 'fo')
except wsgiservice.exceptions.ValidationException as e:
print(e)
assert str(e) == 'id value fo does not validate.'
else:
assert False, "Expected an exception!"
def test_validation_with_re_mismatch_toolong():
"""Resource rejects invalid values by regular expression."""
inst = Resource1(None, None, None)
try:
inst.validate_param(inst.GET, 'id', 'fooobarrr')
except wsgiservice.exceptions.ValidationException as e:
print(e)
assert str(e) == 'id value fooobarrr does not validate.'
else:
assert False, "Expected an exception!"
def test_with_expires():
"""expires decorator correctly sets the Cache-Control header."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res3')
res = app._handle_request(req)
print(str(res))
print(res._headers)
assert res._headers['Cache-Control'] == 'max-age=86400'
def test_with_expires_vary():
"""expires decorator can set the Vary header."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res6/uid')
res = app._handle_request(req)
print(str(res))
print(res._headers)
assert res._headers['Cache-Control'] == 'max-age=86400'
vary = res._headers['Vary'].split(', ')
assert len(vary) == 2
assert 'Authorization' in vary
assert 'Accept' in vary
def test_with_expires_calculations():
"""expires decorator correctly sets the Expires header."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4')
res = app._handle_request(req)
print(res._headers)
assert res._headers['Cache-Control'] == 'max-age=138'
assert res._headers['Expires'] == 'Mon, 20 Apr 2009 17:55:45 GMT'
def test_with_expires_calculations_double_wrapped():
"""Wrapped expires decorators work by just using the last one."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4', {'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(str(res))
print(res._headers)
assert res._headers['Cache-Control'] == 'max-age=138'
assert res._headers['Expires'] == 'Mon, 20 Apr 2009 17:55:45 GMT'
def test_etag_generate():
"""ETags are calculated by adding the extension to the custom etag."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid')
res = app._handle_request(req)
print(res._headers)
assert res._headers['ETag'] == '"myid_xml"'
def test_etag_generate_json():
"""ETags are calculated by adding the extension to the custom etag."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid', {'HTTP_ACCEPT': 'application/json'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_json"'
def test_etag_generate_json_ext():
"""ETags are calculated by adding the extension to the custom etag."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4.json?id=myid')
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_json"'
def test_etag_if_match_false():
"""A GET request with a non-matching If-Match returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_MATCH': '"otherid"'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '412 Precondition Failed'
def test_etag_if_match_true():
"""A GET request with a matching If-Match passes."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid', {'HTTP_IF_MATCH': '"myid_xml"'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '200 OK'
def test_etag_if_match_not_set():
"""A GET request without an If-Match header passes."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid')
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '200 OK'
def test_etag_if_none_match_get_true():
"""A GET request with a matching If-None-Match returns 304."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid', {'HTTP_IF_NONE_MATCH': '"myid_xml"'})
res = app._handle_request(req)
print(res)
assert res.body == b''
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '304 Not Modified'
assert 'Content-Type' not in res.headers
def test_etag_if_none_match_head_true():
"""A HEAD request with a matching If-None-Match returns 304."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_NONE_MATCH': '"myid_xml"', 'REQUEST_METHOD': 'HEAD'})
res = app._handle_request(req)
print(res)
assert res.body == b''
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '304 Not Modified'
def test_etag_if_none_match_post_true():
"""A POST request with a matching If-None-Match returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_NONE_MATCH': '"myid_xml"', 'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '412 Precondition Failed'
def test_etag_if_none_match_false():
"""A GET request with a non-matching If-None-Match executes normally."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_NONE_MATCH': '"otherid"'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '200 OK'
def test_modified_generate():
"""Resource generates a good Last-Modified response header."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid')
res = app._handle_request(req)
print(res._headers)
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
def test_if_modified_since_false():
"""A GET request with a matching If-Modified-Since returns 304."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_MODIFIED_SINCE': 'Fri, 01 May 2009 14:30:00 GMT'})
res = app._handle_request(req)
print(res)
assert res.body == b''
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '304 Not Modified'
def test_if_modified_since_true():
"""A GET request with an outdated If-Modified-Since passes."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_MODIFIED_SINCE': 'Fri, 01 May 2009 14:18:10 GMT'})
res = app._handle_request(req)
print(res)
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '200 OK'
def test_if_unmodified_since_false():
"""A GET request with an outdated If-Unmodified-Since returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_UNMODIFIED_SINCE': 'Fri, 01 May 2009 12:30:00 GMT'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '412 Precondition Failed'
def test_if_unmodified_since_false_head():
"""A HEAD request with an outdated If-Unmodified-Since returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_UNMODIFIED_SINCE': 'Thu, 30 Apr 2009 19:30:00 GMT',
'REQUEST_METHOD': 'HEAD'})
res = app._handle_request(req)
print(res)
assert res.body == b''
assert res._headers['ETag'] == '"myid_xml"'
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '412 Precondition Failed'
def test_if_unmodified_since_false_post():
"""A POST request with an outdated If-Unmodified-Since returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_UNMODIFIED_SINCE': 'Thu, 30 Apr 2009 19:30:00 GMT',
'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(res)
print(res.status)
assert res._headers['ETag'] == '"myid_xml"'
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '412 Precondition Failed'
def test_if_unmodified_since_true():
"""A GET request with a current If-Unmodified-Since returns 200."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_UNMODIFIED_SINCE': 'Fri, 01 May 2009 14:30:00 GMT',
'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '200 OK'
def test_verify_content_md5_invalid():
"""A request with a body that does not match Content-MD5 returns 400."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res1/theid', {
'HTTP_CONTENT_MD5': '89d5739baabbbe65be35cbe61c88e06d',
'wsgi.input': io.BytesIO(b'foobar')})
res = app._handle_request(req)
print(res)
print(res.status)
print(res._headers)
assert 'ETag' not in res._headers
assert 'Last-Modified' not in res._headers
assert res.status == '400 Bad Request'
assert res.body == b'<response><error>Invalid Content-MD5 request ' \
b'header.</error></response>'
def test_verify_content_md5_valid():
"""A request with a body that matches Content-MD5 passes."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res1/theid', {
'HTTP_CONTENT_MD5': '89d5739baabbbe65be35cbe61c88e06d',
})
req.body_file = io.BytesIO(b'Foobar')
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
def test_exception_json():
"""An exception is serialized as a dictionary in JSON."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res5?throw=1', {'HTTP_ACCEPT': 'application/json'})
res = app._handle_request(req)
print(res)
assert res.status == '500 Internal Server Error'
assert res.body == b'{"error": "Some random exception."}'
def test_exception_xml():
"""An exception is serialized as an error response in XML."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res5?throw=1')
res = app._handle_request(req)
print(res)
assert res.status == '500 Internal Server Error'
assert res.body == b'<response><error>Some random exception.' \
b'</error></response>'
def test_res6_default():
"""Resource6 works normally for keys which exist on the resource."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res6/uid')
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res.body == b'<response>works</response>'
def test_notfound_xml():
"""Requests that cause a NOT_FOUND exception return 404."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res6/foo')
res = app._handle_request(req)
print(res)
assert res.status == '404 Not Found'
assert res.body == b'<response><error>Not Found</error></response>'
class AbstractResource(wsgiservice.Resource):
"""This resource should not be added to the application as it doesn't
have a path. (Verified by test_getapp)
"""
class Resource1(wsgiservice.Resource):
_path = '/res1/{id}'
_validations = {'id': {'re': '[a-z]{5}'}}
def GET(self, id, foo):
return 'GET was called with id {0}, foo {1}'.format(id, foo)
def POST(self, id, foo):
return 'POST was called with id {0}, foo {1}'.format(id, foo)
POST._validations = {'foo': {'re': '[0-9]+'}}
class Resource2(wsgiservice.Resource):
_path = '/res2'
def POST(self):
wsgiservice.raise_201(self, '/res2/test')
def PUT(self):
wsgiservice.raise_201(self, 'foo')
class Resource3(AbstractResource):
_path = '/res3'
@wsgiservice.expires(timedelta(days=1))
def GET(self, id):
return "Called with id: {0}".format(id)
class Resource4(wsgiservice.Resource):
_path = '/res4'
@wsgiservice.expires(138, currtime=lambda: 1240250007)
def GET(self, id):
return "Called with id: {0}".format(id)
@wsgiservice.expires(139, currtime=lambda: 1240250007)
@wsgiservice.expires(138, currtime=lambda: 1240250007)
def POST(self, id):
return "POST Called with id: {0}".format(id)
def get_etag(self, id):
if id:
return id[0] + '"' + id[1:]
def get_last_modified(self, id):
from webob import UTC
from datetime import datetime
return datetime(2009, 5, 1, 14, 30, tzinfo=UTC)
class Resource5(wsgiservice.Resource):
_path = '/res5'
def GET(self, throw):
if throw == '1':
raise Exception("Some random exception.")
else:
return 'Throwing nothing'
class Resource6(wsgiservice.Resource):
class DummyException(Exception):
pass
NOT_FOUND = (KeyError, DummyException)
_path = '/res6/{id}'
items = {'uid': 'works'}
@wsgiservice.expires(timedelta(days=1), vary=['Authorization'])
def GET(self, id):
return self.items[id]
class NotAResource():
def __getattr__(self, name):
return name
not_a_class = NotAResource()
| bsd-2-clause | 3,326,394,759,695,108,600 | 32.699013 | 78 | 0.639123 | false |
emailgregn/djtempl | djtempl/cli.py | 1 | 1278 | from djtempl import render_files
import argparse
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--template",
metavar='file',
default='Dockerfile.tmpl',
type=argparse.FileType(mode='r'), # 2.7 argparse.FileType() doesn't support encoding=
help="The dockerfile template to render")
parser.add_argument("-p", "--pip",
metavar='file',
default='requirements.txt',
type=argparse.FileType(mode='r'),
help="The pip requirements file")
parser.add_argument("-d", "--dockerfile",
metavar='file',
default=sys.stdout,
type=argparse.FileType(mode='w'),
help="The output dockerfile. Default is STDOUT")
parser.add_argument("-q", "--quiet",
action="store_true",
help="Silently overwrite if Dockerfile already exists")
args = parser.parse_args()
dfile = args.dockerfile
pfile = args.pip
tfile = args.template
quiet = args.quiet
render_files(pfile, tfile, dfile, quiet)
| gpl-3.0 | 3,263,640,041,976,481,000 | 33.540541 | 109 | 0.516432 | false |
ama-jharrison/agdc | agdc/agdc/abstract_ingester/dataset_record.py | 1 | 27323 | #!/usr/bin/env python
# ===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
"""
DatasetRecord: database interface class.
These classes provide an interface between the database and the top-level
ingest algorithm (AbstractIngester and its subclasses). They also provide
the implementation of the database and tile store side of the ingest
process. They are expected to be independent of the structure of any
particular dataset, but will change if the database schema or tile store
format changes.
"""
from __future__ import absolute_import
import logging
import os
import re
from math import floor
from osgeo import osr
from agdc.cube_util import DatasetError, DatasetSkipError
from .ingest_db_wrapper import IngestDBWrapper
from .ingest_db_wrapper import TC_PENDING, TC_SINGLE_SCENE, TC_SUPERSEDED
from .ingest_db_wrapper import TC_MOSAIC
from .mosaic_contents import MosaicContents
from .tile_record import TileRecord, TileRepository
# Set up logger.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class DatasetRecord(object):
"""DatasetRecord database interface class."""
DATASET_METADATA_FIELDS = ['dataset_path',
'datetime_processed',
'dataset_size',
'll_x',
'll_y',
'lr_x',
'lr_y',
'ul_x',
'ul_y',
'ur_x',
'ur_y',
'x_pixels',
'y_pixels',
'xml_text'
]
def __init__(self, collection, acquisition, dataset):
self.collection = collection
self.datacube = collection.datacube
self.db = IngestDBWrapper(self.datacube.db_connection)
dataset_key = collection.get_dataset_key(dataset)
self.dataset_bands = collection.new_bands[dataset_key]
self.dataset = dataset
self.mdd = dataset.metadata_dict
self.dataset_dict = {}
for field in self.DATASET_METADATA_FIELDS:
self.dataset_dict[field] = self.mdd[field]
self.dataset_dict['acquisition_id'] = acquisition.acquisition_id
self.dataset_dict['crs'] = self.mdd['projection']
self.dataset_dict['level_name'] = self.mdd['processing_level']
self.dataset_dict['level_id'] = \
self.db.get_level_id(self.dataset_dict['level_name'])
self.dataset_dict['dataset_id'] = \
self.db.get_dataset_id(self.dataset_dict)
if self.dataset_dict['dataset_id'] is None:
# create a new dataset record in the database
self.dataset_dict['dataset_id'] = \
self.db.insert_dataset_record(self.dataset_dict)
self.needs_update = False
else:
# check that the old dataset record can be updated
self.__check_update_ok()
self.needs_update = True
self.dataset_id = self.dataset_dict['dataset_id']
def remove_mosaics(self, dataset_filter):
"""Remove mosaics associated with the dataset.
This will mark mosaic files for removal, delete mosaic database
records if they exist, and update the tile class of overlapping
tiles (from other datasets) to reflect the lack of a mosaic. The
'dataset_filter' is a list of dataset_ids to filter on. It should
be the list of dataset_ids that have been locked (including this
dataset). It is used to avoid operating on the tiles of an
unlocked dataset.
"""
# remove new mosaics (those with database records)
overlap_dict = self.db.get_overlapping_tiles_for_dataset(
self.dataset_id,
input_tile_class_filter=(TC_SINGLE_SCENE,
TC_SUPERSEDED,
TC_MOSAIC),
output_tile_class_filter=(TC_MOSAIC,),
dataset_filter=dataset_filter
)
for tile_record_list in overlap_dict.values():
for tr in tile_record_list:
self.db.remove_tile_record(tr['tile_id'])
self.collection.mark_tile_for_removal(tr['tile_pathname'])
# build a dictionary of overlaps (ignoring mosaics)
overlap_dict = self.db.get_overlapping_tiles_for_dataset(
self.dataset_id,
input_tile_class_filter=(TC_SINGLE_SCENE,
TC_SUPERSEDED),
output_tile_class_filter=(TC_SINGLE_SCENE,
TC_SUPERSEDED),
dataset_filter=dataset_filter
)
# update tile classes for overlap tiles from other datasets
for tile_record_list in overlap_dict.values():
if len(tile_record_list) > 2:
raise DatasetError("Attempt to update a mosaic of three or " +
"more datasets. Handling for this case " +
"is not yet implemented.")
for tr in tile_record_list:
if tr['dataset_id'] != self.dataset_id:
self.db.update_tile_class(tr['tile_id'], TC_SINGLE_SCENE)
# remove old mosaics (those without database records)
for tile_record_list in overlap_dict.values():
if len(tile_record_list) > 1:
# tile_record_list is sorted by acquisition start time, so
# the first record should be the one the mosaic filename is
# based on.
tr = tile_record_list[0]
mosaic_pathname = \
self.__make_mosaic_pathname(tr['tile_pathname'])
if os.path.isfile(mosaic_pathname):
self.collection.mark_tile_for_removal(mosaic_pathname)
def remove_tiles(self):
"""Remove the tiles associated with the dataset.
This will remove ALL the tiles belonging to this dataset, deleting
database records and marking tile files for removal on commit. Mosaics
should be removed BEFORE calling this (as it will delete the tiles
needed to figure out the overlaps, but may not delete all the mosaics).
"""
tile_list = self.db.get_dataset_tile_ids(self.dataset_id)
for tile_id in tile_list:
tile_pathname = self.db.get_tile_pathname(tile_id)
self.db.remove_tile_record(tile_id)
self.collection.mark_tile_for_removal(tile_pathname)
def update(self):
"""Update the dataset record in the database.
This first checks that the new dataset is more recent than
the record in the database. If not it raises a dataset error.
"""
self.__check_update_ok()
self.db.update_dataset_record(self.dataset_dict)
def make_tiles(self, tile_type_id, band_stack):
"""Tile the dataset, returning a list of tile_content objects.
:rtype list of TileContents
"""
tile_list = []
tile_footprint_list = sorted(self.get_coverage(tile_type_id))
LOGGER.info('%d tile footprints cover dataset', len(tile_footprint_list))
for tile_footprint in tile_footprint_list:
tile_contents = self.collection.create_tile_contents(
tile_type_id,
tile_footprint,
band_stack
)
tile_contents.reproject()
if tile_contents.has_data():
tile_list.append(tile_contents)
else:
tile_contents.remove()
LOGGER.info('%d non-empty tiles created', len(tile_list))
return tile_list
def store_tiles(self, tile_list):
"""Store tiles in the database and file store.
'tile_list' is a list of tile_contents objects. This
method will create the corresponding database records and
mark tiles for creation when the transaction commits.
:type tile_list: list of TileContents
"""
return [self.create_tile_record(tile_contents) for tile_contents in tile_list]
def create_mosaics(self, dataset_filter):
"""Create mosaics associated with the dataset.
'dataset_filter' is a list of dataset_ids to filter on. It should
be the list of dataset_ids that have been locked (including this
dataset). It is used to avoid operating on the tiles of an
unlocked dataset.
"""
# Build a dictionary of overlaps (ignoring mosaics, including pending).
overlap_dict = self.db.get_overlapping_tiles_for_dataset(
self.dataset_id,
input_tile_class_filter=(TC_PENDING,
TC_SINGLE_SCENE,
TC_SUPERSEDED),
output_tile_class_filter=(TC_PENDING,
TC_SINGLE_SCENE,
TC_SUPERSEDED),
dataset_filter=dataset_filter
)
# Make mosaics and update tile classes as needed.
for tile_record_list in overlap_dict.values():
if len(tile_record_list) > 2:
raise DatasetError("Attempt to create a mosaic of three or " +
"more datasets. Handling for this case " +
"is not yet implemented.")
elif len(tile_record_list) == 2:
self.__make_one_mosaic(tile_record_list)
for tr in tile_record_list:
self.db.update_tile_class(tr['tile_id'], TC_SUPERSEDED)
else:
for tr in tile_record_list:
self.db.update_tile_class(tr['tile_id'], TC_SINGLE_SCENE)
def get_removal_overlaps(self):
"""Returns a list of overlapping dataset ids for mosaic removal."""
tile_class_filter = (TC_SINGLE_SCENE,
TC_SUPERSEDED,
TC_MOSAIC)
return self.get_overlaps(tile_class_filter)
def get_creation_overlaps(self):
"""Returns a list of overlapping dataset_ids for mosaic creation."""
tile_class_filter = (TC_PENDING,
TC_SINGLE_SCENE,
TC_SUPERSEDED)
return self.get_overlaps(tile_class_filter)
def get_overlaps(self, tile_class_filter):
"""Returns a list of overlapping dataset ids, including this dataset.
A dataset is overlapping if it contains tiles that overlap with
tiles belonging to this dataset. Only tiles in the tile_class_filter
are considered.
"""
dataset_list = self.db.get_overlapping_dataset_ids(
self.dataset_id,
tile_class_filter=tile_class_filter
)
if not dataset_list:
dataset_list = [self.dataset_id]
return dataset_list
def create_tile_record(self, tile_contents):
"""Factory method to create an instance of the TileRecord class.
The created object will be responsible for inserting tile table records
into the database for reprojected or mosaiced tiles."""
self.collection.mark_tile_for_creation(tile_contents)
tile = TileRecord(
self.dataset_id,
tile_footprint=tile_contents.tile_footprint,
tile_type_id=tile_contents.tile_type_id,
path=tile_contents.get_output_path(),
size_mb=tile_contents.get_output_size_mb(),
tile_extents=tile_contents.tile_extents
)
TileRepository(self.collection).persist_tile(tile)
return tile
def mark_as_tiled(self):
"""Flag the dataset record as tiled in the database.
This flag does not exist in the current database schema,
so this method does nothing at the moment."""
pass
def list_tile_types(self):
"""Returns a list of the tile type ids for this dataset."""
return self.dataset_bands.keys()
def get_tile_bands(self, tile_type_id):
"""Returns a dictionary containing the band info for one tile type.
The tile_type_id must valid for this dataset, available from
list_tile_types above.
"""
return self.dataset_bands[tile_type_id]
def get_coverage(self, tile_type_id):
"""Given the coordinate reference system of the dataset and that of the
tile_type_id, return a list of tiles within the dataset footprint"""
tile_type_info = self.collection.datacube.tile_type_dict[tile_type_id]
#Get geospatial information from the dataset.
dataset_crs = self.mdd['projection']
dataset_geotransform = self.mdd['geo_transform']
pixels = self.mdd['x_pixels']
lines = self.mdd['y_pixels']
#Look up the datacube's projection information for this tile_type
tile_crs = tile_type_info['crs']
#Get the transformation between the two projections
transformation = self.define_transformation(dataset_crs, tile_crs)
#Determine the bounding quadrilateral of the dataset extent
#in tile coordinates
dataset_bbox = self.get_bbox(transformation, dataset_geotransform,
pixels, lines)
#Determine maximum inner rectangle, which is guaranteed to need tiling
#and the minimum outer rectangle outside which no tiles will exist.
cube_origin = (tile_type_info['x_origin'], tile_type_info['y_origin'])
cube_tile_size = (tile_type_info['x_size'], tile_type_info['y_size'])
coverage = self.get_touched_tiles(dataset_bbox,
cube_origin, cube_tile_size)
return coverage
#
# worker methods
#
def __check_update_ok(self):
"""Checks if an update is possible, raises a DatasetError otherwise.
Note that dataset_older_than_database returns a tuple
(disk_datetime_processed, database_datetime_processed, tile_ingested_datetime)
if no ingestion required"""
tile_class_filter = (TC_SINGLE_SCENE,
TC_SUPERSEDED)
time_tuple = self.db.dataset_older_than_database(
self.dataset_dict['dataset_id'],
self.dataset_dict['datetime_processed'],
tile_class_filter)
if time_tuple is not None:
disk_datetime_processed, database_datetime_processed, tile_ingested_datetime = time_tuple
if (disk_datetime_processed == database_datetime_processed):
skip_message = 'Dataset has already been ingested'
elif disk_datetime_processed < database_datetime_processed:
skip_message = 'Dataset on disk is older than dataset in DB'
else:
skip_message = 'Dataset on disk was created after currently ingested contents'
skip_message += ' (Disk = %s, DB = %s, Ingested = %s)' % time_tuple
raise DatasetSkipError(skip_message)
def __make_one_mosaic(self, tile_record_list):
"""Create a single mosaic.
This create the mosaic contents, creates the database record,
and marks the mosaic contents for creation on transaction commit.
"""
mosaic = MosaicContents(
tile_record_list,
self.datacube.tile_type_dict,
self.dataset_dict['level_name'],
self.collection.get_temp_tile_directory()
)
mosaic.create_record(self.db)
self.collection.mark_tile_for_creation(mosaic)
def __make_mosaic_pathname(self, tile_pathname):
"""Return the pathname of the mosaic corresponding to a tile."""
(tile_dir, tile_basename) = os.path.split(tile_pathname)
mosaic_dir = os.path.join(tile_dir, 'mosaic_cache')
if self.dataset_dict['level_name'] == 'PQA':
mosaic_basename = tile_basename
else:
mosaic_basename = re.sub(r'\.\w+$', '.vrt', tile_basename)
return os.path.join(mosaic_dir, mosaic_basename)
#
# Worker methods for coverage.
#
# These are public so that they can be called by test_dataset_record.
#
def define_transformation(self, dataset_crs, tile_crs):
"""Return the transformation between dataset_crs
and tile_crs projections"""
osr.UseExceptions()
try:
dataset_spatial_reference = self.create_spatial_ref(dataset_crs)
tile_spatial_reference = self.create_spatial_ref(tile_crs)
if dataset_spatial_reference is None:
                raise DatasetError('Unknown projection %s'
% str(dataset_crs))
if tile_spatial_reference is None:
                raise DatasetError('Unknown projection %s'
% str(tile_crs))
return osr.CoordinateTransformation(dataset_spatial_reference,
tile_spatial_reference)
except Exception:
raise DatasetError('Coordinate transformation error ' +
'for transforming %s to %s' %
(str(dataset_crs), str(tile_crs)))
@staticmethod
def create_spatial_ref(crs):
"""Create a spatial reference system for projecton crs.
Called by define_transformation()"""
# pylint: disable=broad-except
osr.UseExceptions()
try:
spatial_ref = osr.SpatialReference()
except Exception:
raise DatasetError('No spatial reference done for %s' % str(crs))
try:
spatial_ref.ImportFromWkt(crs)
return spatial_ref
except Exception:
pass
try:
matchobj = re.match(r'EPSG:(\d+)', crs)
epsg_code = int(matchobj.group(1))
spatial_ref.ImportFromEPSG(epsg_code)
return spatial_ref
except Exception:
return None
@staticmethod
def get_bbox(transform, geotrans, pixels, lines):
"""Return the coordinates of the dataset footprint in clockwise order
from upper-left"""
xul, yul, dummy_z = \
transform.TransformPoint(geotrans[0], geotrans[3], 0)
xur, yur, dummy_z = \
transform.TransformPoint(geotrans[0] + geotrans[1] * pixels,
geotrans[3] + geotrans[4] * pixels, 0)
xll, yll, dummy_z = \
transform.TransformPoint(geotrans[0] + geotrans[2] * lines,
geotrans[3] + geotrans[5] * lines, 0)
xlr, ylr, dummy_z = \
transform.TransformPoint(
geotrans[0] + geotrans[1] * pixels + geotrans[2] * lines,
geotrans[3] + geotrans[4] * pixels + geotrans[5] * lines, 0)
return [(xul, yul), (xur, yur), (xlr, ylr), (xll, yll)]
def get_touched_tiles(self, dataset_bbox, cube_origin, cube_tile_size):
"""Return a list of tuples (itile, jtile) comprising all tiles
footprints that intersect the dataset bounding box"""
definite_tiles, possible_tiles = \
self.get_definite_and_possible_tiles(dataset_bbox,
cube_origin, cube_tile_size)
coverage_set = definite_tiles
#Check possible tiles:
#Check if the tile perimeter intersects the dataset bbox perimeter:
intersected_tiles = \
self.get_intersected_tiles(possible_tiles, dataset_bbox,
cube_origin, cube_tile_size)
coverage_set = coverage_set.union(intersected_tiles)
possible_tiles = possible_tiles.difference(intersected_tiles)
#Otherwise the tile might be wholly contained in the dataset bbox
contained_tiles = \
self.get_contained_tiles(possible_tiles, dataset_bbox,
cube_origin, cube_tile_size)
coverage_set = coverage_set.union(contained_tiles)
return coverage_set
@staticmethod
def get_definite_and_possible_tiles(bbox, cube_origin, cube_tile_size):
"""Return two lists of tile footprints: from the largest rectangle
wholly contained within the dataset bbox and the smallest rectangle
containing the bbox."""
#pylint: disable=too-many-locals
#unpack the bbox vertices in clockwise order from upper-left
xyul, xyur, xylr, xyll = bbox
xul, yul = xyul
xur, yur = xyur
xlr, ylr = xylr
xll, yll = xyll
#unpack the origin of the tiled datacube (e.g. lat=0, lon=0) and the
#datacube tile size
xorigin, yorigin = cube_origin
xsize, ysize = cube_tile_size
#Define the largest rectangle wholly contained within footprint
xmin = max(xll, xul)
xmax = min(xlr, xur)
ymin = max(yll, ylr)
ymax = min(yul, yur)
xmin_index = int(floor((xmin - xorigin) / xsize))
xmax_index = int(floor((xmax - xorigin) / xsize))
ymin_index = int(floor((ymin - yorigin) / ysize))
ymax_index = int(floor((ymax - yorigin) / ysize))
definite_tiles = set([(itile, jtile)
for itile in range(xmin_index, xmax_index + 1)
for jtile in range(ymin_index, ymax_index + 1)])
#Define the smallest rectangle which is guaranteed to include all tiles
        #in the footprint.
xmin = min(xll, xul)
xmax = max(xlr, xur)
ymin = min(yll, ylr)
ymax = max(yul, yur)
xmin_index = int(floor((xmin - xorigin) / xsize))
xmax_index = int(floor((xmax - xorigin) / xsize))
ymin_index = int(floor((ymin - yorigin) / ysize))
ymax_index = int(floor((ymax - yorigin) / ysize))
possible_tiles = set([(itile, jtile)
for itile in range(xmin_index, xmax_index + 1)
for jtile in range(ymin_index, ymax_index + 1)
]).difference(definite_tiles)
return (definite_tiles, possible_tiles)
def get_intersected_tiles(self, candidate_tiles, dset_bbox,
cube_origin, cube_tile_size):
"""Return the subset of candidate_tiles that have an intersection with
the dataset bounding box"""
#pylint: disable=too-many-locals
xorigin, yorigin = cube_origin
xsize, ysize = cube_tile_size
keep_list = []
for itile, jtile in candidate_tiles:
intersection_exists = False
(x0, y0) = (xorigin + itile * xsize,
yorigin + (jtile + 1) * ysize)
tile_bbox = [(x0, y0), (x0 + xsize, y0),
(x0 + xsize, y0 - ysize), (x0, y0 - ysize)]
tile_vtx_number = len(tile_bbox)
dset_vtx_number = len(dset_bbox)
for tile_vtx in range(tile_vtx_number):
x1, y1 = tile_bbox[tile_vtx]
x2, y2 = tile_bbox[(tile_vtx + 1) % tile_vtx_number]
for dset_vtx in range(dset_vtx_number):
x3, y3 = dset_bbox[dset_vtx]
x4, y4 = dset_bbox[(dset_vtx + 1) % dset_vtx_number]
xcoords = [x1, x2, x3, x4]
ycoords = [y1, y2, y3, y4]
intersection_exists = \
self.check_intersection(xcoords, ycoords)
if intersection_exists:
keep_list.append((itile, jtile))
break
if intersection_exists:
break
return set(keep_list)
@staticmethod
def get_contained_tiles(candidate_tiles, dset_bbox,
cube_origin, cube_tile_size):
"""Return the subset of candidate tiles that lie wholly within the
dataset bounding box"""
#pylint: disable=too-many-locals
xorigin, yorigin = cube_origin
xsize, ysize = cube_tile_size
keep_list = []
for itile, jtile in candidate_tiles:
tile_vtx_inside = []
(x0, y0) = (xorigin + itile * xsize,
yorigin + (jtile + 1) * ysize)
tile_bbox = [(x0, y0), (x0 + xsize, y0),
(x0 + xsize, y0 - ysize), (x0, y0 - ysize)]
dset_vtx_number = len(dset_bbox)
for x, y in tile_bbox:
#Check if this vertex lies within the dataset bounding box:
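                #(even-odd crossing test: count dataset edges whose y-range
                # spans this vertex and that pass to one side of it; an odd
                # count means the vertex lies inside the dataset bbox)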
winding_number = 0
for dset_vtx in range(dset_vtx_number):
x1, y1 = dset_bbox[dset_vtx]
x2, y2 = dset_bbox[(dset_vtx + 1) % dset_vtx_number]
if y >= y1 and y < y2:
if (x - x1) * (y2 - y1) > (x2 - x1) * (y - y1):
winding_number += 1
elif y <= y1 and y > y2:
if (x - x1) * (y2 - y1) < (x2 - x1) * (y - y1):
winding_number += 1
tile_vtx_inside.append(winding_number % 2 == 1)
if tile_vtx_inside.count(True) == len(tile_bbox):
keep_list.append((itile, jtile))
assert tile_vtx_inside.count(True) == 4 or \
tile_vtx_inside.count(True) == 0, \
"Tile partially inside dataset bounding box but has" \
"no intersection"
return set(keep_list)
@staticmethod
def check_intersection(xpts, ypts):
"""Determines if the line segments
(xpts[0], ypts[0]) to (xpts[1], ypts[1]) and
(xpts[2], ypts[2]) to (xpts[3], ypts[3]) intersect"""
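        # Parametric segment-intersection test: with p, q the segment start
        # points and r, s their direction vectors, solve p + t*r = q + u*s
        # using 2D cross products. r x s == 0 (parallel or collinear) is
        # treated as no intersection; otherwise the segments properly cross
        # iff 0 < t < 1 and 0 < u < 1.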
pvec = (xpts[0], ypts[0])
qvec = (xpts[2], ypts[2])
rvec = (xpts[1] - xpts[0], ypts[1] - ypts[0])
svec = (xpts[3] - xpts[2], ypts[3] - ypts[2])
rvec_cross_svec = rvec[0] * svec[1] - rvec[1] * svec[0]
if rvec_cross_svec == 0:
return False
qminusp_cross_svec = \
(qvec[0] - pvec[0]) * svec[1] - (qvec[1] - pvec[1]) * svec[0]
qminusp_cross_rvec = \
(qvec[0] - pvec[0]) * rvec[1] - (qvec[1] - pvec[1]) * rvec[0]
tparameter = qminusp_cross_svec / rvec_cross_svec
uparameter = qminusp_cross_rvec / rvec_cross_svec
if tparameter > 0 and tparameter < 1 and \
uparameter > 0 and uparameter < 1:
return True
| apache-2.0 | 7,368,330,845,389,171,000 | 41.493002 | 101 | 0.569044 | false |
chris-klinger/Goat | databases/database_config.py | 1 | 7681 | """
This module contains code for creating the underlying database structures
and also general code to perform basic actions: add, remove, update, list
"""
import os
import goat
from settings import settings_config
from databases import database_records,database_util,database_dirfiles
from util.inputs import prompts
#print('from database config')
#print(dir(goat))
#record_db = goat.get_record_db()
def get_goat_db():
"""Returns path to DB file"""
# deferred call to method in goat module due to import issues?!
return goat.get_goat_db()
def get_record_db(db_obj):
"""Gets the records database"""
# deferred call to method in goat module due to import issues?!
return goat.get_record_db(db_obj)
def get_query_db(db_obj):
return goat.get_query_db(db_obj)
def get_search_db(db_obj):
return goat.get_search_db(db_obj)
def get_result_db(db_obj):
return goat.get_result_db(db_obj)
def get_summary_db(db_obj):
return goat.get_summary_db(db_obj)
def get_db_dir_path(goat_dir):
"""Returns full pathname to db directory"""
return os.path.join(goat_dir, 'DB')
def check_for_dbs(goat_dir):
"""Checks whether a database folder already exists"""
if os.path.exists(get_db_dir_path(goat_dir)):
return True
return False
def create_dbs(goat_dir):
"""Creates the initial database structure"""
db_dir = get_db_dir_path(goat_dir)
os.mkdir(db_dir)
settings_config.add_setting(goat_dir, database_directory=db_dir)
def add_by_dir(goat_dir, target_dir=None):
"""
Adds records for each file in a directory. If one or more
extensions are specified, will only add files ending in those
extensions and ignore others
"""
exts = []
select_files = False
recurse = False
if target_dir is None:
target_dir = prompts.DirPrompt(
message = 'Please choose a directory to add files from',
errormsg = 'Unrecognized directory').prompt()
recurse = prompts.YesNoPrompt(
message = 'Would you like to add from subdirs too?').prompt()
if recurse.lower() in {'yes','y'}:
recurse = True
limit = prompts.YesNoPrompt(
message = 'Would you like to limit files?').prompt()
if limit.lower() in {'yes','y'}:
valids = ['file','extension','both']
choice = prompts.LimitedPrompt(
message = 'Limit by file, extension, or both?',
errormsg = 'Please choose "file", "extension", or "both"',
valids = valids).prompt()
if choice == 'file':
select_files = True
elif choice == 'extension':
exts = database_util.get_exts()
else:
select_files = True
exts = database_util.get_exts()
database_util.add_files_by_dir(goat_dir, target_dir,
select_files, recurse, *exts)
def add_by_file(goat_dir, addfile=None):
"""Adds a record for the specified file"""
if addfile is None:
addfile = database_util.get_file()
add_record(goat_dir, addfile=addfile)
def add_record(goat_dir, record=None, addfile=None, rdir=None, subdir=None):
"""
Adds a record to the database. The user is requested to provide
values for missing information.
"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
if records_db.check_record(record):
print('Goat has detected an existing record for {}'.format(record))
        modify = prompts.YesNoPrompt(
            message = 'Do you want to modify {}?'.format(record)).prompt()
        if modify.lower() in {'no','n'}:
            print('Did not modify {}'.format(record))
        elif modify.lower() in {'yes','y'}:
            update_record(goat_dir,record)
else:
print('No such record exists yet, adding record')
records_db.add_record_obj(record)
if addfile is None:
        print('Warning, no file for record {}. '
'Goat requires files for all functionality'.format(record))
add_now = prompts.YesNoPrompt(
message = 'Would you like to add a file now?').prompt()
if add_now.lower() in {'yes','y'}:
addfile = database_util.get_file()
elif add_now.lower() in {'no','n'}:
pass # Might change later
try:
print('File to be added is {}'.format(addfile))
database_dirfiles.add_record_from_file(goat_dir, record, addfile)
except Exception:
pass # Could not add file
more_info = prompts.YesNoPrompt(
message = 'Do you wish to add more info for record {}?'.format(record)).prompt()
if more_info.lower() in {'no', 'n'}:
pass # nothing more to do
elif more_info.lower() in {'yes', 'y'}:
records_db.extend_record(record,
**database_util.add_attribute_loop())
def remove_record(goat_dir, record=None):
"""Removes a record from the database"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
user_conf = prompts.YesNoPrompt(
message = 'Do you wish to delete all data for {}?'.format(record)).prompt()
if user_conf.lower() in {'no', 'n'}:
pass # nothing more to do
elif user_conf.lower() in {'yes', 'y'}:
records_db.remove_record_obj(record)
database_dirfiles.remove_record_dir(goat_dir,record)
def update_record(goat_dir, record=None):
"""
Combines user input with other functions to update records
already present in the database
"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
choices = {'add', 'change', 'remove', 'quit'}
cont = True
while cont is True:
user_choice = prompts.LimitedPrompt(
message = 'Please choose an option: add, change, remove, quit',
errormsg = 'Unrecognized option',
valids = choices).prompt()
if user_choice.lower() == 'quit':
cont = False
elif user_choice.lower() == 'add':
records_db.extend_record(record,
**database_util.add_attribute_loop(
goat_dir,record))
elif user_choice.lower() == 'remove':
records_db.reduce_record(record,
*database_util.remove_attribute_loop(
goat_dir,record))
elif user_choice.lower() =='change':
to_change = database_util.change_attribute_loop(
goat_dir,record)
for k,v in to_change.items():
records_db.change_record_attr(record,k,v)
def check_record(goat_dir, record=None):
"""Checks whether a record is already present"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
if records_db.check_record(record):
print('Record for {} exists in database'.format(record))
else:
print('Could not find record for {} in database'.format(record))
def get_record_attr(goat_dir, attr, record=None):
"""Returns a requested attribute for a record"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
if records_db.check_record(record):
return records_db.check_record_attr(record, attr)
else:
print('Could not find record for {} in database'.format(record))
def list_records(goat_dir, record_type=None):
"""
Lists records in the database, either by their attributes or by
the included files
"""
records_db = get_record_db(goat_dir)
for record in records_db.list_records():
print(record)
records_db.list_record_info(record)
| gpl-3.0 | 8,086,966,148,088,573,000 | 35.751196 | 88 | 0.623096 | false |
shanx/django-maintenancemode | test_settings.py | 1 | 1663 | import os
import re
SECRET_KEY = "DUMMY_SECRET_KEY"
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
INTERNAL_IPS = []
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_ROOT, "test_templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
},
},
]
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
INSTALLED_APPS = (
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.sessions",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sites",
"maintenancemode",
)
MIDDLEWARE = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"maintenancemode.middleware.MaintenanceModeMiddleware",
)
ROOT_URLCONF = "test_urls"
SITE_ID = 1
MAINTENANCE_MODE = False # or ``True`` and use ``maintenance`` command
MAINTENANCE_IGNORE_URLS = (re.compile(r"^/ignored.*"),)
| bsd-3-clause | -1,005,136,904,575,613,800 | 28.696429 | 85 | 0.628984 | false |
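These settings drive django-maintenancemode's test suite. The test below is an illustrative sketch, not part of the original file: it assumes the middleware answers requests with HTTP 503 while MAINTENANCE_MODE is enabled, and the URLs used are hypothetical paths expected to exist in test_urls.

# Illustrative only: toggling maintenance mode per test with override_settings.
from django.test import TestCase, override_settings

class MaintenanceModeSketch(TestCase):
    @override_settings(MAINTENANCE_MODE=True)
    def test_root_returns_503(self):
        # hypothetical URL; assumes test_urls exposes something at '/'
        self.assertEqual(self.client.get('/').status_code, 503)

    @override_settings(MAINTENANCE_MODE=True)
    def test_ignored_url_is_served_normally(self):
        # MAINTENANCE_IGNORE_URLS above whitelists anything under /ignored
        self.assertNotEqual(self.client.get('/ignored/page/').status_code, 503)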
demisto/content | Packs/CommonScripts/Scripts/CheckContextValue/CheckContextValue_test.py | 1 | 2863 | from CommonServerPython import *
from CheckContextValue import poll_field
context = {
'id': 1,
'name': 'This is incident1',
'type': 'Phishing',
'severity': 0,
'status': 1,
'created': '2019-01-02',
'closed': '0001-01-01T00:00:00Z',
'foo': 'bar',
}
missing_context = {
'id': 2,
'name': 'This is incident2',
'type': 'Phishing',
'severity': 0,
'status': 1,
'created': '2019-01-02',
'closed': '0001-01-01T00:00:00Z',
}
def test_poll_context_field_from_root(mocker):
""" Unit test
Given
- An incident with the context field named 'foo' with value 'bar' in the root
- No regex argument sent to the command
When
- mock the server response to demisto.context().
Then
Validate the script finds the field
"""
mocker.patch.object(demisto, 'context', return_value=context)
args = {
'key': 'foo',
}
result = poll_field(args)
assert result.readable_output in "The key exists."
assert result.outputs['exists'] is True
def test_poll_context_field_from_root_with_regex_failure(mocker):
""" Unit test
Given
- An incident with the context field named 'foo' with value 'bar' in the root
- The regex sent does not match the context field value
When
- mock the server response to demisto.context().
Then
Validate the script returns a false value
"""
mocker.patch.object(demisto, 'context', return_value=context)
args = {
'key': 'foo',
'regex': '^a',
}
result = poll_field(args)
assert result.readable_output in "The key does not exist."
assert result.outputs['exists'] is False
def test_poll_field_from_root_with_regex_success(mocker):
""" Unit test
Given
- An incident with the context field named 'foo' with value 'bar' in root
- The regex sent matches the context field value
When
- mock the server response to demisto.context().
Then
Validate the script finds the context field
"""
mocker.patch.object(demisto, 'context', return_value=context)
args = {
'key': 'foo',
'regex': '^b',
}
result = poll_field(args)
assert result.readable_output in "The key exists."
assert result.outputs['exists'] is True
def test_poll_missing_context_field_in_root(mocker):
""" Unit test
Given
- An incident without the context field named 'foo' in the root
When
- mock the server response to demisto.context().
Then
Validate the script returns a false value
"""
mocker.patch.object(demisto, 'context', return_value=missing_context)
args = {
'key': 'foo',
}
result = poll_field(args)
assert result.readable_output in "The key does not exist."
assert result.outputs['exists'] is False
| mit | -6,227,709,969,366,190,000 | 25.509259 | 85 | 0.618582 | false |
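The three regex-related tests above differ only in their arguments and expected outcome; a parametrized variant is sketched below as a possible consolidation. It is illustrative only and not part of the original file.

# Illustrative consolidation of the cases above using pytest.mark.parametrize.
import pytest

@pytest.mark.parametrize('args, expected', [
    ({'key': 'foo'}, True),                 # key present, no regex given
    ({'key': 'foo', 'regex': '^b'}, True),  # regex matches the value 'bar'
    ({'key': 'foo', 'regex': '^a'}, False), # regex does not match 'bar'
])
def test_poll_field_parametrized(mocker, args, expected):
    mocker.patch.object(demisto, 'context', return_value=context)
    assert poll_field(args).outputs['exists'] is expected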
dragoon/kilogram | kilogram/dataset/edit_histories/wikipedia/libs/dewikify.py | 1 | 1518 | import re
class Parser(object):
"""
Parser to remove all kinds of wiki markup tags from an object
"""
def __init__(self):
"""
Constructor
"""
self.string = ''
# all the following regex remove all tags that cannot be rendered
# in text
self.wiki_re = re.compile(r"""\[{2}(File|Category):[\s\S]+\]{2}|
[\s\w#():-]+\||
(\[{2}|\]{2})|
\'{2,5}|
(<s>|<!--)[\s\S]+(</s>|-->)|
{{[\s\S]+}}|
^={1,6}|={1,6}$""", re.X)
def __list(self, listmatch):
return ' ' * (len(listmatch.group()) - 1) + '*'
def __parse(self, string=''):
"""
Parse a string to remove and replace all wiki markup tags
"""
self.string = string
self.string = self.wiki_re.sub('', self.string)
# search for lists
self.listmatch = re.search(r'^(\*+)', self.string)
if self.listmatch:
self.string = self.__list(self.listmatch) + re.sub(r'^(\*+)', '', self.string)
return self.string
def parse_string(self, string=''):
"""
Parse a string object to de-wikified text
"""
self.strings = string.splitlines(True)
self.strings = [self.__parse(line) for line in self.strings]
return ''.join(self.strings)
| apache-2.0 | 5,212,644,074,540,540,000 | 32.755556 | 89 | 0.431489 | false
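A short usage sketch for the Parser class above (illustrative only; the sample wikitext is made up):

# Illustrative usage of Parser; quotes, brackets and link targets are stripped,
# and wiki list markers are re-indented by __list().
parser = Parser()
sample = "'''Bold''' text with a [[Page|link]]\n* item\n** nested item"
print(parser.parse_string(sample))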