code | repo_name | path | language | license | size
---|---|---|---|---|---|
import os
from petsc4py import PETSc
import numpy as onp
from veros import logger, veros_kernel, runtime_settings as rs, runtime_state as rst
from veros.core import utilities
from veros.core.streamfunction.solvers.base import LinearSolver
from veros.core.operators import numpy as npx, update, update_add, at, flush
class PETScSolver(LinearSolver):
def __init__(self, state):
if rst.proc_num > 1 and rs.device == "cpu" and "OMP_NUM_THREADS" not in os.environ:
logger.warning(
"Environment variable OMP_NUM_THREADS is not set, which can lead to severely "
"degraded performance when MPI is used."
)
settings = state.settings
if settings.enable_cyclic_x:
boundary_type = ("periodic", "ghosted")
else:
boundary_type = ("ghosted", "ghosted")
self._da = PETSc.DMDA().create(
[settings.nx, settings.ny],
stencil_width=1,
stencil_type="star",
comm=rs.mpi_comm,
proc_sizes=rs.num_proc,
boundary_type=boundary_type,
ownership_ranges=[
(settings.nx // rs.num_proc[0],) * rs.num_proc[0],
(settings.ny // rs.num_proc[1],) * rs.num_proc[1],
],
)
if rs.device == "gpu":
self._da.setVecType("cuda")
self._da.setMatType("aijcusparse")
self._matrix, self._boundary_fac = self._assemble_poisson_matrix(state)
petsc_options = PETSc.Options()
# setup krylov method
self._ksp = PETSc.KSP()
self._ksp.create(self._da.comm)
self._ksp.setOperators(self._matrix)
self._ksp.setType("bcgs")
self._ksp.setTolerances(atol=1e-24, rtol=1e-14, max_it=1000)
# preconditioner
self._ksp.getPC().setType("gamg")
petsc_options["pc_gamg_type"] = "agg"
petsc_options["pc_gamg_reuse_interpolation"] = True
petsc_options["pc_gamg_threshold"] = 1e-4
petsc_options["pc_gamg_sym_graph"] = True
petsc_options["pc_gamg_agg_nsmooths"] = 2
petsc_options["mg_levels_pc_type"] = "jacobi"
if rs.petsc_options:
petsc_options.insertString(rs.petsc_options)
self._ksp.setFromOptions()
self._ksp.getPC().setFromOptions()
self._rhs_petsc = self._da.createGlobalVec()
self._sol_petsc = self._da.createGlobalVec()
def _petsc_solver(self, rhs, x0):
# hangs on multi-GPU without this
flush()
self._da.getVecArray(self._rhs_petsc)[...] = rhs[2:-2, 2:-2]
self._da.getVecArray(self._sol_petsc)[...] = x0[2:-2, 2:-2]
self._ksp.solve(self._rhs_petsc, self._sol_petsc)
info = self._ksp.getConvergedReason()
iterations = self._ksp.getIterationNumber()
if info < 0:
logger.warning(f"Streamfunction solver did not converge after {iterations} iterations (error code: {info})")
if rs.monitor_streamfunction_residual:
# re-use rhs vector to store residual
rhs_norm = self._rhs_petsc.norm(PETSc.NormType.NORM_2)
self._matrix.multAdd(self._sol_petsc, -self._rhs_petsc, self._rhs_petsc)
residual_norm = self._rhs_petsc.norm(PETSc.NormType.NORM_2)
rel_residual = residual_norm / rhs_norm
if rel_residual > 1e-8:
logger.warning(
f"Streamfunction solver did not achieve required precision (rel. residual: {rel_residual:.2e})"
)
return npx.asarray(self._da.getVecArray(self._sol_petsc)[...])
def solve(self, state, rhs, x0, boundary_val=None):
"""
Arguments:
rhs: Right-hand side vector
x0: Initial guess
boundary_val: Array containing values to set on boundary elements. Defaults to `x0`.
"""
rhs, x0 = prepare_solver_inputs(state, rhs, x0, boundary_val, self._boundary_fac)
linear_solution = self._petsc_solver(rhs, x0)
return update(rhs, at[2:-2, 2:-2], linear_solution)
def _assemble_poisson_matrix(self, state):
"""
Construct a sparse matrix based on the stencil for the 2D Poisson equation.
"""
vs = state.variables
settings = state.settings
matrix = self._da.getMatrix()
boundary_mask = ~npx.any(vs.boundary_mask[2:-2, 2:-2], axis=2)
# assemble diagonals
main_diag = (
-vs.hvr[3:-1, 2:-2]
/ vs.dxu[2:-2, npx.newaxis]
/ vs.dxt[3:-1, npx.newaxis]
/ vs.cosu[npx.newaxis, 2:-2] ** 2
- vs.hvr[2:-2, 2:-2]
/ vs.dxu[2:-2, npx.newaxis]
/ vs.dxt[2:-2, npx.newaxis]
/ vs.cosu[npx.newaxis, 2:-2] ** 2
- vs.hur[2:-2, 2:-2]
/ vs.dyu[npx.newaxis, 2:-2]
/ vs.dyt[npx.newaxis, 2:-2]
* vs.cost[npx.newaxis, 2:-2]
/ vs.cosu[npx.newaxis, 2:-2]
- vs.hur[2:-2, 3:-1]
/ vs.dyu[npx.newaxis, 2:-2]
/ vs.dyt[npx.newaxis, 3:-1]
* vs.cost[npx.newaxis, 3:-1]
/ vs.cosu[npx.newaxis, 2:-2]
)
east_diag = (
vs.hvr[3:-1, 2:-2] / vs.dxu[2:-2, npx.newaxis] / vs.dxt[3:-1, npx.newaxis] / vs.cosu[npx.newaxis, 2:-2] ** 2
)
west_diag = (
vs.hvr[2:-2, 2:-2] / vs.dxu[2:-2, npx.newaxis] / vs.dxt[2:-2, npx.newaxis] / vs.cosu[npx.newaxis, 2:-2] ** 2
)
north_diag = (
vs.hur[2:-2, 3:-1]
/ vs.dyu[npx.newaxis, 2:-2]
/ vs.dyt[npx.newaxis, 3:-1]
* vs.cost[npx.newaxis, 3:-1]
/ vs.cosu[npx.newaxis, 2:-2]
)
south_diag = (
vs.hur[2:-2, 2:-2]
/ vs.dyu[npx.newaxis, 2:-2]
/ vs.dyt[npx.newaxis, 2:-2]
* vs.cost[npx.newaxis, 2:-2]
/ vs.cosu[npx.newaxis, 2:-2]
)
main_diag = npx.where(boundary_mask, main_diag, 0.0)
main_diag = npx.where(main_diag == 0.0, 1.0, main_diag)
# construct sparse matrix
cf = tuple(
# copy to NumPy for speed
onp.asarray(diag)
for diag in (
main_diag,
boundary_mask * east_diag,
boundary_mask * west_diag,
boundary_mask * north_diag,
boundary_mask * south_diag,
)
)
row = PETSc.Mat.Stencil()
col = PETSc.Mat.Stencil()
ij_offsets = [(0, 0), (1, 0), (-1, 0), (0, 1), (0, -1)]
(i0, i1), (j0, j1) = self._da.getRanges()
for j in range(j0, j1):
for i in range(i0, i1):
iloc, jloc = i % (settings.nx // rs.num_proc[0]), j % (settings.ny // rs.num_proc[1])
row.index = (i, j)
for diag, offset in zip(cf, ij_offsets):
io, jo = (i + offset[0], j + offset[1])
col.index = (io, jo)
matrix.setValueStencil(row, col, diag[iloc, jloc])
matrix.assemble()
boundary_scale = {
"east": npx.asarray(cf[1][-1, :]),
"west": npx.asarray(cf[2][0, :]),
"north": npx.asarray(cf[3][:, -1]),
"south": npx.asarray(cf[4][:, 0]),
}
return matrix, boundary_scale
@veros_kernel
def prepare_solver_inputs(state, rhs, x0, boundary_val, boundary_fac):
vs = state.variables
settings = state.settings
if boundary_val is None:
boundary_val = x0
x0 = utilities.enforce_boundaries(x0, settings.enable_cyclic_x)
boundary_mask = ~npx.any(vs.boundary_mask, axis=2)
rhs = npx.where(boundary_mask, rhs, boundary_val) # set right hand side on boundaries
# add dirichlet BC to rhs
if not settings.enable_cyclic_x:
if rst.proc_idx[0] == rs.num_proc[0] - 1:
rhs = update_add(rhs, at[-3, 2:-2], -rhs[-2, 2:-2] * boundary_fac["east"])
if rst.proc_idx[0] == 0:
rhs = update_add(rhs, at[2, 2:-2], -rhs[1, 2:-2] * boundary_fac["west"])
if rst.proc_idx[1] == rs.num_proc[1] - 1:
rhs = update_add(rhs, at[2:-2, -3], -rhs[2:-2, -2] * boundary_fac["north"])
if rst.proc_idx[1] == 0:
rhs = update_add(rhs, at[2:-2, 2], -rhs[2:-2, 1] * boundary_fac["south"])
return rhs, x0
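# Usage sketch (hypothetical; assumes an initialized Veros state object and a
# working petsc4py/MPI setup, which this module requires anyway). The rhs/x0
# arrays are full 2D fields including the two-cell ghost halo on each side:
#
#     solver = PETScSolver(state)
#     psi = solver.solve(state, rhs, x0)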
| dionhaefner/veros | veros/core/streamfunction/solvers/petsc_.py | Python | mit | 8,370 |
"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <getting_started>`.
"""
print __doc__
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD
# Standard scientific Python imports
import pylab as pl
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits. Let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. For these images, we know which digit they represent: it is
# given in the 'target' attribute of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)[:4]):
pl.subplot(2, 4, index + 1)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print "Classification report for classifier %s:\n%s\n" % (
classifier, metrics.classification_report(expected, predicted))
print "Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)
for index, (image, prediction) in enumerate(
zip(digits.images[n_samples / 2:], predicted)[:4]):
pl.subplot(2, 4, index + 5)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Prediction: %i' % prediction)
pl.show()
| cdegroc/scikit-learn | examples/plot_digits_classification.py | Python | bsd-3-clause | 2,228 |
# -*- coding: utf-8 -*-
from flask import session, current_app
from quokka_themes import render_theme_template
def render_template(template, theme=None, **context):
theme = theme or []
if not isinstance(theme, (list, tuple)):
theme = [theme]
sys_theme = session.get('theme', current_app.config.get('DEFAULT_THEME'))
if sys_theme:
theme.append(sys_theme)
return render_theme_template(theme, template, **context)
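# Usage sketch inside a Quokka view (hypothetical template/theme names; needs an
# active Flask request context so `session` and `current_app` resolve):
#
#     return render_template('posts/list.html', theme='pure', posts=posts)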
| CoolCloud/quokka | quokka/core/templates.py | Python | mit | 452 |
import os
import tarfile
import h5py
import numpy
import six
from six.moves import range, cPickle
from fuel.converters.base import fill_hdf5_file, check_exists
DISTRIBUTION_FILE = 'cifar-10-python.tar.gz'
@check_exists(required_files=[DISTRIBUTION_FILE])
def convert_cifar10(directory, output_directory,
output_filename='cifar10.hdf5'):
"""Converts the CIFAR-10 dataset to HDF5.
Converts the CIFAR-10 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR10`. The converted dataset is saved as
'cifar10.hdf5'.
It assumes the existence of the following file:
* `cifar-10-python.tar.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar10.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
input_file = os.path.join(directory, DISTRIBUTION_FILE)
tar_file = tarfile.open(input_file, 'r:gz')
train_batches = []
for batch in range(1, 6):
file = tar_file.extractfile(
'cifar-10-batches-py/data_batch_%d' % batch)
try:
if six.PY3:
array = cPickle.load(file, encoding='latin1')
else:
array = cPickle.load(file)
train_batches.append(array)
finally:
file.close()
train_features = numpy.concatenate(
[batch['data'].reshape(batch['data'].shape[0], 3, 32, 32)
for batch in train_batches])
train_labels = numpy.concatenate(
[numpy.array(batch['labels'], dtype=numpy.uint8)
for batch in train_batches])
train_labels = numpy.expand_dims(train_labels, 1)
file = tar_file.extractfile('cifar-10-batches-py/test_batch')
try:
if six.PY3:
test = cPickle.load(file, encoding='latin1')
else:
test = cPickle.load(file)
finally:
file.close()
test_features = test['data'].reshape(test['data'].shape[0],
3, 32, 32)
test_labels = numpy.array(test['labels'], dtype=numpy.uint8)
test_labels = numpy.expand_dims(test_labels, 1)
data = (('train', 'features', train_features),
('train', 'targets', train_labels),
('test', 'features', test_features),
('test', 'targets', test_labels))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
def fill_subparser(subparser):
"""Sets up a subparser to convert the CIFAR10 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `cifar10` command.
"""
subparser.set_defaults(func=convert_cifar10)
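# Minimal usage sketch outside the fuel-convert CLI (hypothetical paths; assumes
# 'cifar-10-python.tar.gz' has already been downloaded into the input directory):
if __name__ == '__main__':
    output_paths = convert_cifar10('/data/fuel', '/data/fuel')
    print(output_paths)  # ('/data/fuel/cifar10.hdf5',)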
| nke001/attention-lvcsr | libs/fuel/fuel/converters/cifar10.py | Python | mit | 3,360 |
import os
import sys
class SoftChrootInitError(IOError):
"""Error during soft-chroot initialization"""
pass
class SoftChroot:
"""Soft Chroot module
Provides a chroot-like feature for interaction with the Web-UI. Since it is not a real chroot, it is called a SOFT CHROOT.
The module prevents access to entire file-system, allowing access only to subdirs of SOFT-CHROOT directory.
"""
def __init__(self):
self.enabled = None
self.chdir = None
def initialize(self, chdir):
""" initialize module, by setting soft-chroot-directory
Sets soft-chroot directory and 'enabled'-flag
Args:
self (SoftChroot) : self
chdir (string) : absolute path to soft-chroot
Raises:
SoftChrootInitError: when chdir doesn't exist
"""
orig_chdir = chdir
if chdir:
chdir = chdir.strip()
if (chdir):
# enabling soft-chroot:
if not os.path.isdir(chdir):
raise SoftChrootInitError(2, 'SOFT-CHROOT is requested, but the folder doesn\'t exist', orig_chdir)
self.enabled = True
self.chdir = chdir.rstrip(os.path.sep) + os.path.sep
else:
self.enabled = False
def get_chroot(self):
"""Returns root in chrooted environment
Raises:
RuntimeError: when `SoftChroot` is not initialized OR enabled
"""
if None == self.enabled:
raise RuntimeError('SoftChroot is not initialized')
if not self.enabled:
raise RuntimeError('SoftChroot is not enabled')
return self.chdir
def is_root_abs(self, abspath):
""" Checks whether absolute path @abspath is the root in the soft-chrooted environment"""
if None == self.enabled:
raise RuntimeError('SoftChroot is not initialized')
if None == abspath:
raise ValueError('abspath can not be None')
if not self.enabled:
# if not chroot environment : check, whether parent is the same dir:
parent = os.path.dirname(abspath.rstrip(os.path.sep))
return parent==abspath
# in soft-chrooted env: check, that path == chroot
path = abspath.rstrip(os.path.sep) + os.path.sep
return self.chdir == path
def is_subdir(self, abspath):
""" Checks whether @abspath is subdir (on any level) of soft-chroot"""
if None == self.enabled:
raise RuntimeError('SoftChroot is not initialized')
if None == abspath:
return False
if not self.enabled:
return True
if not abspath.endswith(os.path.sep):
abspath += os.path.sep
return abspath.startswith(self.chdir)
def chroot2abs(self, path):
""" Converts chrooted path to absolute path"""
if None == self.enabled:
raise RuntimeError('SoftChroot is not initialized')
if not self.enabled:
return path
if None == path or len(path)==0:
return self.chdir
if not path.startswith(os.path.sep):
path = os.path.sep + path
return self.chdir[:-1] + path
def abs2chroot(self, path, force = False):
""" Converts absolute path to chrooted path"""
if None == self.enabled:
raise RuntimeError('SoftChroot is not initialized')
if None == path:
raise ValueError('path is empty')
if not self.enabled:
return path
if path == self.chdir.rstrip(os.path.sep):
return '/'
result = None
if not path.startswith(self.chdir):
if (force):
result = self.get_chroot()
else:
raise ValueError("path must starts with 'chdir': %s" % path)
else:
l = len(self.chdir)-1
result = path[l:]
return result
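# Minimal sketch of the path translation (assumes '/tmp' exists on the host):
if __name__ == '__main__':
    sc = SoftChroot()
    sc.initialize('/tmp')
    print(sc.chroot2abs('/movies'))      # -> '/tmp/movies'
    print(sc.abs2chroot('/tmp/movies'))  # -> '/movies'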
| loulich/Couchpotato | couchpotato/core/softchroot.py | Python | gpl-3.0 | 3,963 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Augmentations for images.
"""
import collections
import functools
import itertools
import multiprocessing
import random
import numpy as np
import tensorflow as tf
from absl import flags
from libml import utils, ctaugment
from libml.utils import EasyDict
from third_party.auto_augment import augmentations, policies
FLAGS = flags.FLAGS
POOL = None
POLICIES = EasyDict(cifar10=policies.cifar10_policies(),
svhn=policies.svhn_policies(),
svhn_noextra=policies.svhn_policies())
RANDOM_POLICY_OPS = (
'Identity', 'AutoContrast', 'Equalize', 'Rotate',
'Solarize', 'Color', 'Contrast', 'Brightness',
'Sharpness', 'ShearX', 'TranslateX', 'TranslateY',
'Posterize', 'ShearY'
)
AUGMENT_ENUM = 'd x m aa'.split() + ['r%d_%d_%d' % (nops, mag, cutout) for nops, mag, cutout in
itertools.product(range(1, 5), range(1, 16), range(0, 100, 25))]
flags.DEFINE_integer('K', 1, 'Number of strong augmentation for unlabeled data.')
flags.DEFINE_enum('augment', 'd.d',
[x + '.' + y for x, y in itertools.product(AUGMENT_ENUM, AUGMENT_ENUM)] +
[x + '.' + y + '.' + z for x, y, z in itertools.product(AUGMENT_ENUM, 'dx', AUGMENT_ENUM)],
'Dataset augmentation method (x=identity, m=mirror, d=default, aa=auto-augment, '
'rxyy=random augment with x ops and magnitude yy),'
'first is for labeled data, others are for unlabeled.')
def init_pool():
global POOL
if POOL is None:
para = max(1, len(utils.get_available_gpus())) * FLAGS.para_augment
POOL = multiprocessing.Pool(para)
def augment_mirror(x):
return tf.image.random_flip_left_right(x)
def augment_shift(x, w):
y = tf.pad(x, [[w] * 2, [w] * 2, [0] * 2], mode='REFLECT')
return tf.random_crop(y, tf.shape(x))
def augment_noise(x, std):
return x + std * tf.random_normal(tf.shape(x), dtype=x.dtype)
def numpy_apply_policy(x, policy):
return augmentations.apply_policy(policy, x).astype('f')
def stack_augment(augment: list):
def func(x):
xl = [augment[i](x) if augment[i] is not None else x for i in range(len(augment))]
return {k: tf.stack([x[k] for x in xl]) for k in xl[0].keys()}
return func
class Primitives:
@staticmethod
def m():
return lambda x: augment_mirror(x['image'])
@staticmethod
def ms(shift):
return lambda x: augment_shift(augment_mirror(x['image']), shift)
@staticmethod
def s(shift):
return lambda x: augment_shift(x['image'], shift)
AugmentPair = collections.namedtuple('AugmentPair', 'tf numpy')
PoolEntry = collections.namedtuple('PoolEntry', 'payload batch')
class AugmentPool:
def __init__(self, get_samples):
self.get_samples = get_samples
def __call__(self, *args, **kwargs):
return self.get_samples()
NOAUGMENT = AugmentPair(tf=lambda x: dict(image=x['image'], label=x['label'], index=x.get('index', -1)),
numpy=AugmentPool)
class AugmentPoolAA(AugmentPool):
def __init__(self, get_samples, policy_group):
init_pool()
self.get_samples = get_samples
self.policy_group = policy_group
self.queue = []
self.fill_queue()
@staticmethod
def numpy_apply_policies(arglist):
x, policies = arglist
return np.stack([augmentations.apply_policy(policy, y) for y, policy in zip(x, policies)]).astype('f')
def queue_images(self, batch):
args = []
image = batch['image']
if image.ndim == 4:
for x in range(image.shape[0]):
args.append((image[x:x + 1], [random.choice(POLICIES[self.policy_group])]))
else:
for x in image[:, 1:]:
args.append((x, [random.choice(POLICIES[self.policy_group]) for _ in range(x.shape[0])]))
self.queue.append(PoolEntry(payload=POOL.imap(self.numpy_apply_policies, args), batch=batch))
def fill_queue(self):
for _ in range(4):
self.queue_images(self.get_samples())
def __call__(self, *args, **kwargs):
del args, kwargs
batch = self.get_samples()
entry = self.queue.pop(0)
samples = np.stack(list(entry.payload))
if entry.batch['image'].ndim == 4:
samples = samples.reshape(entry.batch['image'].shape)
entry.batch['image'] = samples
else:
samples = samples.reshape(entry.batch['image'][:, 1:].shape)
entry.batch['image'][:, 1:] = samples
self.queue_images(batch)
return entry.batch
class AugmentPoolRA(AugmentPoolAA):
def __init__(self, get_samples, nops, magnitude, cutout):
init_pool()
self.get_samples = get_samples
self.nops = nops
self.magnitude = magnitude
self.size = cutout
self.queue = []
self.fill_queue()
@staticmethod
def numpy_apply_policies(arglist):
x, policies, cutout = arglist
return np.stack([augmentations.cutout_numpy(augmentations.apply_policy(policy, y),
size=int(0.01 * cutout * min(y.shape[:2])))
for y, policy in zip(x, policies)]).astype('f')
def queue_images(self, batch):
args = []
image = batch['image']
policy = lambda: [(op, 1.0, self.magnitude) for op in np.random.choice(RANDOM_POLICY_OPS, self.nops)]
if image.ndim == 4:
for x in range(image.shape[0]):
args.append((image[x:x + 1], [policy()], self.size))
else:
for x in image[:, 1:]:
args.append((x, [policy() for _ in range(x.shape[0])], self.size))
self.queue.append(PoolEntry(payload=POOL.imap(self.numpy_apply_policies, args), batch=batch))
class AugmentPoolCTA(AugmentPool):
def __init__(self, get_samples):
init_pool()
self.get_samples = get_samples
self.queue = []
self.fill_queue()
@staticmethod
def numpy_apply_policies(arglist):
x, cta, probe = arglist
if x.ndim == 3:
assert probe
policy = cta.policy(probe=True)
return dict(policy=policy,
probe=ctaugment.apply(x, policy),
image=ctaugment.apply(x, cta.policy(probe=False)))
assert not probe
return dict(image=np.stack([x[0]] + [ctaugment.apply(y, cta.policy(probe=False)) for y in x[1:]]).astype('f'))
def queue_images(self):
batch = self.get_samples()
args = [(x, batch['cta'], batch['probe']) for x in batch['image']]
self.queue.append(PoolEntry(payload=POOL.imap(self.numpy_apply_policies, args), batch=batch))
def fill_queue(self):
for _ in range(4):
self.queue_images()
def __call__(self, *args, **kwargs):
del args, kwargs
entry = self.queue.pop(0)
samples = list(entry.payload)
entry.batch['image'] = np.stack(x['image'] for x in samples)
if 'probe' in samples[0]:
entry.batch['probe'] = np.stack(x['probe'] for x in samples)
entry.batch['policy'] = [x['policy'] for x in samples]
self.queue_images()
return entry.batch
DEFAULT_AUGMENT = EasyDict(
cifar10=AugmentPair(tf=lambda x: dict(image=Primitives.ms(4)(x), label=x['label'], index=x.get('index', -1)),
numpy=AugmentPool),
cifar100=AugmentPair(tf=lambda x: dict(image=Primitives.ms(4)(x), label=x['label'], index=x.get('index', -1)),
numpy=AugmentPool),
fashion_mnist=AugmentPair(tf=lambda x: dict(image=Primitives.ms(4)(x), label=x['label'], index=x.get('index', -1)),
numpy=AugmentPool),
stl10=AugmentPair(tf=lambda x: dict(image=Primitives.ms(12)(x), label=x['label'], index=x.get('index', -1)),
numpy=AugmentPool),
svhn=AugmentPair(tf=lambda x: dict(image=Primitives.s(4)(x), label=x['label'], index=x.get('index', -1)),
numpy=AugmentPool),
svhn_noextra=AugmentPair(tf=lambda x: dict(image=Primitives.s(4)(x), label=x['label'], index=x.get('index', -1)),
numpy=AugmentPool),
)
AUTO_AUGMENT = EasyDict({
k: AugmentPair(tf=v.tf, numpy=functools.partial(AugmentPoolAA, policy_group=k))
for k, v in DEFAULT_AUGMENT.items()
})
def get_augmentation(dataset: str, augmentation: str):
if augmentation == 'x':
return NOAUGMENT
elif augmentation == 'm':
return AugmentPair(tf=lambda x: dict(image=Primitives.m()(x), label=x['label'], index=x.get('index', -1)),
numpy=AugmentPool)
elif augmentation == 'd':
return DEFAULT_AUGMENT[dataset]
elif augmentation == 'aa':
return AUTO_AUGMENT[dataset]
elif augmentation[0] == 'r':
nops, mag, cutout = (int(x) for x in augmentation[1:].split('_'))
return AugmentPair(tf=DEFAULT_AUGMENT[dataset].tf,
numpy=functools.partial(AugmentPoolRA, nops=nops, magnitude=mag, cutout=cutout))
else:
raise NotImplementedError(augmentation)
def augment_function(dataset: str):
augmentations = FLAGS.augment.split('.')
assert len(augmentations) == 2
return [get_augmentation(dataset, x) for x in augmentations]
def pair_augment_function(dataset: str):
augmentations = FLAGS.augment.split('.')
assert len(augmentations) == 3
unlabeled = [get_augmentation(dataset, x) for x in augmentations[1:]]
return [get_augmentation(dataset, augmentations[0]),
AugmentPair(tf=stack_augment([x.tf for x in unlabeled]), numpy=unlabeled[-1].numpy)]
def many_augment_function(dataset: str):
augmentations = FLAGS.augment.split('.')
assert len(augmentations) == 3
unlabeled = [get_augmentation(dataset, x) for x in (augmentations[1:2] + augmentations[2:] * FLAGS.K)]
return [get_augmentation(dataset, augmentations[0]),
AugmentPair(tf=stack_augment([x.tf for x in unlabeled]), numpy=unlabeled[-1].numpy)]
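# Sketch of how an '--augment' spec string maps onto an AugmentPair (illustrative
# only; this just performs the lookup, no TensorFlow graph is built here):
if __name__ == '__main__':
    pair = get_augmentation('cifar10', 'r2_10_25')  # 2 random ops, magnitude 10, 25% cutout
    print(pair)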
| google-research/remixmatch | libml/augment.py | Python | apache-2.0 | 10,719 |
#!/usr/bin/python3
'''Implementation of the Beeminder API: https://www.beeminder.com/api
Still very incomplete.'''
import json
import re
import requests
class BeeminderMock:
def __init__(self, mockdata):
self.mockdata = mockdata
@staticmethod
def getid(path):
m = re.search('([a-z0-9]+)\.json$', path)
return m.group(1) if m else None
def execute(self, path, params=None, request_type='get'):
print(path, params, request_type)
if path.endswith('datapoints.json'):
return self.mockdata
if request_type == 'delete':
dataid = self.getid(path)
for (i, pt) in enumerate(self.mockdata):
if pt['id'] == dataid:
del self.mockdata[i]
return pt
else:
return None
raise ValueError((path, params, request_type))
class BeeminderBackend:
API_URL = 'https://www.beeminder.com/api/v1/'
def __init__(self, auth_token, dryrun, debug):
self.auth_token = auth_token
self.dryrun = dryrun
self.debug = debug
def execute(self, path, params=None, request_type='get'):
'''Return the url for accessing path, adding the given args
and the auth token'''
if self.debug:
print('{method} {path} {params}'.format(
method=request_type.upper(),
path=path,
params=params or ''
))
if self.dryrun:
print('{} {} {}'.format(request_type.upper(), path, params))
key = (path,
tuple(sorted(params.items())) if params is not None else (),
request_type)
print(repr(key))
return self.dryrun[key]
path = path.lstrip('/')
method = getattr(requests, request_type)
args = {'auth_token': self.auth_token}
if params is not None:
args.update(params)
response = method(self.API_URL + path, params=args)
return response.json()
class Beeminder:
def __init__(self, auth_token, username=None, dryrun=False, debug=False):
self._username = username
self._user = None
self._goals = {}
self._data = {}
if not debug:
self.backend = BeeminderBackend(auth_token, dryrun, debug)
else:
import mock
self.backend = BeeminderMock(mock.mockdata)
def get(self, path, params=None):
return self.backend.execute(path, params, 'get')
def delete(self, path, params=None):
return self.backend.execute(path, params, 'delete')
def put(self, path, params=None):
return self.backend.execute(path, params, 'put')
def post(self, path, params=None):
return self.backend.execute(path, params, 'post')
@property
def user(self):
'''Return the user object'''
if self._user is None:
self._user = self.get('/users/me.json')
return self._user
@property
def username(self):
'''Return the user's username'''
return self._username or self.user['username']
def goal(self, slug):
'''Return the goal object for the given slug'''
if slug not in self._goals:
path = '/users/{}/goals/{}.json'.format(
self.username, slug)
self._goals[slug] = self.get(path)
return self._goals[slug]
@property
def goals(self):
'''Return the list of goals'''
path = '/users/{}/goals.json'.format(self.username)
goal_list = self.get(path)
for goal in goal_list:
slug = goal['slug']
self._goals[slug] = goal
return self._goals
def data(self, slug):
'''Return the datapoints for the given goal'''
if slug not in self._data:
path = '/users/{}/goals/{}/datapoints.json'.format(
self.username, slug)
self._data[slug] = self.get(path)
return self._data[slug]
def delete_point(self, slug, data_id):
path = '/users/{}/goals/{}/datapoints/{}.json'.format(
self.username, slug, data_id)
return self.delete(path)
def update_point(self, slug, data_id,
timestamp=None, value=None, comment=None):
path = '/users/{}/goals/{}/datapoints/{}.json'.format(
self.username, slug, data_id)
params = {}
if timestamp is not None:
params['timestamp'] = timestamp
if value is not None:
params['value'] = value
if comment is not None:
params['comment'] = comment
if params:
return self.put(path, params=params)
def create_all(self, slug, points):
path = '/users/{}/goals/{}/datapoints/create_all.json'.format(
self.username, slug)
datapoints = json.dumps(points)
self.post(path, params={'datapoints': datapoints})
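# Usage sketch (hypothetical auth token and goal slug; this issues real GET
# requests against the Beeminder API):
if __name__ == '__main__':
    bee = Beeminder('my-auth-token')
    print(bee.username)
    print(bee.data('exercise'))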
| sagittarian/beelist | beeminder.py | Python | mit | 4,196 |
# OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
import os, gtk
import gtk, gtk.glade
from classes.project import project
from classes import messagebox, info
from windows.SimpleGladeApp import SimpleGladeApp
# init the foreign language
from language import Language_Init
class frmAbout(SimpleGladeApp):
def __init__(self, path="About.glade", root="aboutdialog1", domain="OpenShot", version="0.0.0", project=None, **kwargs):
SimpleGladeApp.__init__(self, os.path.join(project.GLADE_DIR, path), root, domain, **kwargs)
# Add language support
_ = Language_Init.Translator(project).lang.gettext
# set version from constructor
self.aboutdialog1.set_version(version)
# init authors
authors = []
for person in info.CREDITS['code']:
name = person['name']
email = person['email']
authors.append("%s <%s>" % (name, email))
self.aboutdialog1.set_authors(authors)
# init documenters
authors = []
for person in info.CREDITS['documentation']:
name = person['name']
email = person['email']
authors.append("%s <%s>" % (name, email))
self.aboutdialog1.set_documenters(authors)
# init translators
self.aboutdialog1.set_translator_credits("Translation credits are located on LaunchPad:\nhttps://translations.launchpad.net/openshot")
def new(self):
print "A new %s has been created" % self.__class__.__name__
def on_aboutdialog1_close(self, widget, *args):
print "on_aboutdialog1_close called with self.%s" % widget.get_name()
# close the window
self.frmAbout.destroy()
def on_aboutdialog1_response(self, widget, *args):
print "on_aboutdialog1_close called with self.%s" % widget.get_name()
# close the window
self.aboutdialog1.destroy()
def main():
aboutdialog1 = frmAbout()
aboutdialog1.run()
if __name__ == "__main__":
main()
| zaenalarifin/openshot_jmd | openshot/windows/About.py | Python | gpl-3.0 | 2,627 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import random
import functools
from devtools_testutils import AzureTestCase, PowerShellPreparer
from azure.agrifood.farming.aio import FarmBeatsClient
from azure.agrifood.farming.models import Boundary, Polygon
from azure.core.exceptions import HttpResponseError
class FarmBeatsTestAsync(AzureTestCase):
def create_client(self, agrifood_endpoint):
credential = self.get_credential(FarmBeatsClient, is_async=True)
return self.create_client_from_credential(
FarmBeatsClient,
endpoint=agrifood_endpoint,
credential=credential,
)
def generate_random_name(self, name):
if self.is_live:
created_name = "{}-{}".format(name, random.randint(0, 100000))
self.scrubber.register_name_pair(created_name, name)
return created_name
return name
async def create_boundary_if_not_exist(self, client, farmer_id, boundary_id):
try:
return await client.boundaries.get(farmer_id=farmer_id, boundary_id=boundary_id)
except HttpResponseError:
return await client.boundaries.create_or_update(
farmer_id=farmer_id,
boundary_id=boundary_id,
boundary=Boundary(
description="Created by SDK",
geometry=Polygon(
coordinates=[
[
[73.70457172393799, 20.545385304358106],
[73.70457172393799, 20.545385304358106],
[73.70448589324951, 20.542411534243367],
[73.70877742767334, 20.541688176010233],
[73.71023654937744, 20.545083911372505],
[73.70663166046143, 20.546992723579137],
[73.70457172393799, 20.545385304358106],
]
]
)
)
)
async def delete_boundary(self, client, farmer_id, boundary_id):
await client.boundaries.delete(farmer_id=farmer_id, boundary_id=boundary_id)
| Azure/azure-sdk-for-python | sdk/agrifood/azure-agrifood-farming/tests/testcase_async.py | Python | mit | 2,509 |
'''
***********************************************************
* Discrete Structures
* Trip Through Germany Program
* Programmer: Mark Eatough
* Course: CSIS 2430
* Created November 3, 2013
***********************************************************
'''
from TimeDistanceMoney import*
class TrainTravel:
def __init__(self, cityFrom, cityTo, time, dollars):
self.cityFrom = cityFrom
self.cityTo = cityTo
self.time = time
self.dollars = Dollars(dollars)
self.euros = convertToEuros(dollars)
self.travelBy = "Train"
def displayTrainTrip(self):
print "Leaving From: ", self.cityFrom
print "Comming To: ", self.cityTo
print "Total time: ", self.time
print "Total cost in euros: ", self.euros
print "Total cost in USD: ", self.dollars
class TaxiTravel:
def __init__(self, cityFrom, cityTo, distance):
self.cityFrom = cityFrom
self.cityTo = cityTo
self.euros = Euros(float(distance)*1.2)
self.dollars = convertToDollars(self.euros.euros)
self.time = Time(0,int((float(distance)/float(130))*60))
self.travelBy = "Taxi" | meatough/Marks-Programs | cs 2430/Assignment12 Germany Trip/TrainClasses.py | Python | gpl-3.0 | 1,163 |
from pandac.PandaModules import *
from direct.showbase.PythonUtil import reduceAngle
from otp.movement import Impulse
import math
class PetChase(Impulse.Impulse):
def __init__(self, target = None, minDist = None, moveAngle = None):
Impulse.Impulse.__init__(self)
self.target = target
if minDist is None:
minDist = 5.0
self.minDist = minDist
if moveAngle is None:
moveAngle = 20.0
self.moveAngle = moveAngle
self.lookAtNode = NodePath('lookatNode')
self.lookAtNode.hide()
self.vel = None
self.rotVel = None
return
def setTarget(self, target):
self.target = target
def destroy(self):
self.lookAtNode.removeNode()
del self.lookAtNode
del self.target
del self.vel
del self.rotVel
def _setMover(self, mover):
Impulse.Impulse._setMover(self, mover)
self.lookAtNode.reparentTo(self.nodePath)
self.vel = self.VecType(0)
self.rotVel = self.VecType(0)
def _process(self, dt):
Impulse.Impulse._process(self, dt)
me = self.nodePath
target = self.target
targetPos = target.getPos(me)
x = targetPos[0]
y = targetPos[1]
distance = math.sqrt(x * x + y * y)
self.lookAtNode.lookAt(target)
relH = reduceAngle(self.lookAtNode.getH(me))
epsilon = 0.005
rotSpeed = self.mover.getRotSpeed()
if relH < -epsilon:
vH = -rotSpeed
elif relH > epsilon:
vH = rotSpeed
else:
vH = 0
if abs(vH * dt) > abs(relH):
vH = relH / dt
if distance > self.minDist and abs(relH) < self.moveAngle:
vForward = self.mover.getFwdSpeed()
else:
vForward = 0
distanceLeft = distance - self.minDist
if distance > self.minDist and vForward * dt > distanceLeft:
vForward = distanceLeft / dt
if vForward:
self.vel.setY(vForward)
self.mover.addShove(self.vel)
if vH:
self.rotVel.setX(vH)
self.mover.addRotShove(self.rotVel)
def setMinDist(self, minDist):
self.minDist = minDist
| Spiderlover/Toontown | toontown/pets/PetChase.py | Python | mit | 2,267 |
"""py.test for simpleread.py"""
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import StringIO
import eppy.simpleread as simpleread
def test_idf2txt():
"""py.test for idf2txt"""
data = ((
"""
VERSION,
7.3; !- Version Identifier
SIMULATIONCONTROL,
Yes, !- Do Zone Sizing Calculation
Yes, !- Do System Sizing Calculation
Yes, !- Do Plant Sizing Calculation
No, !- Run Simulation for Sizing Periods
Yes; !- Run Simulation for Weather File Run Periods
BUILDING,
Empire State Building, !- Name
30.0, !- North Axis
City, !- Terrain
0.04, !- Loads Convergence Tolerance Value
0.4, !- Temperature Convergence Tolerance Value
FullExterior, !- Solar Distribution
25, !- Maximum Number of Warmup Days
6; !- Minimum Number of Warmup Days
SITE:LOCATION,
CHICAGO_IL_USA TMY2-94846, !- Name
41.78, !- Latitude
-87.75, !- Longitude
-6.0, !- Time Zone
190.0; !- Elevation
""",
""";
BUILDING,
Empire State Building,
30.0,
City,
0.04,
0.4,
FullExterior,
25.0,
6.0;
SIMULATIONCONTROL,
Yes,
Yes,
Yes,
No,
Yes;
SITE:LOCATION,
CHICAGO_IL_USA TMY2-94846,
41.78,
-87.75,
-6.0,
190.0;
VERSION,
7.3;
"""), # intxt, outtxt
)
for intxt, outtxt in data:
result = simpleread.idf2txt(intxt)
assert result == outtxt
def test_idfreadtest():
"""py.test for idfreadtest"""
data = (("""!IDD_Version 7.2.0.006
Version,
\\unique-object
\\format singleLine
A1 ; \\field Version Identifier
SimulationControl,
\\unique-object
A1, \\field Do Zone Sizing Calculation
A2, \\field Do System Sizing Calculation
A3, \\field Do Plant Sizing Calculation
A4, \\field Run Simulation for Sizing Periods
A5; \\field Run Simulation for Weather File Run Periods
Building,
\\unique-object
A1 , \\field Name
N1 , \\field North Axis
A2 , \\field Terrain
N2 , \\field Loads Convergence Tolerance Value
N3 , \\field Temperature Convergence Tolerance Value
A3 , \\field Solar Distribution
N4 , \\field Maximum Number of Warmup Days
N5 ; \\field Minimum Number of Warmup Days
Site:Location,
\\unique-object
A1 , \\field Name
N1 , \\field Latitude
N2 , \\field Longitude
N3 , \\field Time Zone
N4 ; \\field Elevation
""",
"""
VERSION,
7.3; !- Version Identifier
SIMULATIONCONTROL,
Yes, !- Do Zone Sizing Calculation
Yes, !- Do System Sizing Calculation
Yes, !- Do Plant Sizing Calculation
No, !- Run Simulation for Sizing Periods
Yes; !- Run Simulation for Weather File Run Periods
BUILDING,
Empire State Building, !- Name
30.0, !- North Axis
City, !- Terrain
0.04, !- Loads Convergence Tolerance Value
0.4, !- Temperature Convergence Tolerance Value
FullExterior, !- Solar Distribution
25, !- Maximum Number of Warmup Days
6; !- Minimum Number of Warmup Days
SITE:LOCATION,
CHICAGO_IL_USA TMY2-94846, !- Name
41.78, !- Latitude
-87.75, !- Longitude
-6.0, !- Time Zone
190.0; !- Elevation
"""
), # iddtxt, idftxt
)
for iddtxt, idftxt in data:
iddhandle = StringIO.StringIO(iddtxt)
idfhandle1 = StringIO.StringIO(idftxt)
idfhandle2 = StringIO.StringIO(idftxt)
result = simpleread.idfreadtest(iddhandle, idfhandle1, idfhandle2)
assert result == True
| pachi/eppy | eppy/tests/test_simpleread.py | Python | mit | 4,629 |
from flask import Flask,request, abort
import os
import datetime
import hashlib
from databaseutil import DatabaseUtility
import json
import hmac
import base64
from datetime import datetime as dt, timedelta as td
import config
dbutil = DatabaseUtility()
app = Flask(__name__)
KEY = config.SECRET_KEY # should be same as that in app
NONCE_ALIVE_TIME = 2 * 60 #seconds
def generate_hash(registration_id):
return str(hashlib.sha256(registration_id.encode()).hexdigest())
@app.errorhandler(404)
def page_not_found(error):
return "{\"error\",\"NOT FOUND\"}",404
def registerid(registration_id):
hash = generate_hash(registration_id)
if dbutil.is_present(hash, registration_id):
return '{\"status\":\"Already Present\"}'
dbutil.add_id(hash, registration_id)
return '{\"status\":\"Added\"}'
def get_files():
return {'files' : dbutil.get_files(int(request.form['offset']), request.form['datatypes']) }
def get_topmost_files():
return {'files' : dbutil.get_topmost_files(int(request.form['offset']), request.form['datatypes']) }
def get_recent_files():
return {'files' : dbutil.get_recent_files(int(request.form['offset']), request.form['datatypes']) }
def authenticate():
if not ('sec' in request.form and 'sig' in request.form):
abort(404)
sec = request.form['sec']
sig = request.form['sig']
ts = base64.urlsafe_b64decode(sec.encode())
try:
nonce_creation_time = dt.strptime(ts.decode(), "%Y-%m-%d %H:%M:%S")
except Exception:
abort(404)
if nonce_creation_time + td(seconds = NONCE_ALIVE_TIME) < dt.utcnow():
abort(404)
h = hmac.new(KEY.encode(), sec.encode(), digestmod=hashlib.sha256)
if sig != h.hexdigest():
abort(404)
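# Client-side sketch of the handshake authenticate() expects (hypothetical helper,
# not part of the original app): 'sec' is the urlsafe-base64 of the current UTC
# timestamp and 'sig' is the hex HMAC-SHA256 of 'sec' under the shared SECRET_KEY.
def make_auth_params(key=KEY):
    ts = dt.utcnow().strftime("%Y-%m-%d %H:%M:%S")
    sec = base64.urlsafe_b64encode(ts.encode()).decode()
    sig = hmac.new(key.encode(), sec.encode(), digestmod=hashlib.sha256).hexdigest()
    return {'sec': sec, 'sig': sig}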
@app.route('/register/',methods=['POST'])
def register():
authenticate()
registration_id = request.form['registration_id']
return registerid(registration_id)
@app.route('/published/',methods=['POST'])
def published():
authenticate()
return json.dumps(get_files())
@app.route('/datatypes/',methods=['POST'])
def get_data_types():
authenticate()
return json.dumps({'datatypes':dbutil.get_data_types()})
@app.route('/topmost/',methods=['POST'])
def topmost():
authenticate()
return json.dumps(get_topmost_files())
@app.route('/updateselfviews/',methods=['POST'])
def updateselfviews():
authenticate()
selfviews = json.loads(request.form['selfviews'])
newviews = {}
for file in selfviews:
dbutil.updatefileviews(file['fileid'], int(file['selfviews']))
newviews['fileid'] = dbutil.getfileviews(file['fileid'])
return json.dumps(newviews)
@app.route('/incrementviews/',methods=['POST'])
def incrementviews():
authenticate()
fileid = request.form['fileid']
dbutil.updatefileviews(fileid, 1)
return '{"success":"' + fileid + ' views incremented by one"}'
@app.route('/recent/',methods=['POST'])
def recent():
authenticate()
return json.dumps(get_recent_files())
@app.route('/')
def get_time():
now = datetime.datetime.now()
nowStr = str(now.year) + "-" + str(now.month) + "-" + str(now.day) + " " + str(now.hour) + " " + str(now.minute) + " " + str(now.second)
return nowStr
if __name__ == '__main__':
port = int(os.environ.get("PORT", 8000))
app.run(host='0.0.0.0', port=port, debug=True)
| rachitnaruzu/resultnotifier | backend/main.py | Python | mit | 3,402 |
#! python
# WMI query to list all properties and values of the root/cimv2:Win32_BIOS class.
# To use WMI in Python, install the Python for Windows extensions:
# http://sourceforge.net/projects/pywin32/files/pywin32/
# This Python script was generated using the WMI Code Generator, Version 9.02
# http://www.robvanderwoude.com/wmigen.php
import sys
import win32com.client
try:
strComputer = sys.argv[1]
except IndexError:
strComputer = "."
try:
strUsername = sys.argv[2]
except IndexError:
strUsername = ""
try:
strPassword = sys.argv[3]
except IndexError:
strPassword = ""
objWMIService = win32com.client.Dispatch( "WbemScripting.SWbemLocator" )
objSWbemServices = objWMIService.ConnectServer( strComputer, "root/cimv2", strUsername, strPassword )
colItems = objSWbemServices.ExecQuery( "SELECT * FROM Win32_BIOS" )
global serviceTag
serviceTag = ""
for objItem in colItems:
strList = " "
try:
for objElem in objItem.BIOSVersion :
strList = strList + str( objElem ) + " "
except:
strList = strList + 'null'
print( " BIOS :" + strList )
print( " Service Tag : " + str(objItem.SerialNumber))
print
serviceTag = str(objItem.SerialNumber)
| kek91/administratorsfriend | sys_win32_bios.py | Python | mit | 1,306 |
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import os
from os import path
import contextlib
import subprocess
from subprocess import PIPE
import sys
import toml
from mach.registrar import Registrar
@contextlib.contextmanager
def cd(new_path):
"""Context manager for changing the current working directory"""
previous_path = os.getcwd()
try:
os.chdir(new_path)
yield
finally:
os.chdir(previous_path)
def host_triple():
os_type = subprocess.check_output(["uname", "-s"]).strip().lower()
if os_type == "linux":
os_type = "unknown-linux-gnu"
elif os_type == "darwin":
os_type = "apple-darwin"
elif os_type == "android":
os_type == "linux-androideabi"
else:
os_type == "unknown"
cpu_type = subprocess.check_output(["uname", "-m"]).strip().lower()
if cpu_type in ["i386", "i486", "i686", "i768", "x86"]:
cpu_type = "i686"
elif cpu_type in ["x86_64", "x86-64", "x64", "amd64"]:
cpu_type = "x86_64"
elif cpu_type == "arm":
cpu_type = "arm"
else:
cpu_type = "unknown"
return "%s-%s" % (cpu_type, os_type)
class CommandBase(object):
"""Base class for mach command providers.
This mostly handles configuration management, such as .servobuild."""
def __init__(self, context):
self.context = context
def resolverelative(category, key):
# Allow ~
self.config[category][key] = path.expanduser(self.config[category][key])
# Resolve relative paths
self.config[category][key] = path.join(context.topdir,
self.config[category][key])
if not hasattr(self.context, "bootstrapped"):
self.context.bootstrapped = False
config_path = path.join(context.topdir, ".servobuild")
if path.exists(config_path):
with open(config_path) as f:
self.config = toml.loads(f.read())
else:
self.config = {}
# Handle missing/default items
self.config.setdefault("tools", {})
default_cache_dir = os.environ.get("SERVO_CACHE_DIR",
path.join(context.topdir, ".servo"))
self.config["tools"].setdefault("cache-dir", default_cache_dir)
resolverelative("tools", "cache-dir")
self.config["tools"].setdefault("cargo-home-dir",
path.join(context.topdir, ".cargo"))
resolverelative("tools", "cargo-home-dir")
context.sharedir = self.config["tools"]["cache-dir"]
self.config["tools"].setdefault("system-rust", False)
self.config["tools"].setdefault("system-cargo", False)
self.config["tools"].setdefault("rust-root", "")
self.config["tools"].setdefault("cargo-root", "")
if not self.config["tools"]["system-rust"]:
self.config["tools"]["rust-root"] = path.join(
context.sharedir, "rust", *self.rust_snapshot_path().split("/"))
if not self.config["tools"]["system-cargo"]:
self.config["tools"]["cargo-root"] = path.join(
context.sharedir, "cargo", self.cargo_build_id())
self.config.setdefault("build", {})
self.config["build"].setdefault("android", False)
self.config["build"].setdefault("mode", "")
self.config["build"].setdefault("debug-mozjs", False)
self.config.setdefault("android", {})
self.config["android"].setdefault("sdk", "")
self.config["android"].setdefault("ndk", "")
self.config["android"].setdefault("toolchain", "")
self.config.setdefault("gonk", {})
self.config["gonk"].setdefault("b2g", "")
self.config["gonk"].setdefault("product", "flame")
_rust_snapshot_path = None
_cargo_build_id = None
def rust_snapshot_path(self):
if self._rust_snapshot_path is None:
filename = path.join(self.context.topdir, "rust-snapshot-hash")
with open(filename) as f:
snapshot_hash = f.read().strip()
self._rust_snapshot_path = "%s-%s" % (snapshot_hash, host_triple())
return self._rust_snapshot_path
def cargo_build_id(self):
if self._cargo_build_id is None:
filename = path.join(self.context.topdir, "cargo-nightly-build")
with open(filename) as f:
self._cargo_build_id = f.read().strip()
return self._cargo_build_id
def get_target_dir(self):
if "CARGO_TARGET_DIR" in os.environ:
return os.environ["CARGO_TARGET_DIR"]
else:
return path.join(self.context.topdir, "target")
def get_binary_path(self, release, dev):
base_path = self.get_target_dir()
release_path = path.join(base_path, "release", "servo")
dev_path = path.join(base_path, "debug", "servo")
# Prefer release if both given
if release and dev:
dev = False
release_exists = path.exists(release_path)
dev_exists = path.exists(dev_path)
if not release_exists and not dev_exists:
print("No Servo binary found. Please run './mach build' and try again.")
sys.exit()
if release and release_exists:
return release_path
if dev and dev_exists:
return dev_path
if not dev and not release and release_exists and dev_exists:
print("You have multiple profiles built. Please specify which "
"one to run with '--release' or '--dev'.")
sys.exit()
if not dev and not release:
if release_exists:
return release_path
else:
return dev_path
print("The %s profile is not built. Please run './mach build%s' "
"and try again." % ("release" if release else "dev",
" --release" if release else ""))
sys.exit()
def build_env(self, gonk=False, hosts_file_path=None):
"""Return an extended environment dictionary."""
env = os.environ.copy()
extra_path = []
extra_lib = []
if not self.config["tools"]["system-rust"] \
or self.config["tools"]["rust-root"]:
env["RUST_ROOT"] = self.config["tools"]["rust-root"]
# These paths are for when rust-root points to an unpacked installer
extra_path += [path.join(self.config["tools"]["rust-root"], "rustc", "bin")]
extra_lib += [path.join(self.config["tools"]["rust-root"], "rustc", "lib")]
# These paths are for when rust-root points to a rustc sysroot
extra_path += [path.join(self.config["tools"]["rust-root"], "bin")]
extra_lib += [path.join(self.config["tools"]["rust-root"], "lib")]
if not self.config["tools"]["system-cargo"] \
or self.config["tools"]["cargo-root"]:
# This path is for when rust-root points to an unpacked installer
extra_path += [
path.join(self.config["tools"]["cargo-root"], "cargo", "bin")]
# This path is for when rust-root points to a rustc sysroot
extra_path += [
path.join(self.config["tools"]["cargo-root"], "bin")]
if extra_path:
env["PATH"] = "%s%s%s" % (
os.pathsep.join(extra_path), os.pathsep, env["PATH"])
if "CARGO_HOME" not in env:
env["CARGO_HOME"] = self.config["tools"]["cargo-home-dir"]
if "CARGO_TARGET_DIR" not in env:
env["CARGO_TARGET_DIR"] = path.join(self.context.topdir, "target")
if extra_lib:
if sys.platform == "darwin":
env["DYLD_LIBRARY_PATH"] = "%s%s%s" % \
(os.pathsep.join(extra_lib),
os.pathsep,
env.get("DYLD_LIBRARY_PATH", ""))
else:
env["LD_LIBRARY_PATH"] = "%s%s%s" % \
(os.pathsep.join(extra_lib),
os.pathsep,
env.get("LD_LIBRARY_PATH", ""))
# Paths to Android build tools:
if self.config["android"]["sdk"]:
env["ANDROID_SDK"] = self.config["android"]["sdk"]
if self.config["android"]["ndk"]:
env["ANDROID_NDK"] = self.config["android"]["ndk"]
if self.config["android"]["toolchain"]:
env["ANDROID_TOOLCHAIN"] = self.config["android"]["toolchain"]
if gonk:
if self.config["gonk"]["b2g"]:
env["GONKDIR"] = self.config["gonk"]["b2g"]
if "GONKDIR" not in env:
# Things can get pretty opaque if this hasn't been set
print("Please set $GONKDIR in your environment or .servobuild file")
sys.exit(1)
if self.config["gonk"]["product"]:
env["GONK_PRODUCT"] = self.config["gonk"]["product"]
env["ARCH_DIR"] = "arch-arm"
env["CPPFLAGS"] = (
"-DANDROID -DTARGET_OS_GONK "
"-DANDROID_VERSION=19 "
"-DGR_GL_USE_NEW_SHADER_SOURCE_SIGNATURE=1 "
"-isystem %(gonkdir)s/bionic/libc/%(archdir)s/include "
"-isystem %(gonkdir)s/bionic/libc/include/ "
"-isystem %(gonkdir)s/bionic/libc/kernel/common "
"-isystem %(gonkdir)s/bionic/libc/kernel/%(archdir)s "
"-isystem %(gonkdir)s/bionic/libm/include "
"-I%(gonkdir)s/system "
"-I%(gonkdir)s/system/core/include "
"-I%(gonkdir)s/frameworks/native/opengl/include "
"-I%(gonkdir)s/external/zlib "
) % {"gonkdir": env["GONKDIR"], "archdir": env["ARCH_DIR"]}
env["CXXFLAGS"] = (
"-O2 -mandroid -fPIC "
"-isystem %(gonkdir)s/api/cpp/include "
"-isystem %(gonkdir)s/external/stlport/stlport "
"-isystem %(gonkdir)s/bionic "
"-isystem %(gonkdir)s/bionic/libstdc++/include "
"%(cppflags)s"
) % {"gonkdir": env["GONKDIR"], "cppflags": env["CPPFLAGS"]}
env["CFLAGS"] = (
"%(cxxflags)s"
) % {"cxxflags": env["CXXFLAGS"]}
another_extra_path = path.join(
env["GONKDIR"], "prebuilts", "gcc", "linux-x86", "arm", "arm-linux-androideabi-4.7", "bin")
env["gonkdir"] = env["GONKDIR"]
env["gonk_toolchain_prefix"] = (
"%(toolchain)s/arm-linux-androideabi-"
) % {"toolchain": another_extra_path}
env["PATH"] = "%s%s%s" % (another_extra_path, os.pathsep, env["PATH"])
env["LDFLAGS"] = (
"-mandroid -L%(gonkdir)s/out/target/product/%(gonkproduct)s/obj/lib "
"-Wl,-rpath-link=%(gonkdir)s/out/target/product/%(gonkproduct)s/obj/lib "
"--sysroot=%(gonkdir)s/out/target/product/%(gonkproduct)s/obj/"
) % {"gonkdir": env["GONKDIR"], "gonkproduct": env["GONK_PRODUCT"]}
# Not strictly necessary for a vanilla build, but might be when tweaking the openssl build
openssl_dir = (
"%(gonkdir)s/out/target/product/%(gonkproduct)s/obj/lib"
) % {"gonkdir": env["GONKDIR"], "gonkproduct": env["GONK_PRODUCT"]}
env["OPENSSL_LIB_DIR"] = openssl_dir
env['OPENSSL_INCLUDE_DIR'] = path.join(env["GONKDIR"], "external/openssl/include")
# FIXME: These are set because they are the variable names that
# android-rs-glue expects. However, other submodules have makefiles that
# reference the env var names above. Once glutin is enabled and set as
# the default, we could modify the subproject makefiles to use the names
# below and remove the vars above, to avoid duplication.
if "ANDROID_SDK" in env:
env["ANDROID_HOME"] = env["ANDROID_SDK"]
if "ANDROID_NDK" in env:
env["NDK_HOME"] = env["ANDROID_NDK"]
if "ANDROID_TOOLCHAIN" in env:
env["NDK_STANDALONE"] = env["ANDROID_TOOLCHAIN"]
if hosts_file_path:
env['HOST_FILE'] = hosts_file_path
env['RUSTDOC'] = path.join(self.context.topdir, 'etc', 'rustdoc-with-private')
if subprocess.call(['which', 'ld.gold'], stdout=PIPE, stderr=PIPE) == 0:
env['RUSTC'] = path.join(self.context.topdir, 'etc', 'rustc-with-gold')
return env
def servo_crate(self):
return path.join(self.context.topdir, "components", "servo")
def android_support_dir(self):
return path.join(self.context.topdir, "support", "android")
def ensure_bootstrapped(self):
if self.context.bootstrapped:
return
Registrar.dispatch("update-submodules", context=self.context)
if not self.config["tools"]["system-rust"] and \
not path.exists(path.join(
self.config["tools"]["rust-root"], "rustc", "bin", "rustc")):
Registrar.dispatch("bootstrap-rust", context=self.context)
if not self.config["tools"]["system-cargo"] and \
not path.exists(path.join(
self.config["tools"]["cargo-root"], "cargo", "bin", "cargo")):
Registrar.dispatch("bootstrap-cargo", context=self.context)
self.context.bootstrapped = True
| meh/servo | python/servo/command_base.py | Python | mpl-2.0 | 13,987 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 11:14:28 2018
@author: carlos.arana
Standard metadata object
"""
import pandas as pd
from AsignarDimension.AsignarDimension import AsignarDimension
class Meta(object):
def __init__(self, name):
# Parameter descriptions
self.name = name
self.ClaveParametro = str()
self.NombreParametro = str()
self.DescParam = str()
self.UnidadesParam = str()
self.TituloParametro = str() # Used to name the parameter column
self.PeriodoParam = str()
self.TipoInt = int()
# Handlings
self.ParDtype = str()
self.TipoVar = str()
self.array = []
self.TipoAgr = str()
# Descriptions of the data-mining process
self.nomarchivodataset = str()
self.extarchivodataset = str()
self.ArchivoDataset = str()
self.ContenidoHojaDatos = str()
self.ClaveDataset = str()
self.ActDatos = str()
self.Agregacion = str()
self.DescVarIntegridad = str()
self.DirFuente = str()
self.DSBase = str()
self.ClaveDimension = str()
self.NomDimension = str()
self.DirDimension = str()
self.RepoMina = str()
self.DirDestino = str()
# Prepare definitions that come from the source
self.NomDataset = str()
self.DescDataset = str()
self.DispTemp = str()
self.PeriodoAct = str()
self.DesagrMax = str()
self.Notas = str()
self.NomFuente = str()
self.UrlFuente = str()
# Instructions included for the compiler
self.getmetafromds = 0 # If 1, the compiler will pull metadata from the standard dataset.
self.percent = 0 # If 1, the compiler will assign the parameter data as the percentage of
# municipalities with data.
# Generate automatic metadata
def metafromds(self):
# Load the dataset metadata
metadataset = pd.read_excel(self.DirFuente + '\\' + self.ArchivoDataset,
sheetname="METADATOS")
metadataset.set_index('Metadato', inplace=True)
metadataset = metadataset['Descripcion']
# Descriptions generated from the dataset metadata.
self.NomDataset = metadataset['Nombre del Dataset']
self.DescDataset = metadataset['Descripcion del dataset']
self.DispTemp = metadataset['Disponibilidad Temporal']
self.PeriodoAct = metadataset['Periodo de actualizacion']
self.DesagrMax = metadataset['Nivel de Desagregacion']
self.Notas = metadataset['Notas']
self.NomFuente = metadataset['Fuente']
self.UrlFuente = metadataset['URL_Fuente']
def fillmeta(self):
self.ArchivoDataset = self.nomarchivodataset + "." + self.extarchivodataset
self.DirFuente = r'D:\PCCS\01_Dmine\Datasets\{}'.format(self.ClaveDataset)
self.DSBase = '"{}", disponible en https://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/{}'.format(
self.ArchivoDataset, self.ClaveDataset)
self.ClaveDimension = self.ClaveParametro[1:3]
self.NomDimension = AsignarDimension(self.ClaveDimension)['nombre']
self.DirDimension = self.ClaveDimension + "_" + AsignarDimension(self.ClaveDimension)['directorio']
self.RepoMina = 'https://github.com/INECC-PCCS/01_Dmine/tree/master/{}/{}'.format(
self.DirDimension, self.ClaveParametro)
self.DirDestino = r'D:\PCCS\01_Dmine\{}'.format(
self.ClaveDimension + "_" + AsignarDimension(self.ClaveDimension)['directorio'])
if self.getmetafromds == 1:
self.metafromds()
DescIntegridad = {
1 : 'La variable de integridad municipal para esta Dataset es binaria: \n'
'1 = El municipio cuenta con informacion \n0 = El municipio no cuenta con información',
2 : 'Para calcular la variable de integridad de este dataset, se verifica la existencia de datos en '
'cada una de las variables que se utilizaron para construir el parámetro. El valor de la variable '
'de integridad multiplicado por 100 indica el porcentaje de variables del dataset que tienen '
'datos para la construcción del parámetro',
3 : 'Los datos para este parametro se agregaron desde los individuos de una poblacion, por lo que se '
'considera que los datos están completos y que si un municipio no tiene datos significa que ese '
'municipio tiene cero unidades de las que está considerando el parámetro',
}
self.DescVarIntegridad = DescIntegridad[self.TipoInt]
def checkall(self):
contents = {
'name': self.name,
'ClaveParametro': self.ClaveParametro,
'NombreParametro': self.NombreParametro,
'DescParam': self.DescParam,
'UnidadesParam': self.UnidadesParam,
'TituloParametro': self.TituloParametro,
'PeriodoParam': self.PeriodoParam,
'TipoInt': self.TipoInt,
'ParDtype': self.ParDtype,
'TipoVar': self.TipoVar,
'nomarchivodataset': self.nomarchivodataset,
'extarchivodataset': self.extarchivodataset,
'ArchivoDataset': self.ArchivoDataset,
'ContenidoHojaDatos': self.ContenidoHojaDatos,
'ClaveDataset': self.ClaveDataset,
'ActDatos': self.ActDatos,
'Agregacion': self.Agregacion,
'DescVarIntegridad': self.DescVarIntegridad,
'DirFuente': self.DirFuente,
'DSBase': self.DSBase,
'ClaveDimension': self.ClaveDimension,
'NomDimension': self.NomDimension,
'DirDimension': self.DirDimension,
'RepoMina': self.RepoMina,
'DirDestino': self.DirDestino,
'NomDataset': self.NomDataset,
'DescDataset': self.DescDataset,
'DispTemp': self.DispTemp,
'PeriodoAct': self.PeriodoAct,
'DesagrMax': self.DesagrMax,
'Notas': self.Notas,
'NomFuente': self.NomFuente,
'UrlFuente': self.UrlFuente,
'getmetafromds': self.getmetafromds,
}
# Report the metadata fields that are still empty (or left at their zero default).
for k, v in contents.items():
try:
if v == '' or v == int():
print(k)
except Exception:
print('{} : {}'.format(k, v))
# Update the parameter notes manually, either by adding notes or by replacing
# the defaults that come with the dataset.
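# Illustrative usage sketch: a mining script would typically create a Meta object,
# fill in the descriptive fields, call fillmeta() to derive paths and repository
# URLs, and finish with checkall() to list any fields still left empty. The
# parameter key and dataset values below are hypothetical examples, not taken
# from any real dataset.
if __name__ == '__main__':
    meta = Meta('P0101')                      # hypothetical parameter key
    meta.ClaveParametro = 'P0101'
    meta.NombreParametro = 'Example parameter'
    meta.nomarchivodataset = 'P0101'          # hypothetical dataset file name
    meta.extarchivodataset = 'xlsx'
    meta.ClaveDataset = 'EJEMPLO'             # hypothetical dataset key
    meta.TipoInt = 1                          # binary municipal integrity variable
    meta.fillmeta()
    meta.checkall()                           # prints the metadata fields still empty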
| Caranarq/01_Dmine | Scripts/classes/Meta.py | Python | gpl-3.0 | 7,445 |
# -*- coding: utf-8 -*-
import json
import os
import stat
import tarfile
import zipfile
from datetime import datetime, timedelta
from django.conf import settings
from django.core.files import temp
from django.core.files.storage import default_storage as storage
from django.test.utils import override_settings
from unittest import mock
import responses
import six
from pyquery import PyQuery as pq
from six import text_type
from six.moves.urllib_parse import urlencode
from waffle.testutils import override_switch
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import (
Addon, AddonCategory, AddonReviewerFlags, Category)
from olympia.amo.tests import (
TestCase, addon_factory, create_default_webext_appversion, formset,
initial, version_factory)
from olympia.amo.urlresolvers import reverse
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.constants.licenses import LICENSES_BY_BUILTIN
from olympia.devhub import views
from olympia.files.tests.test_models import UploadTest
from olympia.files.utils import parse_addon
from olympia.lib.akismet.models import AkismetReport
from olympia.lib.git import AddonGitRepository
from olympia.users.models import UserProfile
from olympia.versions.models import License, VersionPreview
from olympia.zadmin.models import Config, set_config
def get_addon_count(name):
"""Return the number of addons with the given name."""
return Addon.unfiltered.filter(name__localized_string=name).count()
def _parse_addon_theme_permission_wrapper(*args, **kwargs):
parsed = parse_addon(*args, **kwargs)
parsed['permissions'] = parsed.get('permissions', []) + ['theme']
return parsed
class TestSubmitBase(TestCase):
fixtures = ['base/addon_3615', 'base/addon_5579', 'base/users']
def setUp(self):
super(TestSubmitBase, self).setUp()
assert self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.addon = self.get_addon()
def get_addon(self):
return Addon.objects.get(pk=3615)
def get_version(self):
return self.get_addon().versions.latest()
def generate_source_zip(self, suffix='.zip', data=u'z' * (2 ** 21),
compression=zipfile.ZIP_DEFLATED):
tdir = temp.gettempdir()
source = temp.NamedTemporaryFile(suffix=suffix, dir=tdir)
with zipfile.ZipFile(source, 'w', compression=compression) as zip_file:
zip_file.writestr('foo', data)
source.seek(0)
return source
def generate_source_tar(
self, suffix='.tar.gz', data=b't' * (2 ** 21), mode=None):
tdir = temp.gettempdir()
source = temp.NamedTemporaryFile(suffix=suffix, dir=tdir)
if mode is None:
mode = 'w:bz2' if suffix.endswith('.tar.bz2') else 'w:gz'
with tarfile.open(fileobj=source, mode=mode) as tar_file:
tar_info = tarfile.TarInfo('foo')
tar_info.size = len(data)
tar_file.addfile(tar_info, six.BytesIO(data))
source.seek(0)
return source
def generate_source_garbage(self, suffix='.zip', data=b'g' * (2 ** 21)):
tdir = temp.gettempdir()
source = temp.NamedTemporaryFile(suffix=suffix, dir=tdir)
source.write(data)
source.seek(0)
return source
class TestAddonSubmitAgreementWithPostReviewEnabled(TestSubmitBase):
def test_set_read_dev_agreement(self):
response = self.client.post(reverse('devhub.submit.agreement'), {
'distribution_agreement': 'on',
'review_policy': 'on',
})
assert response.status_code == 302
self.user.reload()
self.assertCloseToNow(self.user.read_dev_agreement)
def test_set_read_dev_agreement_error(self):
set_config('last_dev_agreement_change_date', '2018-01-01 00:00')
before_agreement_last_changed = (
datetime(2018, 1, 1) - timedelta(days=1))
self.user.update(read_dev_agreement=before_agreement_last_changed)
response = self.client.post(reverse('devhub.submit.agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
form = response.context['agreement_form']
assert form.is_valid() is False
assert form.errors == {
'distribution_agreement': [u'This field is required.'],
'review_policy': [u'This field is required.'],
}
doc = pq(response.content)
for id_ in form.errors.keys():
selector = 'li input#id_%s + a + .errorlist' % id_
assert doc(selector).text() == 'This field is required.'
def test_read_dev_agreement_skip(self):
after_agreement_last_changed = (
datetime(2018, 1, 1) + timedelta(days=1))
self.user.update(read_dev_agreement=after_agreement_last_changed)
response = self.client.get(reverse('devhub.submit.agreement'))
self.assert3xx(response, reverse('devhub.submit.distribution'))
def test_read_dev_agreement_set_to_future(self):
set_config('last_dev_agreement_change_date', '2099-12-31 00:00')
read_dev_date = datetime(2018, 1, 1)
self.user.update(read_dev_agreement=read_dev_date)
response = self.client.get(reverse('devhub.submit.agreement'))
self.assert3xx(response, reverse('devhub.submit.distribution'))
def test_read_dev_agreement_set_to_future_not_agreed_yet(self):
set_config('last_dev_agreement_change_date', '2099-12-31 00:00')
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.submit.agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
def test_read_dev_agreement_invalid_date_agreed_post_fallback(self):
set_config('last_dev_agreement_change_date', '2099-25-75 00:00')
read_dev_date = datetime(2018, 1, 1)
self.user.update(read_dev_agreement=read_dev_date)
response = self.client.get(reverse('devhub.submit.agreement'))
self.assert3xx(response, reverse('devhub.submit.distribution'))
def test_read_dev_agreement_invalid_date_not_agreed_post_fallback(self):
set_config('last_dev_agreement_change_date', '2099,31,12,0,0')
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.submit.agreement'))
self.assertRaises(ValueError)
assert response.status_code == 200
assert 'agreement_form' in response.context
def test_read_dev_agreement_no_date_configured_agreed_post_fallback(self):
response = self.client.get(reverse('devhub.submit.agreement'))
self.assert3xx(response, reverse('devhub.submit.distribution'))
def test_read_dev_agreement_no_date_configured_not_agreed_post_fallb(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.submit.agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
def test_read_dev_agreement_captcha_inactive(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.submit.agreement'))
assert response.status_code == 200
form = response.context['agreement_form']
assert 'recaptcha' not in form.fields
doc = pq(response.content)
assert doc('.g-recaptcha') == []
@override_switch('developer-agreement-captcha', active=True)
def test_read_dev_agreement_captcha_active_error(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.submit.agreement'))
assert response.status_code == 200
form = response.context['agreement_form']
assert 'recaptcha' in form.fields
response = self.client.post(reverse('devhub.submit.agreement'))
# Captcha is properly rendered
doc = pq(response.content)
assert doc('.g-recaptcha')
assert 'recaptcha' in response.context['agreement_form'].errors
@override_switch('developer-agreement-captcha', active=True)
def test_read_dev_agreement_captcha_active_success(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.submit.agreement'))
assert response.status_code == 200
form = response.context['agreement_form']
assert 'recaptcha' in form.fields
# Captcha is also properly rendered
doc = pq(response.content)
assert doc('.g-recaptcha')
verify_data = urlencode({
'secret': '',
'remoteip': '127.0.0.1',
'response': 'test',
})
responses.add(
responses.GET,
'https://www.google.com/recaptcha/api/siteverify?' + verify_data,
json={'error-codes': [], 'success': True})
response = self.client.post(reverse('devhub.submit.agreement'), data={
'g-recaptcha-response': 'test',
'distribution_agreement': 'on',
'review_policy': 'on',
})
assert response.status_code == 302
assert response['Location'] == reverse('devhub.submit.distribution')
class TestAddonSubmitDistribution(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestAddonSubmitDistribution, self).setUp()
self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
def test_check_agreement_okay(self):
response = self.client.post(reverse('devhub.submit.agreement'))
self.assert3xx(response, reverse('devhub.submit.distribution'))
response = self.client.get(reverse('devhub.submit.distribution'))
assert response.status_code == 200
# No error shown for a redirect from previous step.
assert b'This field is required' not in response.content
def test_submit_notification_warning(self):
config = Config.objects.create(
key='submit_notification_warning',
value='Text with <a href="http://example.com">a link</a>.')
response = self.client.get(reverse('devhub.submit.distribution'))
assert response.status_code == 200
doc = pq(response.content)
assert doc('.notification-box.warning').html().strip() == config.value
def test_redirect_back_to_agreement(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(
reverse('devhub.submit.distribution'), follow=True)
self.assert3xx(response, reverse('devhub.submit.agreement'))
# read_dev_agreement needs to be a more recent date than
# the setting.
set_config('last_dev_agreement_change_date', '2018-01-01 00:00')
before_agreement_last_changed = (
datetime(2018, 1, 1) - timedelta(days=1))
self.user.update(read_dev_agreement=before_agreement_last_changed)
response = self.client.get(
reverse('devhub.submit.distribution'), follow=True)
self.assert3xx(response, reverse('devhub.submit.agreement'))
def test_listed_redirects_to_next_step(self):
response = self.client.post(reverse('devhub.submit.distribution'),
{'channel': 'listed'})
self.assert3xx(response,
reverse('devhub.submit.upload', args=['listed']))
def test_unlisted_redirects_to_next_step(self):
response = self.client.post(reverse('devhub.submit.distribution'),
{'channel': 'unlisted'})
self.assert3xx(response, reverse('devhub.submit.upload',
args=['unlisted']))
def test_channel_selection_error_shown(self):
url = reverse('devhub.submit.distribution')
# First load should have no error
assert b'This field is required' not in self.client.get(url).content
# Load with channel preselected (e.g. back from next step) - no error.
assert b'This field is required' not in self.client.get(
url, args=['listed']).content
# A post submission without channel selection should be an error
assert b'This field is required' in self.client.post(url).content
class TestAddonSubmitUpload(UploadTest, TestCase):
fixtures = ['base/users']
@classmethod
def setUpTestData(cls):
create_default_webext_appversion()
def setUp(self):
super(TestAddonSubmitUpload, self).setUp()
self.upload = self.get_upload('webextension_no_id.xpi')
assert self.client.login(email='[email protected]')
self.client.post(reverse('devhub.submit.agreement'))
def post(self, compatible_apps=None, expect_errors=False,
listed=True, status_code=200, url=None, extra_kwargs=None):
if compatible_apps is None:
compatible_apps = [amo.FIREFOX, amo.ANDROID]
data = {
'upload': self.upload.uuid.hex,
'compatible_apps': [p.id for p in compatible_apps]
}
url = url or reverse('devhub.submit.upload',
args=['listed' if listed else 'unlisted'])
response = self.client.post(
url, data, follow=True, **(extra_kwargs or {}))
assert response.status_code == status_code
if not expect_errors:
# Show any unexpected form errors.
if response.context and 'new_addon_form' in response.context:
assert (
response.context['new_addon_form'].errors.as_text() == '')
return response
def test_unique_name(self):
addon_factory(name='Beastify')
self.post(expect_errors=False)
def test_unlisted_name_not_unique(self):
"""We don't enforce name uniqueness for unlisted add-ons."""
addon_factory(name='Beastify',
version_kw={'channel': amo.RELEASE_CHANNEL_LISTED})
assert get_addon_count('Beastify') == 1
# We're not passing `expect_errors=True`, so if there were any errors
# like "This name is already in use. Please choose another one", the
# test would fail.
response = self.post()
# Kind of redundant with the `self.post()` above: we just want to make
# really sure there are no errors raised by posting an add-on with a name
# that is already used by an unlisted add-on.
assert 'new_addon_form' not in response.context
assert get_addon_count('Beastify') == 2
def test_name_not_unique_between_types(self):
"""We don't enforce name uniqueness between add-ons types."""
addon_factory(name='Beastify', type=amo.ADDON_THEME)
assert get_addon_count('Beastify') == 1
# We're not passing `expect_errors=True`, so if there were any errors
# like "This name is already in use. Please choose another one", the
# test would fail.
response = self.post()
# Kind of redundant with the `self.post()` above: we just want to make
# really sure there are no errors raised by posting an add-on with a name
# that is already used by an add-on of a different type.
assert 'new_addon_form' not in response.context
assert get_addon_count('Beastify') == 2
def test_success_listed(self):
assert Addon.objects.count() == 0
response = self.post()
addon = Addon.objects.get()
version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
assert version
assert version.channel == amo.RELEASE_CHANNEL_LISTED
self.assert3xx(
response, reverse('devhub.submit.source', args=[addon.slug]))
log_items = ActivityLog.objects.for_addons(addon)
assert log_items.filter(action=amo.LOG.CREATE_ADDON.id), (
'New add-on creation never logged.')
assert not addon.tags.filter(tag_text='dynamic theme').exists()
@mock.patch('olympia.reviewers.utils.sign_file')
def test_success_unlisted(self, mock_sign_file):
"""Sign automatically."""
assert Addon.objects.count() == 0
# No validation errors or warning.
result = {
'errors': 0,
'warnings': 0,
'notices': 2,
'metadata': {},
'messages': [],
}
self.upload = self.get_upload(
'extension.xpi', validation=json.dumps(result))
self.post(listed=False)
addon = Addon.objects.get()
version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert version
assert version.channel == amo.RELEASE_CHANNEL_UNLISTED
assert addon.status == amo.STATUS_NULL
assert mock_sign_file.called
assert not addon.tags.filter(tag_text='dynamic theme').exists()
def test_missing_compatible_apps(self):
url = reverse('devhub.submit.upload', args=['listed'])
response = self.client.post(url, {'upload': self.upload.uuid.hex})
assert response.status_code == 200
assert response.context['new_addon_form'].errors.as_text() == (
'* compatible_apps\n * Need to select at least one application.')
doc = pq(response.content)
assert doc('ul.errorlist').text() == (
'Need to select at least one application.')
def test_default_supported_platforms(self):
"""Test that we default to PLATFORM_ALL during submission.
This is temporary while we're in the process of getting rid
of supported platforms.
https://github.com/mozilla/addons-server/issues/8752
"""
response = self.post()
addon = Addon.objects.get()
# Success, redirecting to source submission step.
self.assert3xx(
response, reverse('devhub.submit.source', args=[addon.slug]))
# Check that `all_files` is correct
all_ = sorted([f.filename for f in addon.current_version.all_files])
assert all_ == [u'beastify-1.0-an+fx.xpi']
# Default to PLATFORM_ALL
assert addon.current_version.supported_platforms == [amo.PLATFORM_ALL]
# And check that compatible apps have a sensible default too
apps = [app.id for app in addon.current_version.compatible_apps.keys()]
assert sorted(apps) == sorted([amo.FIREFOX.id, amo.ANDROID.id])
@mock.patch('olympia.devhub.views.auto_sign_file')
def test_one_xpi_for_multiple_apps_unlisted_addon(
self, mock_auto_sign_file):
assert Addon.objects.count() == 0
response = self.post(
compatible_apps=[amo.FIREFOX, amo.ANDROID], listed=False)
addon = Addon.unfiltered.get()
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
self.assert3xx(
response, reverse('devhub.submit.source', args=[addon.slug]))
all_ = sorted([f.filename for f in latest_version.all_files])
assert all_ == [u'beastify-1.0-an+fx.xpi']
mock_auto_sign_file.assert_has_calls([
mock.call(f)
for f in latest_version.all_files])
def test_static_theme_wizard_button_shown(self):
response = self.client.get(reverse(
'devhub.submit.upload', args=['listed']), follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#wizardlink')
assert doc('#wizardlink').attr('href') == (
reverse('devhub.submit.wizard', args=['listed']))
response = self.client.get(reverse(
'devhub.submit.upload', args=['unlisted']), follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#wizardlink')
assert doc('#wizardlink').attr('href') == (
reverse('devhub.submit.wizard', args=['unlisted']))
def test_static_theme_submit_listed(self):
assert Addon.objects.count() == 0
path = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
self.upload = self.get_upload(abspath=path)
response = self.post()
addon = Addon.objects.get()
self.assert3xx(
response, reverse('devhub.submit.details', args=[addon.slug]))
all_ = sorted([f.filename for f in addon.current_version.all_files])
assert all_ == [u'weta_fade-1.0-an+fx.xpi'] # A single XPI for all.
assert addon.type == amo.ADDON_STATICTHEME
previews = list(addon.current_version.previews.all())
assert len(previews) == 3
assert storage.exists(previews[0].image_path)
assert storage.exists(previews[1].image_path)
assert storage.exists(previews[2].image_path)
def test_static_theme_submit_unlisted(self):
assert Addon.unfiltered.count() == 0
path = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
self.upload = self.get_upload(abspath=path)
with mock.patch('olympia.devhub.views.auto_sign_file', lambda x: None):
response = self.post(listed=False)
addon = Addon.unfiltered.get()
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
self.assert3xx(
response, reverse('devhub.submit.finish', args=[addon.slug]))
all_ = sorted([f.filename for f in latest_version.all_files])
assert all_ == [u'weta_fade-1.0-an+fx.xpi'] # A single XPI for all.
assert addon.type == amo.ADDON_STATICTHEME
# Only listed submissions need a preview generated.
assert latest_version.previews.all().count() == 0
def test_static_theme_wizard_listed(self):
# Check we get the correct template.
url = reverse('devhub.submit.wizard', args=['listed'])
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#theme-wizard')
assert doc('#theme-wizard').attr('data-version') == '1.0'
assert doc('input#theme-name').attr('type') == 'text'
# And then check the upload works. In reality the zip is generated
# client side in JS but the zip file is the same.
assert Addon.objects.count() == 0
path = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
self.upload = self.get_upload(abspath=path)
response = self.post(url=url)
addon = Addon.objects.get()
# Next step is same as non-wizard flow too.
self.assert3xx(
response, reverse('devhub.submit.details', args=[addon.slug]))
all_ = sorted([f.filename for f in addon.current_version.all_files])
assert all_ == [u'weta_fade-1.0-an+fx.xpi'] # A single XPI for all.
assert addon.type == amo.ADDON_STATICTHEME
previews = list(addon.current_version.previews.all())
assert len(previews) == 3
assert storage.exists(previews[0].image_path)
assert storage.exists(previews[1].image_path)
assert storage.exists(previews[2].image_path)
def test_static_theme_wizard_unlisted(self):
# Check we get the correct template.
url = reverse('devhub.submit.wizard', args=['unlisted'])
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#theme-wizard')
assert doc('#theme-wizard').attr('data-version') == '1.0'
assert doc('input#theme-name').attr('type') == 'text'
# And then check the upload works. In reality the zip is generated
# client side in JS but the zip file is the same.
assert Addon.unfiltered.count() == 0
path = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
self.upload = self.get_upload(abspath=path)
with mock.patch('olympia.devhub.views.auto_sign_file', lambda x: None):
response = self.post(url=url, listed=False)
addon = Addon.unfiltered.get()
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
# Next step is same as non-wizard flow too.
self.assert3xx(
response, reverse('devhub.submit.finish', args=[addon.slug]))
all_ = sorted([f.filename for f in latest_version.all_files])
assert all_ == [u'weta_fade-1.0-an+fx.xpi'] # A single XPI for all.
assert addon.type == amo.ADDON_STATICTHEME
# Only listed submissions need a preview generated.
assert latest_version.previews.all().count() == 0
@mock.patch('olympia.devhub.forms.parse_addon',
wraps=_parse_addon_theme_permission_wrapper)
def test_listed_dynamic_theme_is_tagged(self, parse_addon_mock):
assert Addon.objects.count() == 0
path = os.path.join(
settings.ROOT,
'src/olympia/devhub/tests/addons/valid_webextension.xpi')
self.upload = self.get_upload(abspath=path)
response = self.post()
addon = Addon.objects.get()
self.assert3xx(
response, reverse('devhub.submit.source', args=[addon.slug]))
assert addon.tags.filter(tag_text='dynamic theme').exists()
@mock.patch('olympia.devhub.forms.parse_addon',
wraps=_parse_addon_theme_permission_wrapper)
def test_unlisted_dynamic_theme_isnt_tagged(self, parse_addon_mock):
assert Addon.objects.count() == 0
path = os.path.join(
settings.ROOT,
'src/olympia/devhub/tests/addons/valid_webextension.xpi')
self.upload = self.get_upload(abspath=path)
with mock.patch('olympia.devhub.views.auto_sign_file', lambda x: None):
response = self.post(listed=False)
addon = Addon.objects.get()
self.assert3xx(
response, reverse('devhub.submit.source', args=[addon.slug]))
assert not addon.tags.filter(tag_text='dynamic theme').exists()
class TestAddonSubmitSource(TestSubmitBase):
def setUp(self):
super(TestAddonSubmitSource, self).setUp()
assert not self.get_version().source
self.url = reverse('devhub.submit.source', args=[self.addon.slug])
self.next_url = reverse(
'devhub.submit.details', args=[self.addon.slug])
def post(self, has_source, source, expect_errors=False, status_code=200):
data = {
'has_source': 'yes' if has_source else 'no',
}
if source is not None:
data['source'] = source
response = self.client.post(self.url, data, follow=True)
assert response.status_code == status_code
if not expect_errors:
# Show any unexpected form errors.
if response.context and 'form' in response.context:
assert response.context['form'].errors == {}
return response
@override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=1)
def test_submit_source(self):
response = self.post(
has_source=True, source=self.generate_source_zip())
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert self.get_version().source
assert self.addon.needs_admin_code_review
mode = (
'0%o' % (os.stat(self.get_version().source.path)[stat.ST_MODE]))
assert mode == '0100644'
def test_submit_source_targz(self):
response = self.post(
has_source=True, source=self.generate_source_tar())
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert self.get_version().source
assert self.addon.needs_admin_code_review
mode = (
'0%o' % (os.stat(self.get_version().source.path)[stat.ST_MODE]))
assert mode == '0100644'
def test_submit_source_tgz(self):
response = self.post(
has_source=True, source=self.generate_source_tar(
suffix='.tgz'))
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert self.get_version().source
assert self.addon.needs_admin_code_review
mode = (
'0%o' % (os.stat(self.get_version().source.path)[stat.ST_MODE]))
assert mode == '0100644'
def test_submit_source_tarbz2(self):
response = self.post(
has_source=True, source=self.generate_source_tar(
suffix='.tar.bz2'))
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert self.get_version().source
assert self.addon.needs_admin_code_review
mode = (
'0%o' % (os.stat(self.get_version().source.path)[stat.ST_MODE]))
assert mode == '0100644'
@override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=1)
def test_say_no_but_submit_source_anyway_fails(self):
response = self.post(
has_source=False, source=self.generate_source_zip(),
expect_errors=True)
assert response.context['form'].errors == {
'source': [
u'Source file uploaded but you indicated no source was needed.'
]
}
self.addon = self.addon.reload()
assert not self.get_version().source
assert not self.addon.needs_admin_code_review
def test_say_yes_but_dont_submit_source_fails(self):
response = self.post(
has_source=True, source=None, expect_errors=True)
assert response.context['form'].errors == {
'source': [u'You have not uploaded a source file.']
}
self.addon = self.addon.reload()
assert not self.get_version().source
assert not self.addon.needs_admin_code_review
@override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=2 ** 22)
def test_submit_source_in_memory_upload(self):
source = self.generate_source_zip()
source_size = os.stat(source.name)[stat.ST_SIZE]
assert source_size < settings.FILE_UPLOAD_MAX_MEMORY_SIZE
response = self.post(has_source=True, source=source)
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert self.get_version().source
assert self.addon.needs_admin_code_review
mode = (
'0%o' % (os.stat(self.get_version().source.path)[stat.ST_MODE]))
assert mode == '0100644'
@override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=2 ** 22)
def test_submit_source_in_memory_upload_with_targz(self):
source = self.generate_source_tar()
source_size = os.stat(source.name)[stat.ST_SIZE]
assert source_size < settings.FILE_UPLOAD_MAX_MEMORY_SIZE
response = self.post(has_source=True, source=source)
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert self.get_version().source
assert self.addon.needs_admin_code_review
mode = (
'0%o' % (os.stat(self.get_version().source.path)[stat.ST_MODE]))
assert mode == '0100644'
def test_with_bad_source_extension(self):
response = self.post(
has_source=True, source=self.generate_source_zip(suffix='.exe'),
expect_errors=True)
assert response.context['form'].errors == {
'source': [
u'Unsupported file type, please upload an archive file '
u'(.zip, .tar.gz, .tar.bz2).'],
}
self.addon = self.addon.reload()
assert not self.get_version().source
assert not self.addon.needs_admin_code_review
def test_with_non_compressed_tar(self):
response = self.post(
# Generate a .tar.gz which is actually not compressed.
has_source=True, source=self.generate_source_tar(mode='w'),
expect_errors=True)
assert response.context['form'].errors == {
'source': [u'Invalid or broken archive.'],
}
self.addon = self.addon.reload()
assert not self.get_version().source
assert not self.addon.needs_admin_code_review
def test_with_bad_source_not_an_actual_archive(self):
response = self.post(
has_source=True, source=self.generate_source_garbage(
suffix='.zip'), expect_errors=True)
assert response.context['form'].errors == {
'source': [u'Invalid or broken archive.'],
}
self.addon = self.addon.reload()
assert not self.get_version().source
assert not self.addon.needs_admin_code_review
def test_with_bad_source_broken_archive(self):
source = self.generate_source_zip(
data='Hello World', compression=zipfile.ZIP_STORED)
data = source.read().replace(b'Hello World', b'dlroW olleH')
source.seek(0) # First seek to rewrite from the beginning
source.write(data)
source.seek(0) # Second seek to reset like it's fresh.
# Still looks like a zip at first glance.
assert zipfile.is_zipfile(source)
source.seek(0) # Last seek to reset source descriptor before posting.
response = self.post(
has_source=True, source=source, expect_errors=True)
assert response.context['form'].errors == {
'source': [u'Invalid or broken archive.'],
}
self.addon = self.addon.reload()
assert not self.get_version().source
assert not self.addon.needs_admin_code_review
def test_with_bad_source_broken_archive_compressed_tar(self):
source = self.generate_source_tar()
with open(source.name, "r+b") as fobj:
fobj.truncate(512)
# Still looks like a tar at first glance.
assert tarfile.is_tarfile(source.name)
# Re-open and post.
with open(source.name, 'rb'):
response = self.post(
has_source=True, source=source, expect_errors=True)
assert response.context['form'].errors == {
'source': [u'Invalid or broken archive.'],
}
self.addon = self.addon.reload()
assert not self.get_version().source
assert not self.addon.needs_admin_code_review
def test_no_source(self):
response = self.post(has_source=False, source=None)
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert not self.get_version().source
assert not self.addon.needs_admin_code_review
def test_non_extension_redirects_past_to_details(self):
# static themes should redirect
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url)
self.assert3xx(response, self.next_url)
# extensions shouldn't redirect
self.addon.update(type=amo.ADDON_EXTENSION)
response = self.client.get(self.url)
assert response.status_code == 200
# check another non-extension type also redirects
self.addon.update(type=amo.ADDON_DICT)
response = self.client.get(self.url)
self.assert3xx(response, self.next_url)
@override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=1)
@override_switch('enable-uploads-commit-to-git-storage', active=False)
def test_submit_source_doesnt_commit_to_git_by_default(self):
response = self.post(
has_source=True, source=self.generate_source_zip())
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert self.get_version().source
repo = AddonGitRepository(self.addon.pk, package_type='source')
assert not os.path.exists(repo.git_repository_path)
@override_switch('enable-uploads-commit-to-git-storage', active=True)
def test_submit_source_commits_to_git(self):
response = self.post(
has_source=True, source=self.generate_source_zip())
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert self.get_version().source
repo = AddonGitRepository(self.addon.pk, package_type='source')
assert os.path.exists(repo.git_repository_path)
@override_switch('enable-uploads-commit-to-git-storage', active=True)
@mock.patch('olympia.devhub.views.extract_version_source_to_git.delay')
def test_submit_source_commits_to_git_asnychronously(self, extract_mock):
response = self.post(
has_source=True, source=self.generate_source_zip())
self.assert3xx(response, self.next_url)
self.addon = self.addon.reload()
assert self.get_version().source
extract_mock.assert_called_once_with(
version_id=self.addon.current_version.pk,
author_id=self.user.pk)
class DetailsPageMixin(object):
""" Some common methods between TestAddonSubmitDetails and
TestStaticThemeSubmitDetails."""
def is_success(self, data):
assert self.get_addon().status == amo.STATUS_NULL
response = self.client.post(self.url, data)
assert all(self.get_addon().get_required_metadata())
assert response.status_code == 302
assert self.get_addon().status == amo.STATUS_NOMINATED
return response
def test_submit_name_existing(self):
"""Test that we can submit two add-ons with the same name."""
qs = Addon.objects.filter(name__localized_string='Cooliris')
assert qs.count() == 1
self.is_success(self.get_dict(name='Cooliris'))
assert qs.count() == 2
def test_submit_name_length(self):
# Make sure the name isn't too long.
data = self.get_dict(name='a' * 51)
response = self.client.post(self.url, data)
assert response.status_code == 200
error = 'Ensure this value has at most 50 characters (it has 51).'
self.assertFormError(response, 'form', 'name', error)
def test_submit_name_symbols_only(self):
data = self.get_dict(name='()+([#')
response = self.client.post(self.url, data)
assert response.status_code == 200
error = (
'Ensure this field contains at least one letter or number'
' character.')
self.assertFormError(response, 'form', 'name', error)
data = self.get_dict(name='±↡∋⌚')
response = self.client.post(self.url, data)
assert response.status_code == 200
error = (
'Ensure this field contains at least one letter or number'
' character.')
self.assertFormError(response, 'form', 'name', error)
# 'ø' is not a symbol, it's actually a letter, so it should be valid.
data = self.get_dict(name=u'ø')
response = self.client.post(self.url, data)
assert response.status_code == 302
assert self.get_addon().name == u'ø'
def test_submit_slug_invalid(self):
# Submit an invalid slug.
data = self.get_dict(slug='slug!!! aksl23%%')
response = self.client.post(self.url, data)
assert response.status_code == 200
self.assertFormError(response, 'form', 'slug', "Enter a valid 'slug'" +
' consisting of letters, numbers, underscores or '
'hyphens.')
def test_submit_slug_required(self):
# Make sure the slug is required.
response = self.client.post(self.url, self.get_dict(slug=''))
assert response.status_code == 200
self.assertFormError(
response, 'form', 'slug', 'This field is required.')
def test_submit_summary_required(self):
# Make sure summary is required.
response = self.client.post(self.url, self.get_dict(summary=''))
assert response.status_code == 200
self.assertFormError(
response, 'form', 'summary', 'This field is required.')
def test_submit_summary_symbols_only(self):
data = self.get_dict(summary='()+([#')
response = self.client.post(self.url, data)
assert response.status_code == 200
error = (
'Ensure this field contains at least one letter or number'
' character.')
self.assertFormError(response, 'form', 'summary', error)
data = self.get_dict(summary='±↡∋⌚')
response = self.client.post(self.url, data)
assert response.status_code == 200
error = (
'Ensure this field contains at least one letter or number'
' character.')
self.assertFormError(response, 'form', 'summary', error)
# 'ø' is not a symbol, it's actually a letter, so it should be valid.
data = self.get_dict(summary=u'ø')
response = self.client.post(self.url, data)
assert response.status_code == 302
assert self.get_addon().summary == u'ø'
def test_submit_summary_length(self):
# Summary is too long.
response = self.client.post(self.url, self.get_dict(summary='a' * 251))
assert response.status_code == 200
error = 'Ensure this value has at most 250 characters (it has 251).'
self.assertFormError(response, 'form', 'summary', error)
def test_nomination_date_set_only_once(self):
self.get_version().update(nomination=None)
self.is_success(self.get_dict())
self.assertCloseToNow(self.get_version().nomination)
# Check nomination date is only set once, see bug 632191.
nomdate = datetime.now() - timedelta(days=5)
self.get_version().update(nomination=nomdate, _signal=False)
# Update something else in the addon:
self.get_addon().update(slug='foobar')
assert self.get_version().nomination.timetuple()[0:5] == (
nomdate.timetuple()[0:5])
def test_submit_details_unlisted_should_redirect(self):
version = self.get_addon().versions.latest()
version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(self.url)
self.assert3xx(response, self.next_step)
def test_can_cancel_review(self):
addon = self.get_addon()
addon.versions.latest().files.update(status=amo.STATUS_AWAITING_REVIEW)
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
versions_url = reverse('devhub.addons.versions', args=['a3615'])
response = self.client.post(cancel_url)
self.assert3xx(response, versions_url)
addon = self.get_addon()
assert addon.status == amo.STATUS_NULL
version = addon.versions.latest()
del version.all_files
assert version.statuses == [
(version.all_files[0].id, amo.STATUS_DISABLED)]
@override_switch('akismet-spam-check', active=False)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_spam_check_waffle_off(self, comment_check_mock):
data = self.get_dict(name=u'spám')
self.is_success(data)
comment_check_mock.assert_not_called()
assert AkismetReport.objects.count() == 0
@override_switch('akismet-spam-check', active=True)
@override_switch('akismet-addon-action', active=True)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_spam_check_spam_action_taken(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
data = self.get_dict(name=u'spám', summary=self.addon.summary)
response = self.client.post(self.url, data)
assert response.status_code == 200
self.assertFormError(
response, 'form', 'name',
'The text entered has been flagged as spam.')
# the summary won't be comment_check'd because it didn't change.
self.addon = self.addon.reload()
assert AkismetReport.objects.count() == 1
report = AkismetReport.objects.get()
assert report.comment_type == 'product-name'
assert report.comment == u'spám'
assert text_type(self.addon.name) != u'spám'
comment_check_mock.assert_called_once()
@override_switch('akismet-spam-check', active=True)
@override_switch('akismet-addon-action', active=False)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_spam_check_spam_logging_only(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
data = self.get_dict(name=u'spám', summary=self.addon.summary)
response = self.is_success(data)
# the summary won't be comment_check'd because it didn't change.
self.addon = self.addon.reload()
assert AkismetReport.objects.count() == 1
report = AkismetReport.objects.get()
assert report.comment_type == 'product-name'
assert report.comment == u'spám'
assert text_type(self.addon.name) == u'spám'
assert b'spam' not in response.content
comment_check_mock.assert_called_once()
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_spam_check_ham(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.HAM
data = self.get_dict(name=u'spám', summary=self.addon.summary)
response = self.is_success(data)
# the summary won't be comment_check'd because it didn't change.
assert comment_check_mock.call_count == 1
assert AkismetReport.objects.count() == 1
report = AkismetReport.objects.get()
assert report.comment_type == 'product-name'
assert report.comment == u'spám'
assert b'spam' not in response.content
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_spam_check_no_changes(self, comment_check_mock):
# Don't change either name or summary from the upload.
data = self.get_dict(name=self.addon.name, summary=self.addon.summary)
self.is_success(data)
comment_check_mock.assert_not_called()
assert AkismetReport.objects.count() == 0
@override_switch('content-optimization', active=False)
def test_name_summary_lengths_short(self):
# check the separate name and summary labels, etc are served
response = self.client.get(self.url)
assert b'Name and Summary' not in response.content
assert b'It will be shown in listings and searches' in response.content
data = self.get_dict(name='a', summary='b')
self.is_success(data)
@override_switch('content-optimization', active=False)
def test_name_summary_lengths_long(self):
data = self.get_dict(name='a' * 50, summary='b' * 50)
self.is_success(data)
@override_switch('content-optimization', active=True)
def test_name_summary_lengths_content_optimization(self):
# check the combined name and summary label, etc are served
response = self.client.get(self.url)
assert b'Name and Summary' in response.content
# name and summary are too short
response = self.client.post(
self.url, self.get_dict(
name='a', summary='b', description='c' * 10))
assert self.get_addon().name != 'a'
assert self.get_addon().summary != 'b'
assert response.status_code == 200
self.assertFormError(
response, 'form', 'name',
'Ensure this value has at least 2 characters (it has 1).')
self.assertFormError(
response, 'form', 'summary',
'Ensure this value has at least 2 characters (it has 1).')
# name and summary individually are okay, but together are too long
response = self.client.post(
self.url, self.get_dict(
name='a' * 50, summary='b' * 50, description='c' * 10))
assert self.get_addon().name != 'a' * 50
assert self.get_addon().summary != 'b' * 50
assert response.status_code == 200
self.assertFormError(
response, 'form', 'name',
'Ensure name and summary combined are at most 70 characters '
u'(they have 100).')
# success: together name and summary are 70 characters.
data = self.get_dict(
name='a' * 2, summary='b' * 68, description='c' * 10)
self.is_success(data)
@override_switch('content-optimization', active=True)
def test_summary_auto_cropping_content_optimization(self):
# See test_forms.py::TestDescribeForm for some more variations.
data = self.get_dict(minimal=False)
data.pop('name')
data.pop('summary')
data.update({
'name_en-us': 'a' * 25,
'name_fr': 'b' * 30,
'summary_en-us': 'c' * 45,
'summary_fr': 'd' * 45, # 30 + 45 is > 70
})
self.is_success(data)
assert self.get_addon().name == 'a' * 25
assert self.get_addon().summary == 'c' * 45
with self.activate('fr'):
assert self.get_addon().name == 'b' * 30
assert self.get_addon().summary == 'd' * 40
@override_switch('content-optimization', active=True)
def test_name_auto_cropping_content_optimization(self):
# See test_forms.py::TestDescribeForm for some more variations.
data = self.get_dict(minimal=False)
data.pop('name')
data.pop('summary')
data.update({
'name_en-us': 'a' * 67,
'name_fr': 'b' * 69,
'summary_en-us': 'c' * 2,
'summary_fr': 'd' * 3,
})
self.is_success(data)
assert self.get_addon().name == 'a' * 67
assert self.get_addon().summary == 'c' * 2
with self.activate('fr'):
assert self.get_addon().name == 'b' * 68
assert self.get_addon().summary == 'd' * 2
class TestAddonSubmitDetails(DetailsPageMixin, TestSubmitBase):
def setUp(self):
super(TestAddonSubmitDetails, self).setUp()
self.url = reverse('devhub.submit.details', args=['a3615'])
AddonCategory.objects.filter(
addon=self.get_addon(),
category=Category.objects.get(id=1)).delete()
AddonCategory.objects.filter(
addon=self.get_addon(),
category=Category.objects.get(id=71)).delete()
ctx = self.client.get(self.url).context['cat_form']
self.cat_initial = initial(ctx.initial_forms[0])
self.next_step = reverse('devhub.submit.finish', args=['a3615'])
License.objects.create(builtin=3, on_form=True)
self.get_addon().update(status=amo.STATUS_NULL)
def get_dict(self, minimal=True, **kw):
result = {}
describe_form = {'name': 'Test name', 'slug': 'testname',
'summary': 'Hello!', 'is_experimental': True,
'requires_payment': True}
if not minimal:
describe_form.update({'description': 'its a description',
'support_url': 'http://stackoverflow.com',
'support_email': '[email protected]'})
cat_initial = kw.pop('cat_initial', self.cat_initial)
cat_form = formset(cat_initial, initial_count=1)
license_form = {'license-builtin': 3}
policy_form = {} if minimal else {
'has_priv': True, 'privacy_policy': 'Ur data belongs to us now.'}
reviewer_form = {} if minimal else {'approval_notes': 'approove plz'}
result.update(describe_form)
result.update(cat_form)
result.update(license_form)
result.update(policy_form)
result.update(reviewer_form)
result.update(**kw)
return result
@override_switch('content-optimization', active=False)
def test_submit_success_required(self):
# Set/change the required fields only
response = self.client.get(self.url)
assert response.status_code == 200
# Post and be redirected - trying to sneak
# in fields that shouldn't be modified via this form.
data = self.get_dict(homepage='foo.com',
tags='whatevs, whatever')
self.is_success(data)
addon = self.get_addon()
# These fields should not have been modified.
assert addon.homepage != 'foo.com'
assert len(addon.tags.values_list()) == 0
# These are the fields that are expected to be edited here.
assert addon.name == 'Test name'
assert addon.slug == 'testname'
assert addon.summary == 'Hello!'
assert addon.is_experimental
assert addon.requires_payment
assert addon.all_categories[0].id == 22
# Test add-on log activity.
log_items = ActivityLog.objects.for_addons(addon)
assert not log_items.filter(action=amo.LOG.EDIT_PROPERTIES.id), (
"Setting properties on submit needn't be logged.")
@override_switch('content-optimization', active=False)
def test_submit_success_optional_fields(self):
# Set/change the optional fields too
# Post and be redirected
data = self.get_dict(minimal=False)
self.is_success(data)
addon = self.get_addon()
# These are the fields that are expected to be edited here.
assert addon.description == 'its a description'
assert addon.support_url == 'http://stackoverflow.com'
assert addon.support_email == '[email protected]'
assert addon.privacy_policy == 'Ur data belongs to us now.'
assert addon.current_version.approval_notes == 'approove plz'
@override_switch('content-optimization', active=True)
def test_submit_success_required_with_content_optimization(self):
# Set/change the required fields only
response = self.client.get(self.url)
assert response.status_code == 200
# Post and be redirected - trying to sneak
# in fields that shouldn't be modified via this form.
data = self.get_dict(
description='its a description', homepage='foo.com',
tags='whatevs, whatever')
self.is_success(data)
addon = self.get_addon()
# These fields should not have been modified.
assert addon.homepage != 'foo.com'
assert len(addon.tags.values_list()) == 0
# These are the fields that are expected to be edited here.
assert addon.name == 'Test name'
assert addon.slug == 'testname'
assert addon.summary == 'Hello!'
assert addon.description == 'its a description'
assert addon.is_experimental
assert addon.requires_payment
assert addon.all_categories[0].id == 22
# Test add-on log activity.
log_items = ActivityLog.objects.for_addons(addon)
assert not log_items.filter(action=amo.LOG.EDIT_PROPERTIES.id), (
"Setting properties on submit needn't be logged.")
@override_switch('content-optimization', active=True)
def test_submit_success_optional_fields_with_content_optimization(self):
# Set/change the optional fields too
# Post and be redirected
data = self.get_dict(minimal=False)
self.is_success(data)
addon = self.get_addon()
# These are the fields that are expected to be edited here.
assert addon.support_url == 'http://stackoverflow.com'
assert addon.support_email == '[email protected]'
assert addon.privacy_policy == 'Ur data belongs to us now.'
assert addon.current_version.approval_notes == 'approove plz'
def test_submit_categories_required(self):
del self.cat_initial['categories']
response = self.client.post(
self.url, self.get_dict(cat_initial=self.cat_initial))
assert response.context['cat_form'].errors[0]['categories'] == (
['This field is required.'])
def test_submit_categories_max(self):
assert amo.MAX_CATEGORIES == 2
self.cat_initial['categories'] = [22, 1, 71]
response = self.client.post(
self.url, self.get_dict(cat_initial=self.cat_initial))
assert response.context['cat_form'].errors[0]['categories'] == (
['You can have only 2 categories.'])
def test_submit_categories_add(self):
assert [cat.id for cat in self.get_addon().all_categories] == [22]
self.cat_initial['categories'] = [22, 1]
self.is_success(self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert sorted(addon_cats) == [1, 22]
def test_submit_categories_addandremove(self):
AddonCategory(addon=self.addon, category_id=1).save()
assert sorted(
[cat.id for cat in self.get_addon().all_categories]) == [1, 22]
self.cat_initial['categories'] = [22, 71]
self.client.post(self.url, self.get_dict(cat_initial=self.cat_initial))
category_ids_new = [c.id for c in self.get_addon().all_categories]
assert sorted(category_ids_new) == [22, 71]
def test_submit_categories_remove(self):
category = Category.objects.get(id=1)
AddonCategory(addon=self.addon, category=category).save()
assert sorted(
[cat.id for cat in self.get_addon().all_categories]) == [1, 22]
self.cat_initial['categories'] = [22]
self.client.post(self.url, self.get_dict(cat_initial=self.cat_initial))
category_ids_new = [cat.id for cat in self.get_addon().all_categories]
assert category_ids_new == [22]
def test_ul_class_rendering_regression(self):
"""Test ul of license widget doesn't render `license` class.
Regression test for:
* https://github.com/mozilla/addons-server/issues/8902
* https://github.com/mozilla/addons-server/issues/8920
"""
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
ul = doc('#id_license-builtin')
assert ul.attr('class') is None
def test_set_builtin_license_no_log(self):
self.is_success(self.get_dict(**{'license-builtin': 3}))
addon = self.get_addon()
assert addon.status == amo.STATUS_NOMINATED
assert addon.current_version.license.builtin == 3
log_items = ActivityLog.objects.for_addons(self.get_addon())
assert not log_items.filter(action=amo.LOG.CHANGE_LICENSE.id)
def test_license_error(self):
response = self.client.post(
self.url, self.get_dict(**{'license-builtin': 4}))
assert response.status_code == 200
self.assertFormError(response, 'license_form', 'builtin',
'Select a valid choice. 4 is not one of '
'the available choices.')
def test_set_privacy_nomsg(self):
"""
You should not get punished with a 500 for not writing your policy...
but perhaps you should feel shame for lying to us. This test does not
test for shame.
"""
self.get_addon().update(eula=None, privacy_policy=None)
self.is_success(self.get_dict(has_priv=True))
def test_source_submission_notes_not_shown_by_default(self):
url = reverse('devhub.submit.source', args=[self.addon.slug])
response = self.client.post(url, {
'has_source': 'no'
}, follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert 'Remember: ' not in doc('.source-submission-note').text()
def test_source_submission_notes_shown(self):
url = reverse('devhub.submit.source', args=[self.addon.slug])
response = self.client.post(url, {
'has_source': 'yes', 'source': self.generate_source_zip(),
}, follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert 'Remember: ' in doc('.source-submission-note').text()
class TestStaticThemeSubmitDetails(DetailsPageMixin, TestSubmitBase):
def setUp(self):
super(TestStaticThemeSubmitDetails, self).setUp()
self.url = reverse('devhub.submit.details', args=['a3615'])
AddonCategory.objects.filter(
addon=self.get_addon(),
category=Category.objects.get(id=1)).delete()
AddonCategory.objects.filter(
addon=self.get_addon(),
category=Category.objects.get(id=22)).delete()
AddonCategory.objects.filter(
addon=self.get_addon(),
category=Category.objects.get(id=71)).delete()
Category.from_static_category(CATEGORIES_BY_ID[300]).save() # abstract
Category.from_static_category(CATEGORIES_BY_ID[308]).save() # firefox
Category.from_static_category(CATEGORIES_BY_ID[400]).save() # abstract
Category.from_static_category(CATEGORIES_BY_ID[408]).save() # firefox
self.next_step = reverse('devhub.submit.finish', args=['a3615'])
License.objects.create(builtin=11, on_form=True, creative_commons=True)
self.get_addon().update(
status=amo.STATUS_NULL, type=amo.ADDON_STATICTHEME)
def get_dict(self, minimal=True, **kw):
result = {}
describe_form = {'name': 'Test name', 'slug': 'testname',
'summary': 'Hello!'}
if not minimal:
describe_form.update({'support_url': 'http://stackoverflow.com',
'support_email': '[email protected]'})
cat_form = {'category': 'abstract'}
license_form = {'license-builtin': 11}
result.update(describe_form)
result.update(cat_form)
result.update(license_form)
result.update(**kw)
return result
def test_submit_success_required(self):
# Set/change the required fields only
response = self.client.get(self.url)
assert response.status_code == 200
# Post and be redirected - trying to sneak
# in fields that shouldn't be modified via this form.
data = self.get_dict(homepage='foo.com',
tags='whatevs, whatever')
self.is_success(data)
addon = self.get_addon()
# These fields should not have been modified.
assert addon.homepage != 'foo.com'
assert len(addon.tags.values_list()) == 0
# These are the fields that are expected to be edited here.
assert addon.name == 'Test name'
assert addon.slug == 'testname'
assert addon.summary == 'Hello!'
assert addon.all_categories[0].id == 300
# Test add-on log activity.
log_items = ActivityLog.objects.for_addons(addon)
assert not log_items.filter(action=amo.LOG.EDIT_PROPERTIES.id), (
"Setting properties on submit needn't be logged.")
def test_submit_success_optional_fields(self):
# Set/change the optional fields too
# Post and be redirected
data = self.get_dict(minimal=False)
self.is_success(data)
addon = self.get_addon()
# These are the fields that are expected to be edited here.
assert addon.support_url == 'http://stackoverflow.com'
assert addon.support_email == '[email protected]'
def test_submit_categories_set(self):
assert [cat.id for cat in self.get_addon().all_categories] == []
self.is_success(self.get_dict(category='firefox'))
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert sorted(addon_cats) == [308, 408]
def test_submit_categories_change(self):
category_desktop = Category.objects.get(id=300)
category_android = Category.objects.get(id=400)
AddonCategory(addon=self.addon, category=category_desktop).save()
AddonCategory(addon=self.addon, category=category_android).save()
assert sorted(
[cat.id for cat in self.get_addon().all_categories]) == [300, 400]
self.client.post(self.url, self.get_dict(category='firefox'))
category_ids_new = [cat.id for cat in self.get_addon().all_categories]
# Only ever one category for Static Themes
assert category_ids_new == [308, 408]
def test_creative_commons_licenses(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
content = doc('.addon-submission-process')
assert content('#cc-chooser') # cc license wizard
assert content('#persona-license') # cc license result
assert content('#id_license-builtin') # license list
        # There should be one license - the builtin 11 we added in setUp - and no 'other'.
assert len(content('input.license')) == 1
assert content('input.license').attr('value') == '11'
assert content('input.license').attr('data-name') == (
LICENSES_BY_BUILTIN[11].name)
def test_set_builtin_license_no_log(self):
self.is_success(self.get_dict(**{'license-builtin': 11}))
addon = self.get_addon()
assert addon.status == amo.STATUS_NOMINATED
assert addon.current_version.license.builtin == 11
log_items = ActivityLog.objects.for_addons(self.get_addon())
assert not log_items.filter(action=amo.LOG.CHANGE_LICENSE.id)
def test_license_error(self):
response = self.client.post(
self.url, self.get_dict(**{'license-builtin': 4}))
assert response.status_code == 200
self.assertFormError(response, 'license_form', 'builtin',
'Select a valid choice. 4 is not one of '
'the available choices.')
class TestAddonSubmitFinish(TestSubmitBase):
def setUp(self):
super(TestAddonSubmitFinish, self).setUp()
self.url = reverse('devhub.submit.finish', args=[self.addon.slug])
@mock.patch.object(settings, 'SITE_URL', 'http://b.ro')
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
def test_welcome_email_for_newbies(self, send_welcome_email_mock):
self.client.get(self.url)
context = {
'addon_name': 'Delicious Bookmarks',
'app': six.text_type(amo.FIREFOX.pretty),
'detail_url': 'http://b.ro/en-US/firefox/addon/a3615/',
'version_url': 'http://b.ro/en-US/developers/addon/a3615/versions',
'edit_url': 'http://b.ro/en-US/developers/addon/a3615/edit',
}
send_welcome_email_mock.assert_called_with(
self.addon.id, ['[email protected]'], context)
@mock.patch.object(settings, 'SITE_URL', 'http://b.ro')
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
def test_welcome_email_first_listed_addon(self, send_welcome_email_mock):
new_addon = addon_factory(
version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
new_addon.addonuser_set.create(user=self.addon.authors.all()[0])
self.client.get(self.url)
context = {
'addon_name': 'Delicious Bookmarks',
'app': six.text_type(amo.FIREFOX.pretty),
'detail_url': 'http://b.ro/en-US/firefox/addon/a3615/',
'version_url': 'http://b.ro/en-US/developers/addon/a3615/versions',
'edit_url': 'http://b.ro/en-US/developers/addon/a3615/edit',
}
send_welcome_email_mock.assert_called_with(
self.addon.id, ['[email protected]'], context)
@mock.patch.object(settings, 'SITE_URL', 'http://b.ro')
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
def test_welcome_email_if_previous_addon_is_incomplete(
self, send_welcome_email_mock):
        # If the developer already submitted an addon but didn't finish or was
        # rejected, we send the email anyway; it might be a dupe depending on
        # how far they got, but it's better than not sending any.
new_addon = addon_factory(status=amo.STATUS_NULL)
new_addon.addonuser_set.create(user=self.addon.authors.all()[0])
self.client.get(self.url)
context = {
'addon_name': 'Delicious Bookmarks',
'app': six.text_type(amo.FIREFOX.pretty),
'detail_url': 'http://b.ro/en-US/firefox/addon/a3615/',
'version_url': 'http://b.ro/en-US/developers/addon/a3615/versions',
'edit_url': 'http://b.ro/en-US/developers/addon/a3615/edit',
}
send_welcome_email_mock.assert_called_with(
self.addon.id, ['[email protected]'], context)
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
def test_no_welcome_email(self, send_welcome_email_mock):
"""You already submitted an add-on? We won't spam again."""
new_addon = addon_factory(status=amo.STATUS_NOMINATED)
new_addon.addonuser_set.create(user=self.addon.authors.all()[0])
self.client.get(self.url)
assert not send_welcome_email_mock.called
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
def test_no_welcome_email_if_unlisted(self, send_welcome_email_mock):
self.make_addon_unlisted(self.addon)
self.client.get(self.url)
assert not send_welcome_email_mock.called
def test_finish_submitting_listed_addon(self):
version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
assert version.supported_platforms == ([amo.PLATFORM_ALL])
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
content = doc('.addon-submission-process')
links = content('a')
assert len(links) == 3
# First link is to edit listing
assert links[0].attrib['href'] == self.addon.get_dev_url()
# Second link is to edit the version
assert links[1].attrib['href'] == reverse(
'devhub.versions.edit',
args=[self.addon.slug, version.id])
assert links[1].text == (
'Edit version %s' % version.version)
# Third back to my submissions.
assert links[2].attrib['href'] == reverse('devhub.addons')
def test_finish_submitting_unlisted_addon(self):
self.make_addon_unlisted(self.addon)
latest_version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
content = doc('.addon-submission-process')
links = content('a')
assert len(links) == 2
# First link is to the file download.
file_ = latest_version.all_files[-1]
assert links[0].attrib['href'] == file_.get_url_path('devhub')
assert links[0].text == (
'Download %s' % file_.filename)
# Second back to my submissions.
assert links[1].attrib['href'] == reverse('devhub.addons')
def test_addon_no_versions_redirects_to_versions(self):
self.addon.update(status=amo.STATUS_NULL)
self.addon.versions.all().delete()
response = self.client.get(self.url, follow=True)
# Would go to 'devhub.submit.version' but no previous version means
# channel needs to be selected first.
self.assert3xx(
response,
reverse('devhub.submit.version.distribution', args=['a3615']), 302)
def test_incomplete_directs_to_details(self):
# We get bounced back to details step.
self.addon.update(status=amo.STATUS_NULL)
AddonCategory.objects.filter(addon=self.addon).delete()
response = self.client.get(
reverse('devhub.submit.finish', args=['a3615']), follow=True)
self.assert3xx(
response, reverse('devhub.submit.details', args=['a3615']))
def test_finish_submitting_listed_static_theme(self):
self.addon.update(type=amo.ADDON_STATICTHEME)
version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
VersionPreview.objects.create(version=version)
assert version.supported_platforms == ([amo.PLATFORM_ALL])
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
content = doc('.addon-submission-process')
links = content('a')
assert len(links) == 2
# First link is to edit listing.
assert links[0].attrib['href'] == self.addon.get_dev_url()
# Second link is back to my submissions.
assert links[1].attrib['href'] == reverse('devhub.themes')
# Text is static theme specific.
assert b'This version will be available after it passes review.' in (
response.content)
# Show the preview we started generating just after the upload step.
imgs = content('section.addon-submission-process img')
assert imgs[0].attrib['src'] == (
version.previews.first().image_url)
assert len(imgs) == 1 # Just the one preview though.
def test_finish_submitting_unlisted_static_theme(self):
self.addon.update(type=amo.ADDON_STATICTHEME)
self.make_addon_unlisted(self.addon)
latest_version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
content = doc('.addon-submission-process')
links = content('a')
assert len(links) == 2
# First link is to the file download.
file_ = latest_version.all_files[-1]
assert links[0].attrib['href'] == file_.get_url_path('devhub')
assert links[0].text == (
'Download %s' % file_.filename)
# Second back to my submissions.
assert links[1].attrib['href'] == reverse('devhub.themes')
class TestAddonSubmitResume(TestSubmitBase):
def test_redirect_from_other_pages(self):
self.addon.update(status=amo.STATUS_NULL)
AddonCategory.objects.filter(addon=self.addon).delete()
response = self.client.get(
reverse('devhub.addons.edit', args=['a3615']), follow=True)
self.assert3xx(
response, reverse('devhub.submit.details', args=['a3615']))
class TestVersionSubmitDistribution(TestSubmitBase):
def setUp(self):
super(TestVersionSubmitDistribution, self).setUp()
self.url = reverse('devhub.submit.version.distribution',
args=[self.addon.slug])
def test_listed_redirects_to_next_step(self):
response = self.client.post(self.url, {'channel': 'listed'})
self.assert3xx(
response,
reverse('devhub.submit.version.upload', args=[
self.addon.slug, 'listed']))
def test_unlisted_redirects_to_next_step(self):
response = self.client.post(self.url, {'channel': 'unlisted'})
self.assert3xx(
response,
reverse('devhub.submit.version.upload', args=[
self.addon.slug, 'unlisted']))
def test_no_redirect_for_metadata(self):
self.addon.update(status=amo.STATUS_NULL)
AddonCategory.objects.filter(addon=self.addon).delete()
response = self.client.get(self.url)
assert response.status_code == 200
def test_has_read_agreement(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(self.url)
self.assert3xx(
response,
reverse('devhub.submit.version.agreement', args=[self.addon.slug]))
class TestVersionSubmitAutoChannel(TestSubmitBase):
""" Just check we chose the right upload channel. The upload tests
themselves are in other tests. """
def setUp(self):
super(TestVersionSubmitAutoChannel, self).setUp()
self.url = reverse('devhub.submit.version', args=[self.addon.slug])
@mock.patch('olympia.devhub.views._submit_upload',
side_effect=views._submit_upload)
def test_listed_last_uses_listed_upload(self, _submit_upload_mock):
version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED)
self.client.post(self.url)
assert _submit_upload_mock.call_count == 1
args, _ = _submit_upload_mock.call_args
assert args[1:] == (
self.addon, amo.RELEASE_CHANNEL_LISTED,
'devhub.submit.version.source')
@mock.patch('olympia.devhub.views._submit_upload',
side_effect=views._submit_upload)
def test_unlisted_last_uses_unlisted_upload(self, _submit_upload_mock):
version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
self.client.post(self.url)
assert _submit_upload_mock.call_count == 1
args, _ = _submit_upload_mock.call_args
assert args[1:] == (
self.addon, amo.RELEASE_CHANNEL_UNLISTED,
'devhub.submit.version.source')
def test_no_versions_redirects_to_distribution(self):
        for version in self.addon.versions.all():
            version.delete()
response = self.client.post(self.url)
self.assert3xx(
response,
reverse('devhub.submit.version.distribution',
args=[self.addon.slug]))
def test_has_read_agreement(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(self.url)
self.assert3xx(
response,
reverse('devhub.submit.version.agreement', args=[self.addon.slug]))
class VersionSubmitUploadMixin(object):
channel = None
fixtures = ['base/users', 'base/addon_3615']
@classmethod
def setUpTestData(cls):
create_default_webext_appversion()
def setUp(self):
super(VersionSubmitUploadMixin, self).setUp()
self.upload = self.get_upload('extension.xpi')
self.addon = Addon.objects.get(id=3615)
self.version = self.addon.current_version
self.addon.update(guid='guid@xpi')
self.user = UserProfile.objects.get(email='[email protected]')
assert self.client.login(email=self.user.email)
self.addon.versions.update(channel=self.channel)
channel = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED else
'unlisted')
self.url = reverse('devhub.submit.version.upload',
args=[self.addon.slug, channel])
assert self.addon.has_complete_metadata()
self.version.save()
def post(self, compatible_apps=None,
override_validation=False, expected_status=302, source=None,
extra_kwargs=None):
if compatible_apps is None:
compatible_apps = [amo.FIREFOX]
data = {
'upload': self.upload.uuid.hex,
'compatible_apps': [p.id for p in compatible_apps],
'admin_override_validation': override_validation
}
if source is not None:
data['source'] = source
response = self.client.post(self.url, data, **(extra_kwargs or {}))
assert response.status_code == expected_status
return response
def get_next_url(self, version):
return reverse('devhub.submit.version.source', args=[
self.addon.slug, version.pk])
def test_missing_compatibility_apps(self):
response = self.client.post(self.url, {'upload': self.upload.uuid.hex})
assert response.status_code == 200
assert response.context['new_addon_form'].errors.as_text() == (
'* compatible_apps\n * Need to select at least one application.')
def test_unique_version_num(self):
self.version.update(version='0.1')
response = self.post(expected_status=200)
assert pq(response.content)('ul.errorlist').text() == (
'Version 0.1 already exists.')
def test_same_version_if_previous_is_rejected(self):
# We can't re-use the same version number, even if the previous
# versions have been disabled/rejected.
self.version.update(version='0.1')
self.version.files.update(status=amo.STATUS_DISABLED)
response = self.post(expected_status=200)
assert pq(response.content)('ul.errorlist').text() == (
'Version 0.1 already exists.')
def test_same_version_if_previous_is_deleted(self):
# We can't re-use the same version number if the previous
        # versions have been deleted either.
self.version.update(version='0.1')
self.version.delete()
response = self.post(expected_status=200)
assert pq(response.content)('ul.errorlist').text() == (
'Version 0.1 was uploaded before and deleted.')
def test_same_version_if_previous_is_awaiting_review(self):
# We can't re-use the same version number - offer to continue.
self.version.update(version='0.1')
self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
response = self.post(expected_status=200)
assert pq(response.content)('ul.errorlist').text() == (
'Version 0.1 already exists. '
'Continue with existing upload instead?')
# url is always to the details page even for unlisted (will redirect).
assert pq(response.content)('ul.errorlist a').attr('href') == (
reverse('devhub.submit.version.details', args=[
self.addon.slug, self.version.pk]))
def test_distribution_link(self):
response = self.client.get(self.url)
channel_text = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED
else 'unlisted')
distribution_url = reverse('devhub.submit.version.distribution',
args=[self.addon.slug])
doc = pq(response.content)
assert doc('.addon-submit-distribute a').attr('href') == (
distribution_url + '?channel=' + channel_text)
def test_url_is_404_for_disabled_addons(self):
self.addon.update(status=amo.STATUS_DISABLED)
response = self.client.get(self.url)
assert response.status_code == 404
def test_no_redirect_for_metadata(self):
self.addon.update(status=amo.STATUS_NULL)
AddonCategory.objects.filter(addon=self.addon).delete()
response = self.client.get(self.url)
assert response.status_code == 200
def test_static_theme_wizard_button_not_shown_for_extensions(self):
assert self.addon.type != amo.ADDON_STATICTHEME
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('#wizardlink')
def test_static_theme_wizard_button_shown(self):
channel = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED else
'unlisted')
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#wizardlink')
assert doc('#wizardlink').attr('href') == (
reverse('devhub.submit.version.wizard',
args=[self.addon.slug, channel]))
def test_static_theme_wizard(self):
channel = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED else
'unlisted')
self.addon.update(type=amo.ADDON_STATICTHEME)
# Get the correct template.
self.url = reverse('devhub.submit.version.wizard',
args=[self.addon.slug, channel])
mock_point = 'olympia.devhub.views.extract_theme_properties'
with mock.patch(mock_point) as extract_theme_properties_mock:
extract_theme_properties_mock.return_value = {
'colors': {
'frame': '#123456',
'tab_background_text': 'rgba(1,2,3,0.4)',
},
'images': {
'theme_frame': 'header.png',
}
}
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#theme-wizard')
assert doc('#theme-wizard').attr('data-version') == '3.0'
assert doc('input#theme-name').attr('type') == 'hidden'
assert doc('input#theme-name').attr('value') == (
six.text_type(self.addon.name))
# Existing colors should be the default values for the fields
assert doc('#frame').attr('value') == '#123456'
assert doc('#tab_background_text').attr('value') == 'rgba(1,2,3,0.4)'
# And the theme header url is there for the JS to load
assert doc('#theme-header').attr('data-existing-header') == (
'header.png')
# No warning about extra properties
assert b'are unsupported in this wizard' not in response.content
# And then check the upload works.
path = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
self.upload = self.get_upload(abspath=path)
response = self.post()
version = self.addon.find_latest_version(channel=self.channel)
assert version.channel == self.channel
assert version.all_files[0].status == (
amo.STATUS_AWAITING_REVIEW
if self.channel == amo.RELEASE_CHANNEL_LISTED else
amo.STATUS_APPROVED)
self.assert3xx(response, self.get_next_url(version))
log_items = ActivityLog.objects.for_addons(self.addon)
assert log_items.filter(action=amo.LOG.ADD_VERSION.id)
if self.channel == amo.RELEASE_CHANNEL_LISTED:
previews = list(version.previews.all())
assert len(previews) == 3
assert storage.exists(previews[0].image_path)
assert storage.exists(previews[1].image_path)
            assert storage.exists(previews[2].image_path)
else:
assert version.previews.all().count() == 0
def test_static_theme_wizard_unsupported_properties(self):
channel = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED else
'unlisted')
self.addon.update(type=amo.ADDON_STATICTHEME)
# Get the correct template.
self.url = reverse('devhub.submit.version.wizard',
args=[self.addon.slug, channel])
mock_point = 'olympia.devhub.views.extract_theme_properties'
with mock.patch(mock_point) as extract_theme_properties_mock:
extract_theme_properties_mock.return_value = {
'colors': {
'frame': '#123456',
'tab_background_text': 'rgba(1,2,3,0.4)',
'tab_line': '#123',
},
'images': {
'additional_backgrounds': [],
},
'something_extra': {},
}
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#theme-wizard')
assert doc('#theme-wizard').attr('data-version') == '3.0'
assert doc('input#theme-name').attr('type') == 'hidden'
assert doc('input#theme-name').attr('value') == (
six.text_type(self.addon.name))
# Existing colors should be the default values for the fields
assert doc('#frame').attr('value') == '#123456'
assert doc('#tab_background_text').attr('value') == 'rgba(1,2,3,0.4)'
# Warning about extra properties this time:
assert b'are unsupported in this wizard' in response.content
unsupported_list = doc('.notification-box.error ul.note li')
assert unsupported_list.length == 3
assert 'tab_line' in unsupported_list.text()
assert 'additional_backgrounds' in unsupported_list.text()
assert 'something_extra' in unsupported_list.text()
# And then check the upload works.
path = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
self.upload = self.get_upload(abspath=path)
response = self.post()
version = self.addon.find_latest_version(channel=self.channel)
assert version.channel == self.channel
assert version.all_files[0].status == (
amo.STATUS_AWAITING_REVIEW
if self.channel == amo.RELEASE_CHANNEL_LISTED else
amo.STATUS_APPROVED)
self.assert3xx(response, self.get_next_url(version))
log_items = ActivityLog.objects.for_addons(self.addon)
assert log_items.filter(action=amo.LOG.ADD_VERSION.id)
if self.channel == amo.RELEASE_CHANNEL_LISTED:
previews = list(version.previews.all())
assert len(previews) == 3
assert storage.exists(previews[0].image_path)
assert storage.exists(previews[1].image_path)
            assert storage.exists(previews[2].image_path)
else:
assert version.previews.all().count() == 0
@mock.patch('olympia.devhub.forms.parse_addon',
wraps=_parse_addon_theme_permission_wrapper)
def test_dynamic_theme_tagging(self, parse_addon_mock):
self.addon.update(guid='[email protected]')
path = os.path.join(
settings.ROOT,
'src/olympia/devhub/tests/addons/valid_webextension.xpi')
self.upload = self.get_upload(abspath=path)
response = self.post()
version = self.addon.find_latest_version(channel=self.channel)
self.assert3xx(
response, self.get_next_url(version))
if self.channel == amo.RELEASE_CHANNEL_LISTED:
assert self.addon.tags.filter(tag_text='dynamic theme').exists()
else:
assert not self.addon.tags.filter(
tag_text='dynamic theme').exists()
class TestVersionSubmitUploadListed(VersionSubmitUploadMixin, UploadTest):
channel = amo.RELEASE_CHANNEL_LISTED
def test_success(self):
response = self.post()
version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
assert version.channel == amo.RELEASE_CHANNEL_LISTED
assert version.all_files[0].status == amo.STATUS_AWAITING_REVIEW
self.assert3xx(response, self.get_next_url(version))
log_items = ActivityLog.objects.for_addons(self.addon)
assert log_items.filter(action=amo.LOG.ADD_VERSION.id)
@mock.patch('olympia.devhub.views.sign_file')
def test_experiments_inside_webext_are_auto_signed(self, mock_sign_file):
"""Experiment extensions (bug 1220097) are auto-signed."""
self.grant_permission(
self.user, ':'.join(amo.permissions.EXPERIMENTS_SUBMIT))
self.upload = self.get_upload(
'experiment_inside_webextension.xpi',
validation=json.dumps({
"notices": 2, "errors": 0, "messages": [],
"metadata": {}, "warnings": 1,
}))
self.addon.update(
guid='@experiment-inside-webextension-guid',
status=amo.STATUS_APPROVED)
self.post()
# Make sure the file created and signed is for this addon.
assert mock_sign_file.call_count == 1
mock_sign_file_call = mock_sign_file.call_args[0]
signed_file = mock_sign_file_call[0]
assert signed_file.version.addon == self.addon
assert signed_file.version.channel == amo.RELEASE_CHANNEL_LISTED
# There is a log for that file (with passed validation).
log = ActivityLog.objects.latest(field_name='id')
assert log.action == amo.LOG.EXPERIMENT_SIGNED.id
@mock.patch('olympia.devhub.views.sign_file')
def test_experiment_inside_webext_upload_without_permission(
self, mock_sign_file):
self.upload = self.get_upload(
'experiment_inside_webextension.xpi',
validation=json.dumps({
"notices": 2, "errors": 0, "messages": [],
"metadata": {}, "warnings": 1,
}))
self.addon.update(
guid='@experiment-inside-webextension-guid',
status=amo.STATUS_APPROVED)
response = self.post(expected_status=200)
assert pq(response.content)('ul.errorlist').text() == (
'You cannot submit this type of add-on')
assert mock_sign_file.call_count == 0
@mock.patch('olympia.devhub.views.sign_file')
def test_theme_experiment_inside_webext_upload_without_permission(
self, mock_sign_file):
self.upload = self.get_upload(
'theme_experiment_inside_webextension.xpi',
validation=json.dumps({
"notices": 2, "errors": 0, "messages": [],
"metadata": {}, "warnings": 1,
}))
self.addon.update(
            guid='@theme-experiment-inside-webextension-guid',
status=amo.STATUS_APPROVED)
response = self.post(expected_status=200)
assert pq(response.content)('ul.errorlist').text() == (
'You cannot submit this type of add-on')
assert mock_sign_file.call_count == 0
def test_incomplete_addon_now_nominated(self):
"""Uploading a new version for an incomplete addon should set it to
nominated."""
self.addon.current_version.files.update(status=amo.STATUS_DISABLED)
self.addon.update_status()
        # With all its files disabled, the add-on status should become null.
assert self.addon.status == amo.STATUS_NULL
self.post()
self.addon.reload()
assert self.addon.status == amo.STATUS_NOMINATED
class TestVersionSubmitUploadUnlisted(VersionSubmitUploadMixin, UploadTest):
channel = amo.RELEASE_CHANNEL_UNLISTED
def setUp(self):
super(TestVersionSubmitUploadUnlisted, self).setUp()
# Mock sign_file() to avoid errors because signing is not enabled.
patch = mock.patch('olympia.reviewers.utils.sign_file')
self.sign_file_mock = patch.start()
self.addCleanup(patch.stop)
def test_success(self):
"""Sign automatically."""
        # No validation errors or warnings.
result = {
'errors': 0,
'warnings': 0,
'notices': 2,
'metadata': {},
'messages': [],
}
self.upload = self.get_upload(
'extension.xpi', validation=json.dumps(result))
response = self.post()
version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert version.channel == amo.RELEASE_CHANNEL_UNLISTED
assert version.all_files[0].status == amo.STATUS_APPROVED
self.assert3xx(response, self.get_next_url(version))
assert self.sign_file_mock.call_count == 1
class TestVersionSubmitSource(TestAddonSubmitSource):
def setUp(self):
super(TestVersionSubmitSource, self).setUp()
addon = self.get_addon()
self.version = version_factory(
addon=addon,
channel=amo.RELEASE_CHANNEL_LISTED,
license_id=addon.versions.latest().license_id)
self.url = reverse(
'devhub.submit.version.source', args=[addon.slug, self.version.pk])
self.next_url = reverse(
'devhub.submit.version.details',
args=[addon.slug, self.version.pk])
assert not self.get_version().source
class TestVersionSubmitDetails(TestSubmitBase):
def setUp(self):
super(TestVersionSubmitDetails, self).setUp()
addon = self.get_addon()
self.version = version_factory(
addon=addon,
channel=amo.RELEASE_CHANNEL_LISTED,
license_id=addon.versions.latest().license_id)
self.url = reverse('devhub.submit.version.details',
args=[addon.slug, self.version.pk])
def test_submit_empty_is_okay(self):
assert all(self.get_addon().get_required_metadata())
response = self.client.get(self.url)
assert response.status_code == 200
response = self.client.post(self.url, {})
self.assert3xx(
response, reverse('devhub.submit.version.finish',
args=[self.addon.slug, self.version.pk]))
assert not self.version.approval_notes
assert not self.version.release_notes
def test_submit_success(self):
assert all(self.get_addon().get_required_metadata())
response = self.client.get(self.url)
assert response.status_code == 200
# Post and be redirected - trying to sneak in a field that shouldn't
# be modified when this is not the first listed version.
data = {'approval_notes': 'approove plz',
'release_notes': 'loadsa stuff', 'name': 'foo'}
response = self.client.post(self.url, data)
self.assert3xx(
response, reverse('devhub.submit.version.finish',
args=[self.addon.slug, self.version.pk]))
# This field should not have been modified.
assert self.get_addon().name != 'foo'
self.version.reload()
assert self.version.approval_notes == 'approove plz'
assert self.version.release_notes == 'loadsa stuff'
def test_submit_details_unlisted_should_redirect(self):
self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert all(self.get_addon().get_required_metadata())
response = self.client.get(self.url)
self.assert3xx(
response, reverse('devhub.submit.version.finish',
args=[self.addon.slug, self.version.pk]))
def test_show_request_for_information(self):
AddonReviewerFlags.objects.create(
addon=self.addon, pending_info_request=self.days_ago(2))
ActivityLog.create(
amo.LOG.REVIEWER_REPLY_VERSION, self.addon, self.version,
user=self.user, details={'comments': 'this should not be shown'})
ActivityLog.create(
amo.LOG.REQUEST_INFORMATION, self.addon, self.version,
user=self.user, details={'comments': 'this is an info request'})
response = self.client.get(self.url)
assert response.status_code == 200
assert b'this should not be shown' not in response.content
assert b'this is an info request' in response.content
def test_dont_show_request_for_information_if_none_pending(self):
ActivityLog.create(
amo.LOG.REVIEWER_REPLY_VERSION, self.addon, self.version,
user=self.user, details={'comments': 'this should not be shown'})
ActivityLog.create(
amo.LOG.REQUEST_INFORMATION, self.addon, self.version,
user=self.user, details={'comments': 'this is an info request'})
response = self.client.get(self.url)
assert response.status_code == 200
assert b'this should not be shown' not in response.content
assert b'this is an info request' not in response.content
def test_clear_request_for_information(self):
AddonReviewerFlags.objects.create(
addon=self.addon, pending_info_request=self.days_ago(2))
response = self.client.post(
self.url, {'clear_pending_info_request': True})
self.assert3xx(
response, reverse('devhub.submit.version.finish',
args=[self.addon.slug, self.version.pk]))
flags = AddonReviewerFlags.objects.get(addon=self.addon)
assert flags.pending_info_request is None
activity = ActivityLog.objects.for_addons(self.addon).filter(
action=amo.LOG.DEVELOPER_CLEAR_INFO_REQUEST.id).get()
assert activity.user == self.user
assert activity.arguments == [self.addon, self.version]
def test_dont_clear_request_for_information(self):
past_date = self.days_ago(2)
AddonReviewerFlags.objects.create(
addon=self.addon, pending_info_request=past_date)
response = self.client.post(self.url)
self.assert3xx(
response, reverse('devhub.submit.version.finish',
args=[self.addon.slug, self.version.pk]))
flags = AddonReviewerFlags.objects.get(addon=self.addon)
assert flags.pending_info_request == past_date
assert not ActivityLog.objects.for_addons(self.addon).filter(
action=amo.LOG.DEVELOPER_CLEAR_INFO_REQUEST.id).exists()
def test_can_cancel_review(self):
addon = self.get_addon()
addon_status = addon.status
addon.versions.latest().files.update(status=amo.STATUS_AWAITING_REVIEW)
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
versions_url = reverse('devhub.addons.versions', args=['a3615'])
response = self.client.post(cancel_url)
self.assert3xx(response, versions_url)
addon = self.get_addon()
assert addon.status == addon_status # No change.
version = addon.versions.latest()
del version.all_files
assert version.statuses == [
(version.all_files[0].id, amo.STATUS_DISABLED)]
def test_public_addon_stays_public_even_if_had_missing_metadata(self):
"""Posting details for a new version for a public add-on that somehow
had missing metadata despite being public shouldn't reset it to
nominated."""
# Create a built-in License we'll use later when posting.
License.objects.create(builtin=3, on_form=True)
# Remove license from existing versions, but make sure the addon is
# still public, just lacking metadata now.
self.addon.versions.update(license_id=None)
self.addon.reload()
assert self.addon.status == amo.STATUS_APPROVED
assert not self.addon.has_complete_metadata()
# Now, submit details for that new version, adding license. Since
# metadata is missing, name, slug, summary and category are required to
# be present.
data = {
'name': six.text_type(self.addon.name),
'slug': self.addon.slug,
'summary': six.text_type(self.addon.summary),
'form-0-categories': [22, 1],
'form-0-application': 1,
'form-INITIAL_FORMS': 1,
'form-TOTAL_FORMS': 1,
'license-builtin': 3,
}
response = self.client.post(self.url, data)
self.assert3xx(
response, reverse('devhub.submit.version.finish',
args=[self.addon.slug, self.version.pk]))
self.addon.reload()
assert self.addon.has_complete_metadata()
assert self.addon.status == amo.STATUS_APPROVED
def test_submit_static_theme_should_redirect(self):
self.addon.update(type=amo.ADDON_STATICTHEME)
assert all(self.get_addon().get_required_metadata())
response = self.client.get(self.url)
# No extra details for subsequent theme uploads so just redirect.
self.assert3xx(
response, reverse('devhub.submit.version.finish',
args=[self.addon.slug, self.version.pk]))
class TestVersionSubmitDetailsFirstListed(TestAddonSubmitDetails):
""" Testing the case of a listed version being submitted on an add-on that
previously only had unlisted versions - so is missing metadata."""
def setUp(self):
super(TestVersionSubmitDetailsFirstListed, self).setUp()
self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
self.version = version_factory(addon=self.addon,
channel=amo.RELEASE_CHANNEL_LISTED)
self.version.update(license=None) # Addon needs to be missing data.
self.url = reverse('devhub.submit.version.details',
args=['a3615', self.version.pk])
self.next_step = reverse('devhub.submit.version.finish',
args=['a3615', self.version.pk])
@override_switch('akismet-spam-check', active=True)
@override_switch('akismet-addon-action', active=True)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_spam_check_spam_action_taken(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
data = self.get_dict(name=u'spám', summary=self.addon.summary)
response = self.client.post(self.url, data)
assert response.status_code == 200
self.assertFormError(
response, 'form', 'name',
'The text entered has been flagged as spam.')
self.assertFormError(
response, 'form', 'summary',
'The text entered has been flagged as spam.')
# The summary WILL be comment_check'd, even though it didn't change,
# because we don't trust existing metadata when the previous versions
# were unlisted.
self.addon = self.addon.reload()
assert AkismetReport.objects.count() == 2
report = AkismetReport.objects.first()
assert report.comment_type == 'product-name'
assert report.comment == u'spám'
assert text_type(self.addon.name) != u'spám'
report = AkismetReport.objects.last()
assert report.comment_type == 'product-summary'
assert report.comment == u'Delicious Bookmarks is the official'
assert comment_check_mock.call_count == 2
@override_switch('akismet-spam-check', active=True)
@override_switch('akismet-addon-action', active=False)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_spam_check_spam_logging_only(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
data = self.get_dict(name=u'spám', summary=self.addon.summary)
response = self.is_success(data)
# The summary WILL be comment_check'd, even though it didn't change,
# because we don't trust existing metadata when the previous versions
# were unlisted.
self.addon = self.addon.reload()
assert AkismetReport.objects.count() == 2
report = AkismetReport.objects.first()
assert report.comment_type == 'product-name'
assert report.comment == u'spám'
assert text_type(self.addon.name) == u'spám' # It changed
report = AkismetReport.objects.last()
assert report.comment_type == 'product-summary'
assert report.comment == u'Delicious Bookmarks is the official'
assert b'spam' not in response.content
assert comment_check_mock.call_count == 2
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_spam_check_ham(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.HAM
data = self.get_dict(name=u'spám', summary=self.addon.summary)
response = self.is_success(data)
# The summary WILL be comment_check'd, even though it didn't change,
# because we don't trust existing metadata when the previous versions
# were unlisted.
self.addon = self.addon.reload()
assert AkismetReport.objects.count() == 2
report = AkismetReport.objects.first()
assert report.comment_type == 'product-name'
assert report.comment == u'spám'
assert text_type(self.addon.name) == u'spám' # It changed
report = AkismetReport.objects.last()
assert report.comment_type == 'product-summary'
assert report.comment == u'Delicious Bookmarks is the official'
assert b'spam' not in response.content
assert comment_check_mock.call_count == 2
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_spam_check_no_changes(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.HAM
# Don't change either name or summary from the upload.
data = self.get_dict(name=self.addon.name, summary=self.addon.summary)
response = self.is_success(data)
# No changes but both values were spam checked.
assert AkismetReport.objects.count() == 2
assert b'spam' not in response.content
assert comment_check_mock.call_count == 2
class TestVersionSubmitFinish(TestAddonSubmitFinish):
def setUp(self):
super(TestVersionSubmitFinish, self).setUp()
addon = self.get_addon()
self.version = version_factory(
addon=addon,
channel=amo.RELEASE_CHANNEL_LISTED,
license_id=addon.versions.latest().license_id,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.url = reverse('devhub.submit.version.finish',
args=[addon.slug, self.version.pk])
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
def test_no_welcome_email(self, send_welcome_email_mock):
"""No emails for version finish."""
self.client.get(self.url)
assert not send_welcome_email_mock.called
def test_addon_no_versions_redirects_to_versions(self):
# No versions makes getting to this step difficult!
pass
# No emails for any of these cases so ignore them.
def test_welcome_email_for_newbies(self):
pass
def test_welcome_email_first_listed_addon(self):
pass
def test_welcome_email_if_previous_addon_is_incomplete(self):
pass
def test_no_welcome_email_if_unlisted(self):
pass
| kumar303/olympia | src/olympia/devhub/tests/test_views_submit.py | Python | bsd-3-clause | 109,870 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
import kodi
import log_utils
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.utils2 import i18n
import scraper
BASE_URL = 'http://scene-rls.com'
MULTI_HOST = 'nfo.scene-rls.com'
CATEGORIES = {VIDEO_TYPES.MOVIE: '/category/movies/"', VIDEO_TYPES.EPISODE: '/category/tvshows/"'}
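# Note (added, not in the upstream source): the trailing '"' in each category
# path looks intentional - search() below does a plain substring check against
# the raw post HTML, so keeping the closing quote anchors the match to the end
# of an href="..." attribute rather than to any longer path.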
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'scene-rls'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, require_debrid=True, cache_limit=.5)
sources = self.__get_post_links(html)
for source in sources:
if re.search('\.part\.?\d+', source) or '.rar' in source or 'sample' in source or source.endswith('.nfo'): continue
host = urlparse.urlparse(source).hostname
quality = scraper_utils.blog_get_quality(video, sources[source]['release'], host)
hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': quality, 'direct': False}
if 'X265' in sources[source]['release'] or 'HEVC' in sources[source]['release']:
hoster['format'] = 'x265'
hosters.append(hoster)
return hosters
def __get_post_links(self, html):
sources = {}
post = dom_parser.parse_dom(html, 'div', {'class': 'postContent'})
if post:
for result in re.finditer('<p\s+style="text-align:\s*center;">(.*?)<br.*?<h2(.*?)(?:<h4|<h3|</div>|$)', post[0], re.DOTALL):
release, links = result.groups()
release = re.sub('</?[^>]*>', '', release)
release = release.upper()
for match in re.finditer('href="([^"]+)', links):
stream_url = match.group(1)
if MULTI_HOST in stream_url: continue
sources[stream_url] = {'release': release}
return sources
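    # Added note (not in the upstream source): __get_post_links() returns a
    # mapping of stream url -> {'release': '<UPPER-CASED RELEASE NAME>'};
    # get_sources() above uses the release name to derive quality and to flag
    # x265/HEVC sources.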
def get_url(self, video):
return self._blog_get_url(video)
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings = scraper_utils.disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" %s" default="30" visible="eq(-4,true)"/>' % (name, i18n('filter_results_days')))
settings.append(' <setting id="%s-select" type="enum" label=" %s" lvalues="30636|30637" default="0" visible="eq(-5,true)"/>' % (name, i18n('auto_select')))
return settings
def search(self, video_type, title, year, season=''):
search_url = urlparse.urljoin(self.base_url, '/?s=%s&submit=Find')
search_url = search_url % (urllib.quote_plus(title))
all_html = self._http_get(search_url, require_debrid=True, cache_limit=1)
html = ''
for post in dom_parser.parse_dom(all_html, 'div', {'class': 'post'}):
if CATEGORIES[video_type] in post and self.__get_post_links(post):
html += post
post_pattern = 'class="postTitle">.*?href="(?P<url>[^"]+)[^>]*>(?P<post_title>[^<]+).*?Published on:\s*(?P<date>[^ ]+ 0*\d+, \d+)'
date_format = '%b %d, %Y'
return self._blog_proc_results(html, post_pattern, date_format, video_type, title, year)
| felipenaselva/repo.felipe | plugin.video.salts/scrapers/scenerls_scraper.py | Python | gpl-2.0 | 4,702 |
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import logging
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring, IncompleteRead)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
log = logging.getLogger(__name__)
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
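    # Illustrative sketch (added, not part of the upstream source): both calls
    # below encode to 'k=1&k=2&other=3' (dict ordering is arbitrary):
    #   RequestEncodingMixin._encode_params([('k', 1), ('k', 2), ('other', 3)])
    #   RequestEncodingMixin._encode_params({'k': [1, 2], 'other': 3})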
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
rf = RequestField(name=k, data=fp.read(),
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
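# Added note (not from the upstream source): per the unpacking in
# _encode_files() above, each value in ``files`` may be a bare file object or a
# tuple of (filename, fileobj), (filename, fileobj, content_type) or
# (filename, fileobj, content_type, custom_headers).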
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
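# Illustrative usage sketch (added, not part of the upstream source;
# ``my_hook`` is a hypothetical callable taking (response, **kwargs)):
#   req = Request('GET', 'http://httpbin.org/get', hooks={'response': my_hook})
#   req.deregister_hook('response', my_hook)  # -> True, it was registered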
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy()
p._cookies = self._cookies.copy()
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
try:
url = unicode(url)
except NameError:
# We're on Python 3.
url = str(url)
except UnicodeDecodeError:
pass
# Don't do any URL preparation for oddball schemes
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
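    # Illustrative sketch (added, not part of the upstream source): extra
    # params are merged into any existing query string, e.g.
    #   p = PreparedRequest()
    #   p.prepare_url('http://example.com/path?a=1', {'b': 2})
    #   p.url == 'http://example.com/path?a=1&b=2'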
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, basestring),
not isinstance(data, list),
not isinstance(data, dict)
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
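    # Added note (not from the upstream source): seekable bodies are measured
    # by seeking to the end and rewinding; other non-None bodies fall back to
    # super_len(); with no body at all, anything other than GET/HEAD still
    # gets an explicit 'Content-Length: 0'.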
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
        #: Requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except RequestException:
return False
return True
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the lovely Charade library
(Thanks, Ian!)."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
"""
if self._content_consumed:
# simulate reading small chunks of the content
return iter_slices(self._content, chunk_size)
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size,
decode_content=True):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
gen = generate()
if decode_unicode:
gen = stream_decode_response_unicode(gen, self)
return gen
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size,
decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``charade``.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
return json.loads(self.content.decode(encoding), **kwargs)
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Closes the underlying file descriptor and releases the connection
back to the pool.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
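# Illustrative sketch: a minimal way the streaming helpers above might be
# consumed. Assumes `resp` is a Response obtained elsewhere with ``stream=True``;
# the function name is a hypothetical placeholder, not part of the public API.
def _print_streamed_lines(resp, chunk_size=512):
    # iter_lines() builds on iter_content(), so with stream=True the body is
    # read lazily in chunks instead of being loaded into memory at once.
    for line in resp.iter_lines(chunk_size=chunk_size):
        if line:
            print(line)
    # Release the connection back to the pool once the body is exhausted.
    resp.close()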
| ktan2020/legacy-automation | win/Lib/site-packages/requests/models.py | Python | mit | 24,872 |
import asyncio
class Menu:
"""An interactive menu class for Discord."""
class Submenu:
"""A metaclass of the Menu class."""
def __init__(self, name, content):
self.content = content
self.leads_to = []
self.name = name
def get_text(self):
text = ""
for idx, menu in enumerate(self.leads_to):
text += "[{}] {}\n".format(idx+1, menu.name)
return text
def get_child(self, child_idx):
try:
return self.leads_to[child_idx]
except IndexError:
raise IndexError("child index out of range")
def add_child(self, child):
self.leads_to.append(child)
class InputSubmenu:
"""A metaclass of the Menu class for submenu options that take input, instead of prompting the user to pick an option."""
def __init__(self, name, content, input_function, leads_to):
self.content = content
self.name = name
self.input_function = input_function
self.leads_to = leads_to
def next_child(self):
return self.leads_to
class ChoiceSubmenu:
"""A metaclass of the Menu class for submenu options for choosing an option from a list."""
def __init__(self, name, content, options, input_function, leads_to):
self.content = content
self.name = name
self.options = options
self.input_function = input_function
self.leads_to = leads_to
def next_child(self):
return self.leads_to
def __init__(self, main_page):
self.children = []
self.main = self.Submenu("main", main_page)
def add_child(self, child):
self.main.add_child(child)
async def start(self, ctx):
current = self.main
menu_msg = None
while True:
output = ""
if type(current) == self.Submenu:
if type(current.content) == str:
output += current.content + "\n"
elif callable(current.content):
current.content()
else:
raise TypeError("submenu body is not a str or function")
if not current.leads_to:
if not menu_msg:
menu_msg = await ctx.send("```" + output + "```")
else:
await menu_msg.edit(content="```" + output + "```")
break
output += "\n" + current.get_text() + "\n"
output += "Enter a number."
if not menu_msg:
menu_msg = await ctx.send("```" + output + "```")
else:
await menu_msg.edit(content="```" + output + "```")
reply = await ctx.bot.wait_for("message", check=lambda m: m.author == ctx.bot.user and m.content.isdigit() and m.channel == ctx.message.channel)
await reply.delete()
try:
current = current.get_child(int(reply.content) - 1)
except IndexError:
print("Invalid number.")
break
elif type(current) == self.InputSubmenu:
if type(current.content) == list:
answers = []
for question in current.content:
await menu_msg.edit(content="```" + question + "\n\nEnter a value." + "```")
reply = await ctx.bot.wait_for("message", check=lambda m: m.author == ctx.bot.user and m.channel == ctx.message.channel)
await reply.delete()
answers.append(reply)
current.input_function(*answers)
else:
await menu_msg.edit(content="```" + current.content + "\n\nEnter a value." + "```")
reply = await ctx.bot.wait_for("message", check=lambda m: m.author == ctx.bot.user and m.channel == ctx.message.channel)
await reply.delete()
current.input_function(reply)
if not current.leads_to:
break
current = current.leads_to
elif type(current) == self.ChoiceSubmenu:
result = "```" + current.content + "\n\n"
if type(current.options) == dict:
indexes = {}
for idx, option in enumerate(current.options):
result += "[{}] {}: {}\n".format(idx+1, option, current.options[option])
indexes[idx] = option
else:
                    for idx, option in enumerate(current.options):
result += "[{}] {}\n".format(idx+1, option)
await menu_msg.edit(content=result + "\nPick an option.```")
reply = await ctx.bot.wait_for("message", check=lambda m: m.author == ctx.bot.user and m.content.isdigit() and m.channel == ctx.message.channel)
await reply.delete()
if type(current.options) == dict:
current.input_function(reply, indexes[int(reply.content)-1])
else:
current.input_function(reply, current.options[int(reply.content)-1])
if not current.leads_to:
break
current = current.leads_to
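# Illustrative sketch: one way a simple two-level menu might be wired up with
# the classes above. The labels and the `build_example_menu` name are made-up
# placeholders; `ctx` would come from a discord.py command callback.
def build_example_menu():
    menu = Menu("Welcome! Pick a category.")
    menu.add_child(Menu.Submenu("Info", "This is the info page."))
    menu.add_child(Menu.Submenu("Settings", "Settings would go here."))
    return menu
# Inside a command one would then presumably call:
#     await build_example_menu().start(ctx)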
| appu1232/Discord-Selfbot | cogs/utils/menu.py | Python | gpl-3.0 | 5,866 |
"""
CanvasSync by Mathias Perslev
February 2017
--------------------------------------------
cryptography.py, module
Functions used to encrypt and decrypt the settings stored in the .CanvasSync.settings file. When the user has specified
settings, the string of information is encrypted using the AES-256 module of the PyCrypto library. A password is
specified by the user upon creation of the settings file. A hashed (thus unreadable) version of the password is stored
locally in the .CanvasSync.pw file in the home folder of the user. Upon launch of CanvasSync, the user must specify
a password that matches the one stored in the hashed version. If the password is correct, the settings file is
decrypted and parsed for settings.
"""
# Future imports
from __future__ import print_function
# Inbuilt modules
import getpass
import os.path
import sys
# Third party modules
import bcrypt
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
def get_key_hash(password):
""" Get a 256 byte SHA hash from any length password """
hasher = SHA256.new(password.encode(u"utf-8"))
return hasher.digest()
def encrypt(message):
"""
Encrypts a string using AES-256 (CBC) encryption
A random initialization vector (IV) is padded as the initial 16 bytes of the string
The encrypted message will be padded to length%16 = 0 bytes (AES needs 16 bytes block sizes)
"""
print(u"\nPlease enter a password to encrypt the settings file:")
hashed_password = bcrypt.hashpw(getpass.getpass(), bcrypt.gensalt())
with open(os.path.expanduser(u"~") + u"/.CanvasSync.pw", "w") as pass_file:
pass_file.write(hashed_password)
# Generate random 16 bytes IV
IV = os.urandom(16)
# AES object
encrypter = AES.new(get_key_hash(hashed_password), AES.MODE_CBC, IV)
# Padding to 16 bytes
if len(message) % 16 != 0:
message += " " * (16 - (len(message) % 16))
# Add the unencrypted IV to the beginning of the encrypted_message
encrypted_message = IV + encrypter.encrypt(message.encode("utf-8"))
return encrypted_message
def decrypt(message, password):
"""
Decrypts an AES encrypted string
"""
# Load the locally stored bcrypt hashed password (answer)
path = os.path.expanduser(u"~") + u"/.CanvasSync.pw"
if not os.path.exists(path):
return False
with open(path, "r") as pw_file:
hashed_password = pw_file.read()
# Get password from user and compare to answer
valid_password = False
# If the password isn't null then it was specified as a command-line argument
if password:
if bcrypt.hashpw(password, hashed_password) != hashed_password:
print(u"\n[ERROR] Invalid password. Please try again or invoke CanvasSync with the -s flag to reset settings.")
sys.exit()
else:
# Otherwise, get the password from the user
while not valid_password:
print(u"\nPlease enter password to decrypt the settings file:")
password = getpass.getpass()
if bcrypt.hashpw(password, hashed_password) == hashed_password:
valid_password = True
else:
print(u"\n[ERROR] Invalid password. Please try again or invoke CanvasSync with the -s flag to reset settings.")
# Read the remote IV
remoteIV = message[:16]
# Decrypt message using the correct password
decrypter = AES.new(get_key_hash(hashed_password), AES.MODE_CBC, remoteIV)
decrypted_message = decrypter.decrypt(message[16:])
return decrypted_message.rstrip()
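# Illustrative sketch: a minimal round trip of the AES-CBC scheme used above,
# skipping the interactive password prompt and the .CanvasSync.pw file. The
# passphrase and message are made-up placeholders; PyCrypto must be installed.
def _demo_roundtrip():
    key = get_key_hash(u"example passphrase")
    IV = os.urandom(16)
    message = u"settings string"
    # Pad to a multiple of 16 bytes, mirroring encrypt() above.
    message += u" " * (-len(message) % 16)
    encrypted = IV + AES.new(key, AES.MODE_CBC, IV).encrypt(message.encode("utf-8"))
    # Mirror decrypt(): the first 16 bytes of the payload carry the IV.
    decrypted = AES.new(key, AES.MODE_CBC, encrypted[:16]).decrypt(encrypted[16:])
    return decrypted.rstrip() == b"settings string"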
| perslev/CanvasSync | CanvasSync/settings/cryptography.py | Python | mit | 3,583 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
import unittest2
from robottelo.ui.location import Location
from robottelo.ui.locators import common_locators
from robottelo.ui.locators import locators
if six.PY2:
import mock
else:
from unittest import mock
class LocationTestCase(unittest2.TestCase):
def test_creation_without_parent_and_without_unassigned_host(self):
location = Location(None)
location.click = mock.Mock()
location.assign_value = mock.Mock()
location.wait_until_element = mock.Mock(return_value=None)
location._configure_location = mock.Mock()
location.select = mock.Mock()
location.create('foo')
click_calls = [
mock.call(locators['location.new']),
mock.call(common_locators['submit']),
mock.call(common_locators['submit'])
]
self.assertEqual(3, location.click.call_count)
location.click.assert_has_calls(click_calls, any_order=False)
location.assign_value.assert_called_once_with(
locators['location.name'], 'foo')
# not called if parent is None
location.select.assert_not_called()
location._configure_location.assert_called_once_with(
capsules=None, all_capsules=None, domains=None, envs=None,
hostgroups=None, medias=None, organizations=None, ptables=None,
resources=None, select=True, subnets=None, templates=None,
users=None, params=None
)
def test_creation_with_parent_and_unassigned_host(self):
location = Location(None)
location.click = mock.Mock()
location.assign_value = mock.Mock()
location.wait_until_element = mock.Mock()
location._configure_location = mock.Mock()
location.select = mock.Mock()
configure_arguments = {
arg: arg for arg in
'capsules all_capsules domains hostgroups medias organizations '
'envs ptables resources select subnets templates users params '
'select'.split()
}
location.create('foo', 'parent', **configure_arguments)
click_calls = [
mock.call(locators['location.new']),
mock.call(common_locators['submit']),
mock.call(locators['location.proceed_to_edit']),
mock.call(common_locators['submit'])
]
self.assertEqual(4, location.click.call_count)
location.click.assert_has_calls(click_calls, any_order=False)
location.assign_value.assert_called_once_with(
locators['location.name'], 'foo')
# called only if parent is not None
location.select.assert_called_once_with(
locators['location.parent'], 'parent'
)
location._configure_location.assert_called_once_with(
**configure_arguments)
| sghai/robottelo | tests/robottelo/ui/test_location.py | Python | gpl-3.0 | 2,887 |
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import BaseHTTPServer
import ConfigParser
import cookielib
import errno
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import webbrowser
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"
VCS_ABBREVIATIONS = {
VCS_MERCURIAL.lower(): VCS_MERCURIAL,
"hg": VCS_MERCURIAL,
VCS_SUBVERSION.lower(): VCS_SUBVERSION,
"svn": VCS_SUBVERSION,
VCS_PERFORCE.lower(): VCS_PERFORCE,
"p4": VCS_PERFORCE,
VCS_GIT.lower(): VCS_GIT,
VCS_CVS.lower(): VCS_CVS,
}
# OAuth 2.0-Related Constants
LOCALHOST_IP = '127.0.0.1'
DEFAULT_OAUTH2_PORT = 8001
ACCESS_TOKEN_PARAM = 'access_token'
OAUTH_PATH = '/get-access-token'
OAUTH_PATH_PORT_TEMPLATE = OAUTH_PATH + '?port=%(port)d'
AUTH_HANDLER_RESPONSE = """\
<html>
<head>
<title>Authentication Status</title>
</head>
<body>
<p>The authentication flow has completed.</p>
</body>
</html>
"""
# Borrowed from google-api-python-client
OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Your browser has been opened to visit:
%s
If your browser is on a different machine then exit and re-run
upload.py with the command-line parameter
--no_oauth2_webbrowser
"""
NO_OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Go to the following link in your browser:
%s
and copy the access token.
"""
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self._reason = args["Error"]
self.info = args.get("Info", None)
@property
def reason(self):
# reason is a property on python 2.7 but a member variable on <=2.6.
# self.args is modified so it cannot be used as-is so save the value in
# self._reason.
return self._reason
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None,
extra_headers=None, save_cookies=False,
account_type=AUTH_ACCOUNT_TYPE):
"""Creates a new AbstractRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
account_type: Account type used for authentication. Defaults to
AUTH_ACCOUNT_TYPE.
"""
self.host = host
if (not self.host.startswith("http://") and
not self.host.startswith("https://")):
self.host = "http://" + self.host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers or {}
self.save_cookies = save_cookies
self.account_type = account_type
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data, headers={"Accept": "text/plain"})
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
print >>sys.stderr, ''
if e.reason == "BadAuthentication":
if e.info == "InvalidSecondFactor":
print >>sys.stderr, (
"Use an application-specific password instead "
"of your regular account password.\n"
"See http://www.google.com/"
"support/accounts/bin/answer.py?answer=185833")
else:
print >>sys.stderr, "Invalid username or password."
elif e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.\n"
"If you are using a Google Apps account the URL is:\n"
"https://www.google.com/a/yourdomain.com/UnlockCaptcha")
elif e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
elif e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
elif e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
elif e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
elif e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
elif e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
else:
# Unknown error.
raise
print >>sys.stderr, ''
continue
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
extra_headers: Dict containing additional HTTP headers that should be
included in the request (string header names mapped to their values),
or None to not include any additional headers.
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
if extra_headers:
for header, value in extra_headers.items():
req.add_header(header, value)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
elif e.code == 301:
# Handle permanent redirect manually.
url = e.info()["location"]
url_loc = urlparse.urlparse(url)
self.host = '%s://%s' % (url_loc[0], url_loc[1])
elif e.code >= 500:
ErrorExit(e.read())
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
if isinstance(self.auth_function, OAuth2Creds):
access_token = self.auth_function()
if access_token is not None:
self.extra_headers['Authorization'] = 'OAuth %s' % (access_token,)
self.authenticated = True
else:
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
class CondensedHelpFormatter(optparse.IndentedHelpFormatter):
"""Frees more horizontal space by removing indentation from group
options and collapsing arguments between short and long, e.g.
'-o ARG, --opt=ARG' to -o --opt ARG"""
def format_heading(self, heading):
return "%s:\n" % heading
def format_option(self, option):
self.dedent()
res = optparse.HelpFormatter.format_option(self, option)
self.indent()
return res
def format_option_strings(self, option):
self.set_long_opt_delimiter(" ")
optstr = optparse.HelpFormatter.format_option_strings(self, option)
optlist = optstr.split(", ")
if len(optlist) > 1:
if option.takes_value():
# strip METAVAR from all but the last option
optlist = [x.split()[0] for x in optlist[:-1]] + optlist[-1:]
optstr = " ".join(optlist)
return optstr
parser = optparse.OptionParser(
usage=("%prog [options] [-- diff_options] [path...]\n"
"See also: http://code.google.com/p/rietveld/wiki/UploadPyUsage"),
add_help_option=False,
formatter=CondensedHelpFormatter()
)
parser.add_option("-h", "--help", action="store_true",
help="Show this help message and exit.")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
help="Print full diffs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default=DEFAULT_REVIEW_SERVER,
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
group.add_option("--oauth2", action="store_true",
dest="use_oauth2", default=False,
help="Use OAuth 2.0 instead of a password.")
group.add_option("--oauth2_port", action="store", type="int",
dest="oauth2_port", default=DEFAULT_OAUTH2_PORT,
help=("Port to use to handle OAuth 2.0 redirect. Must be an "
"integer in the range 1024-49151, defaults to "
"'%default'."))
group.add_option("--no_oauth2_webbrowser", action="store_false",
dest="open_oauth2_local_webbrowser", default=True,
help="Don't open a browser window to get an access token.")
group.add_option("--account_type", action="store", dest="account_type",
metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
choices=["GOOGLE", "HOSTED"],
help=("Override the default account type "
"(defaults to '%default', "
"valid choices are 'GOOGLE' and 'HOSTED')."))
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-t", "--title", action="store", dest="title",
help="New issue subject or new patch set title")
group.add_option("-m", "--message", action="store", dest="message",
default=None,
help="New issue description or new patch set message")
group.add_option("-F", "--file", action="store", dest="file",
default=None, help="Read the message above from file.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
help="Base URL path for files (listed as \"Base URL\" when "
"viewing issue). If omitted, will be guessed automatically "
"for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Base revision/branch/tree to diff against. Use "
"rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
group.add_option("-p", "--send_patch", action="store_true",
dest="send_patch", default=False,
help="Same as --send_mail, but include diff as an "
"attachment, and prepend email subject with 'PATCH:'.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Version control system (optional, usually upload.py "
"already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
dest="emulate_svn_auto_props", default=False,
help=("Emulate Subversion's auto properties feature."))
# Git-specific
group = parser.add_option_group("Git-specific options")
group.add_option("--git_similarity", action="store", dest="git_similarity",
metavar="SIM", type="int", default=50,
help=("Set the minimum similarity index for detecting renames "
"and copies. See `git diff -C`. (default 50)."))
group.add_option("--git_no_find_copies", action="store_false", default=True,
dest="git_find_copies",
help=("Prevents git from looking for copies (default off)."))
# Perforce-specific
group = parser.add_option_group("Perforce-specific options "
"(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
metavar="P4_PORT", default=None,
help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
metavar="P4_CHANGELIST", default=None,
help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
metavar="P4_CLIENT", default=None,
help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
metavar="P4_USER", default=None,
help=("Perforce user"))
# OAuth 2.0 Methods and Helpers
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters for an access token
and then stops serving.
"""
access_token = None
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters into the server's
access_token and then stops serving.
"""
def SetAccessToken(self):
"""Stores the access token from the request on the server.
Will only do this if exactly one query parameter was passed in to the
request and that query parameter used 'access_token' as the key.
"""
query_string = urlparse.urlparse(self.path).query
query_params = urlparse.parse_qs(query_string)
if len(query_params) == 1:
access_token_list = query_params.get(ACCESS_TOKEN_PARAM, [])
if len(access_token_list) == 1:
self.server.access_token = access_token_list[0]
def do_GET(self):
"""Handle a GET request.
Parses and saves the query parameters and prints a message that the server
has completed its lone task (handling a redirect).
Note that we can't detect if an error occurred.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.SetAccessToken()
self.wfile.write(AUTH_HANDLER_RESPONSE)
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
pass
def OpenOAuth2ConsentPage(server=DEFAULT_REVIEW_SERVER,
port=DEFAULT_OAUTH2_PORT):
"""Opens the OAuth 2.0 consent page or prints instructions how to.
Uses the webbrowser module to open the OAuth server side page in a browser.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
"""
path = OAUTH_PATH_PORT_TEMPLATE % {'port': port}
parsed_url = urlparse.urlparse(server)
scheme = parsed_url[0] or 'https'
if scheme != 'https':
ErrorExit('Using OAuth requires a review server with SSL enabled.')
# If no scheme was given on command line the server address ends up in
# parsed_url.path otherwise in netloc.
host = parsed_url[1] or parsed_url[2]
page = '%s://%s%s' % (scheme, host, path)
webbrowser.open(page, new=1, autoraise=True)
print OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
def WaitForAccessToken(port=DEFAULT_OAUTH2_PORT):
"""Spins up a simple HTTP Server to handle a single request.
Intended to handle a single redirect from the production server after the
user authenticated via OAuth 2.0 with the server.
Args:
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
The access token passed to the localhost server, or None if no access token
was passed.
"""
httpd = ClientRedirectServer((LOCALHOST_IP, port), ClientRedirectHandler)
  # Wait to serve just one request before deferring control back
  # to the caller of WaitForAccessToken.
httpd.handle_request()
return httpd.access_token
def GetAccessToken(server=DEFAULT_REVIEW_SERVER, port=DEFAULT_OAUTH2_PORT,
open_local_webbrowser=True):
"""Gets an Access Token for the current user.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_local_webbrowser: Boolean, defaults to True. If set, opens a page in
the user's browser.
Returns:
A string access token that was sent to the local server. If the serving page
via WaitForAccessToken does not receive an access token, this method
returns None.
"""
access_token = None
if open_local_webbrowser:
OpenOAuth2ConsentPage(server=server, port=port)
try:
access_token = WaitForAccessToken(port=port)
except socket.error, e:
print 'Can\'t start local webserver. Socket Error: %s\n' % (e.strerror,)
if access_token is None:
# TODO(dhermes): Offer to add to clipboard using xsel, xclip, pbcopy, etc.
page = 'https://%s%s' % (server, OAUTH_PATH)
print NO_OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
access_token = raw_input('Enter access token: ').strip()
return access_token
class KeyringCreds(object):
def __init__(self, server, host, email):
self.server = server
# Explicitly cast host to str to work around bug in old versions of Keyring
# (versions before 0.10). Even though newer versions of Keyring fix this,
# some modern linuxes (such as Ubuntu 12.04) still bundle a version with
# the bug.
self.host = str(host)
self.email = email
self.accounts_seen = set()
def GetUserCredentials(self):
"""Prompts the user for a username and password.
Only use keyring on the initial call. If the keyring contains the wrong
password, we want to give the user a chance to enter another one.
"""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
global keyring
email = self.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % self.server)
password = None
if keyring and not email in self.accounts_seen:
try:
password = keyring.get_password(self.host, email)
except:
# Sadly, we have to trap all errors here as
# gnomekeyring.IOError inherits from object. :/
print "Failed to get password from keyring"
keyring = None
if password is not None:
print "Using password from system keyring."
self.accounts_seen.add(email)
else:
password = getpass.getpass("Password for %s: " % email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(self.host, email, password)
self.accounts_seen.add(email)
return (email, password)
class OAuth2Creds(object):
"""Simple object to hold server and port to be passed to GetAccessToken."""
def __init__(self, server, port, open_local_webbrowser=True):
self.server = server
self.port = port
self.open_local_webbrowser = open_local_webbrowser
def __call__(self):
"""Uses stored server and port to retrieve OAuth 2.0 access token."""
return GetAccessToken(server=self.server, port=self.port,
open_local_webbrowser=self.open_local_webbrowser)
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE, use_oauth2=False,
oauth2_port=DEFAULT_OAUTH2_PORT,
open_oauth2_local_webbrowser=True):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
use_oauth2: Boolean indicating whether OAuth 2.0 should be used for
authentication.
oauth2_port: Integer, the port where the localhost server receiving the
redirect is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_oauth2_local_webbrowser: Boolean, defaults to True. If True and using
OAuth, this opens a page in the user's browser to obtain a token.
Returns:
A new HttpRpcServer, on which RPC calls can be made.
"""
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if re.match(r'(http://)?localhost([:/]|$)', host):
if email is None:
email = "[email protected]"
logging.info("Using debug user %s. Override with --email" % email)
server = HttpRpcServer(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
positional_args = [server]
if use_oauth2:
positional_args.append(
OAuth2Creds(server, oauth2_port, open_oauth2_local_webbrowser))
else:
positional_args.append(KeyringCreds(server, host, email).GetUserCredentials)
return HttpRpcServer(*positional_args,
host_override=host_override,
save_cookies=save_cookies,
account_type=account_type)
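# Illustrative sketch: how a caller might obtain an RPC handle from the helper
# above. The email address is a made-up placeholder; no network request or
# credential prompt happens until Send() is called on the returned server.
def _example_rpc_server():
  return GetRpcServer(DEFAULT_REVIEW_SERVER,
                      email="user@example.com",
                      save_cookies=False)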
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
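# Illustrative sketch: calling EncodeMultipartFormData with made-up form fields
# and one in-memory file; the names and values are placeholders only.
def _example_multipart():
  fields = [("subject", "Fix typo"), ("send_mail", "1")]
  files = [("data", "notes.txt", "hello world\n")]
  content_type, body = EncodeMultipartFormData(fields, files)
  # content_type carries the boundary; body is ready to use as a POST payload.
  return content_type, body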
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCodeAndStderr(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout, stderr and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (stdout, stderr, return code)
"""
logging.info("Running %s", command)
env = env.copy()
env['LC_MESSAGES'] = 'C'
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, errout, p.returncode
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout and the return code."""
out, err, retcode = RunShellWithReturnCodeAndStderr(command, print_output,
universal_newlines, env)
return out, retcode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
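# Illustrative sketch: RunShell returns the captured stdout and calls ErrorExit
# on a non-zero status. The command is a placeholder and assumes a POSIX-like
# environment where `echo` is on the PATH.
def _example_run_shell():
  return RunShell(["echo", "hello"], silent_ok=True).strip()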
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GetGUID(self):
"""Return string to distinguish the repository from others, for example to
query all opened review issues for it"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def PostProcessDiff(self, diff):
"""Return the diff with any special post processing this VCS needs, e.g.
to include an svn-style "Index:"."""
return diff
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinaryData(self, data):
"""Returns true if data contains a null byte."""
    # Derived from Mercurial's binary-detection heuristic; see
# http://selenic.com/hg/file/848a6658069e/mercurial/util.py#l229
return bool(data and "\0" in data)
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# Base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GetGUID(self):
return self._GetInfo("Repository UUID")
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns base URL for current diff.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
url = self._GetInfo("URL")
if url:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
guess = ""
# TODO(anatoli) - repository specific hacks should be handled by server
if netloc == "svn.python.org" and scheme == "svn+ssh":
path = "projects" + path
scheme = "http"
guess = "Python "
elif netloc.endswith(".googlecode.com"):
scheme = "http"
guess = "Google Code "
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed %sbase = %s", guess, base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def _GetInfo(self, key):
"""Parses 'svn info' for current dir. Returns value for key or None"""
for line in RunShell(["svn", "info"]).splitlines():
if line.startswith(key + ": "):
return line.split(":", 1)[1].strip()
def _EscapeFilename(self, filename):
"""Escapes filename for SVN commands."""
if "@" in filename and not filename.endswith("@"):
filename = "%s@" % filename
return filename
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals",
self._EscapeFilename(filename)])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start,
self._EscapeFilename(dirname) or "."]
out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
if returncode:
          # Directory might not yet exist at start revision
# svn: Unable to find repository location for 'abc' in revision nnn
if re.match('^svn: Unable to find repository location for .+ in revision \d+', err):
old_files = ()
else:
ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
else:
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [self._EscapeFilename(dirname) or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
    # If a file is copied, its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type",
self._EscapeFilename(filename)], silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary:
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
        # filename must not be escaped. We already append an "@REV" suffix here.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
else:
mimetype = mimetype.strip()
get_base = False
# this test for binary is exactly the test prescribed by the
# official SVN docs at
# http://subversion.apache.org/faq.html#binary-files
is_binary = (bool(mimetype) and
not mimetype.startswith("text/") and
mimetype not in ("image/x-xbitmap", "image/x-xpixmap"))
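      # A few illustrative (hypothetical) mime types and how this rule
      # classifies them:
      #   "application/octet-stream" -> binary
      #   "text/plain"               -> text
      #   "image/x-xbitmap"          -> treated as text (explicit exception)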
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content, ret_code = RunShellWithReturnCode(
["svn", "cat", self._EscapeFilename(filename)],
universal_newlines=universal_newlines)
if ret_code and status[0] == "R":
# It's a replaced file without local history (see issue208).
# The base file needs to be fetched from the server.
url = "%s/%s" % (self.svn_base, filename)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
elif ret_code:
ErrorExit("Got error status from 'svn cat %s'" % filename)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def GetGUID(self):
revlist = RunShell("git rev-list --parents HEAD".split()).splitlines()
    # M-A: Return the first root hash; there could be multiple when a
# subtree is merged. In that case, more analysis would need to
# be done to figure out which HEAD is the 'most representative'.
for r in revlist:
if ' ' not in r:
return r
def PostProcessDiff(self, gitdiff):
"""Converts the diff output to include an svn-style "Index:" line as well
as record the hashes of the files, so we can upload them along with our
diff."""
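    # Rough illustration (hypothetical filenames): a git header pair such as
    #   diff --git a/foo/bar.py b/foo/bar.py
    #   index 82c0d44..b2cee3f 100755
    # becomes an "Index: foo/bar.py" line, and the two hashes from the "index"
    # line are recorded in self.hashes so the base file can be uploaded later.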
    # Special hash used by git to indicate "no such content".
NULL_HASH = "0"*40
def IsFileNew(filename):
return filename in self.hashes and self.hashes[filename][0] is None
    def AddSubversionPropertyChange(filename):
      """Add svn's property change information into the patch if the given
      file is a new file.
We use Subversion's auto-props setting to retrieve its property.
See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
Subversion's [auto-props] setting.
"""
if self.options.emulate_svn_auto_props and IsFileNew(filename):
svnprops = GetSubversionPropertyChanges(filename)
if svnprops:
svndiff.append("\n" + svnprops + "\n")
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
# Add auto property here for previously seen file.
if filename is not None:
AddSubversionPropertyChange(filename)
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
before = None
if after == NULL_HASH:
after = None
self.hashes[filename] = (before, after)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
# Add auto property for the last seen file.
assert filename is not None
AddSubversionPropertyChange(filename)
return "".join(svndiff)
def GenerateDiff(self, extra_args):
extra_args = extra_args[:]
if self.options.revision:
if ":" in self.options.revision:
extra_args = self.options.revision.split(":", 1) + extra_args
else:
extra_args = [self.options.revision] + extra_args
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if "GIT_EXTERNAL_DIFF" in env:
del env["GIT_EXTERNAL_DIFF"]
# -M/-C will not print the diff for the deleted file when a file is renamed.
# This is confusing because the original file will not be shown on the
# review when a file is renamed. So, get a diff with ONLY deletes, then
# append a diff (with rename detection), without deletes.
cmd = [
"git", "diff", "--no-color", "--no-ext-diff", "--full-index",
"--ignore-submodules",
]
diff = RunShell(
cmd + ["--no-renames", "--diff-filter=D"] + extra_args,
env=env, silent_ok=True)
if self.options.git_find_copies:
similarity_options = ["--find-copies-harder", "-l100000",
"-C%s" % self.options.git_similarity ]
else:
similarity_options = ["-M%s" % self.options.git_similarity ]
diff += RunShell(
cmd + ["--diff-filter=AMCRT"] + similarity_options + extra_args,
env=env, silent_ok=True)
    # The CL might consist solely of deletions, or contain none at all, so
    # accept an empty diff from both commands and check for a completely
    # empty diff manually.
if not diff:
ErrorExit("No output from %s" % (cmd + extra_args))
return diff
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash, is_binary):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=not is_binary)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(
["git", "show", "HEAD:" + filename], silent_ok=True)
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
is_image = self.IsImage(filename)
is_binary = self.IsBinaryData(base_content) or is_image
# Grab the before/after content if we need it.
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before, is_binary)
    # Only include the "after" file if it's an image; otherwise it is
    # reconstructed from the diff.
if is_image and hash_after:
new_content = self.GetFileContent(hash_after, is_binary)
return (base_content, new_content, is_binary, status)
class CVSVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for CVS."""
def __init__(self, options):
super(CVSVCS, self).__init__(options)
def GetGUID(self):
"""For now we don't know how to get repository ID for CVS"""
return
def GetOriginalContent_(self, filename):
RunShell(["cvs", "up", filename], silent_ok=True)
    # TODO: need to detect the file content encoding
content = open(filename).read()
return content.replace("\r\n", "\n")
def GetBaseFile(self, filename):
base_content = None
new_content = None
status = "A"
output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
if retcode:
ErrorExit("Got error status from 'cvs status %s'" % filename)
if output.find("Status: Locally Modified") != -1:
status = "M"
temp_filename = "%s.tmp123" % filename
os.rename(filename, temp_filename)
base_content = self.GetOriginalContent_(filename)
os.rename(temp_filename, filename)
    elif output.find("Status: Locally Added") != -1:
      status = "A"
      base_content = ""
    elif output.find("Status: Needs Checkout") != -1:
status = "D"
base_content = self.GetOriginalContent_(filename)
return (base_content, new_content, self.IsBinaryData(base_content), status)
def GenerateDiff(self, extra_args):
cmd = ["cvs", "diff", "-u", "-N"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(extra_args)
data, retcode = RunShellWithReturnCode(cmd)
count = 0
if retcode in [0, 1]:
for line in data.splitlines():
if line.startswith("Index:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from cvs diff")
return data
def GetUnknownFiles(self):
data, retcode = RunShellWithReturnCode(["cvs", "diff"])
if retcode not in [0, 1]:
ErrorExit("Got error status from 'cvs diff':\n%s" % (data,))
unknown_files = []
for line in data.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def GetGUID(self):
# See chapter "Uniquely identifying a repository"
# http://hgbook.red-bean.com/read/customizing-the-output-of-mercurial.html
info = RunShell("hg log -r0 --template {node}".split())
return info.strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
absname = os.path.join(self.repo_dir, filename)
return os.path.relpath(absname)
def GenerateDiff(self, extra_args):
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
      m = re.match(r"diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify the line to make it look as if it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir,
# but "hg diff" has given us the path relative to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
status, _ = out[0].split(' ', 1)
if len(out) > 1 and status == "A":
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True)
is_binary = self.IsBinaryData(base_content)
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or self.IsBinaryData(new_content)
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary:
new_content = None
return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Perforce."""
def __init__(self, options):
def ConfirmLogin():
# Make sure we have a valid perforce session
while True:
data, retcode = self.RunPerforceCommandWithReturnCode(
["login", "-s"], marshal_output=True)
if not data:
ErrorExit("Error checking perforce login")
        if not retcode and ("code" not in data or data["code"] != "error"):
break
print "Enter perforce password: "
self.RunPerforceCommandWithReturnCode(["login"])
super(PerforceVCS, self).__init__(options)
self.p4_changelist = options.p4_changelist
if not self.p4_changelist:
ErrorExit("A changelist id is required")
    if options.revision:
ErrorExit("--rev is not supported for perforce")
self.p4_port = options.p4_port
self.p4_client = options.p4_client
self.p4_user = options.p4_user
ConfirmLogin()
if not options.title:
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
if description and "desc" in description:
# Rietveld doesn't support multi-line descriptions
raw_title = description["desc"].strip()
lines = raw_title.splitlines()
if len(lines):
options.title = lines[0]
def GetGUID(self):
"""For now we don't know how to get repository ID for Perforce"""
return
def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
universal_newlines=True):
args = ["p4"]
if marshal_output:
# -G makes perforce format its output as marshalled python objects
args.extend(["-G"])
if self.p4_port:
args.extend(["-p", self.p4_port])
if self.p4_client:
args.extend(["-c", self.p4_client])
if self.p4_user:
args.extend(["-u", self.p4_user])
args.extend(extra_args)
data, retcode = RunShellWithReturnCode(
args, print_output=False, universal_newlines=universal_newlines)
if marshal_output and data:
data = marshal.loads(data)
return data, retcode
def RunPerforceCommand(self, extra_args, marshal_output=False,
universal_newlines=True):
# This might be a good place to cache call results, since things like
# describe or fstat might get called repeatedly.
data, retcode = self.RunPerforceCommandWithReturnCode(
extra_args, marshal_output, universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
return data
def GetFileProperties(self, property_key_prefix = "", command = "describe"):
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
changed_files = {}
file_index = 0
# Try depotFile0, depotFile1, ... until we don't find a match
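    # For example (hypothetical marshalled output), a describe dict containing
    #   {"depotFile0": "//depot/a.c", "action0": "edit"}
    # yields {"//depot/a.c": "edit"} when property_key_prefix is "action".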
while True:
file_key = "depotFile%d" % file_index
if file_key in description:
filename = description[file_key]
change_type = description[property_key_prefix + str(file_index)]
changed_files[filename] = change_type
file_index += 1
else:
break
return changed_files
def GetChangedFiles(self):
return self.GetFileProperties("action")
def GetUnknownFiles(self):
    # Perforce doesn't detect new files; they have to be explicitly added.
return []
def IsBaseBinary(self, filename):
base_filename = self.GetBaseFilename(filename)
return self.IsBinaryHelper(base_filename, "files")
def IsPendingBinary(self, filename):
return self.IsBinaryHelper(filename, "describe")
def IsBinaryHelper(self, filename, command):
file_types = self.GetFileProperties("type", command)
    if filename not in file_types:
ErrorExit("Trying to check binary status of unknown file %s." % filename)
# This treats symlinks, macintosh resource files, temporary objects, and
# unicode as binary. See the Perforce docs for more details:
# http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
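    # Illustrative (assumed) examples of this rule: Perforce types "text" and
    # "ktext" are treated as text, while "binary", "symlink" and "unicode"
    # are treated as binary.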
return not file_types[filename].endswith("text")
def GetFileContent(self, filename, revision, is_binary):
file_arg = filename
if revision:
file_arg += "#" + revision
# -q suppresses the initial line that displays the filename and revision
return self.RunPerforceCommand(["print", "-q", file_arg],
universal_newlines=not is_binary)
def GetBaseFilename(self, filename):
actionsWithDifferentBases = [
"move/add", # p4 move
"branch", # p4 integrate (to a new file), similar to hg "add"
"add", # p4 integrate (to a new file), after modifying the new file
]
# We only see a different base for "add" if this is a downgraded branch
# after a file was branched (integrated), then edited.
if self.GetAction(filename) in actionsWithDifferentBases:
# -Or shows information about pending integrations/moves
fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
marshal_output=True)
baseFileKey = "resolveFromFile0" # I think it's safe to use only file0
if baseFileKey in fstat_result:
return fstat_result[baseFileKey]
return filename
def GetBaseRevision(self, filename):
base_filename = self.GetBaseFilename(filename)
have_result = self.RunPerforceCommand(["have", base_filename],
marshal_output=True)
if "haveRev" in have_result:
return have_result["haveRev"]
def GetLocalFilename(self, filename):
where = self.RunPerforceCommand(["where", filename], marshal_output=True)
if "path" in where:
return where["path"]
def GenerateDiff(self, args):
class DiffData:
def __init__(self, perforceVCS, filename, action):
self.perforceVCS = perforceVCS
self.filename = filename
self.action = action
self.base_filename = perforceVCS.GetBaseFilename(filename)
self.file_body = None
self.base_rev = None
self.prefix = None
self.working_copy = True
self.change_summary = None
def GenerateDiffHeader(diffData):
header = []
header.append("Index: %s" % diffData.filename)
header.append("=" * 67)
if diffData.base_filename != diffData.filename:
if diffData.action.startswith("move"):
verb = "rename"
else:
verb = "copy"
header.append("%s from %s" % (verb, diffData.base_filename))
header.append("%s to %s" % (verb, diffData.filename))
suffix = "\t(revision %s)" % diffData.base_rev
header.append("--- " + diffData.base_filename + suffix)
if diffData.working_copy:
suffix = "\t(working copy)"
header.append("+++ " + diffData.filename + suffix)
if diffData.change_summary:
header.append(diffData.change_summary)
return header
def GenerateMergeDiff(diffData, args):
# -du generates a unified diff, which is nearly svn format
diffData.file_body = self.RunPerforceCommand(
["diff", "-du", diffData.filename] + args)
diffData.base_rev = self.GetBaseRevision(diffData.filename)
diffData.prefix = ""
# We have to replace p4's file status output (the lines starting
# with +++ or ---) to match svn's diff format
lines = diffData.file_body.splitlines()
first_good_line = 0
while (first_good_line < len(lines) and
not lines[first_good_line].startswith("@@")):
first_good_line += 1
diffData.file_body = "\n".join(lines[first_good_line:])
return diffData
def GenerateAddDiff(diffData):
fstat = self.RunPerforceCommand(["fstat", diffData.filename],
marshal_output=True)
if "headRev" in fstat:
diffData.base_rev = fstat["headRev"] # Re-adding a deleted file
else:
diffData.base_rev = "0" # Brand new file
diffData.working_copy = False
rel_path = self.GetLocalFilename(diffData.filename)
diffData.file_body = open(rel_path, 'r').read()
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -0,0 +1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " @@"
diffData.prefix = "+"
return diffData
def GenerateDeleteDiff(diffData):
diffData.base_rev = self.GetBaseRevision(diffData.filename)
is_base_binary = self.IsBaseBinary(diffData.filename)
# For deletes, base_filename == filename
diffData.file_body = self.GetFileContent(diffData.base_filename,
None,
is_base_binary)
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " +0,0 @@"
diffData.prefix = "-"
return diffData
changed_files = self.GetChangedFiles()
svndiff = []
filecount = 0
for (filename, action) in changed_files.items():
svn_status = self.PerforceActionToSvnStatus(action)
if svn_status == "SKIP":
continue
diffData = DiffData(self, filename, action)
# Is it possible to diff a branched file? Stackoverflow says no:
# http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
if svn_status == "M":
diffData = GenerateMergeDiff(diffData, args)
elif svn_status == "A":
diffData = GenerateAddDiff(diffData)
elif svn_status == "D":
diffData = GenerateDeleteDiff(diffData)
else:
ErrorExit("Unknown file action %s (svn action %s)." % \
(action, svn_status))
svndiff += GenerateDiffHeader(diffData)
for line in diffData.file_body.splitlines():
svndiff.append(diffData.prefix + line)
filecount += 1
if not filecount:
ErrorExit("No valid patches found in output from p4 diff")
return "\n".join(svndiff) + "\n"
def PerforceActionToSvnStatus(self, status):
# Mirroring the list at http://permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
# Is there something more official?
return {
"add" : "A",
"branch" : "A",
"delete" : "D",
"edit" : "M", # Also includes changing file types.
"integrate" : "M",
"move/add" : "M",
"move/delete": "SKIP",
"purge" : "D", # How does a file's status become "purge"?
}[status]
def GetAction(self, filename):
changed_files = self.GetChangedFiles()
    if filename not in changed_files:
ErrorExit("Trying to get base version of unknown file %s." % filename)
return changed_files[filename]
def GetBaseFile(self, filename):
base_filename = self.GetBaseFilename(filename)
base_content = ""
new_content = None
status = self.PerforceActionToSvnStatus(self.GetAction(filename))
if status != "A":
revision = self.GetBaseRevision(base_filename)
if not revision:
ErrorExit("Couldn't find base revision for file %s" % filename)
is_base_binary = self.IsBaseBinary(base_filename)
base_content = self.GetFileContent(base_filename,
revision,
is_base_binary)
is_binary = self.IsPendingBinary(filename)
if status != "D" and status != "SKIP":
relpath = self.GetLocalFilename(filename)
if is_binary:
new_content = open(relpath, "rb").read()
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
    A list of 2-tuples (filename, text) where text is the svn diff output
pertaining to filename.
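  For example (hypothetical input, not from the original script), a diff
  containing an "Index: a.py" section followed by an "Index: b.py" section
  is split into [('a.py', <text of the a.py section>),
  ('b.py', <text of the b.py section>)].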
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
      # When a file is modified, paths use '/' between directories; however,
      # when a property is modified, '\' is used on Windows. Make them the
      # same, otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCSName(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
  we're using, and returns a string indicating which VCS is detected.
Returns:
A pair (vcs, output). vcs is a string indicating which VCS was detected
and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
VCS_CVS, or VCS_UNKNOWN.
Since local perforce repositories can't be easily detected, this method
will only guess VCS_PERFORCE if any perforce options have been specified.
output is a string containing any interesting output from the vcs
detection routine, or None if there is nothing interesting.
"""
for attribute, value in options.__dict__.iteritems():
    if attribute.startswith("p4") and value is not None:
return (VCS_PERFORCE, None)
def RunDetectCommand(vcs_type, command):
"""Helper to detect VCS by executing command.
Returns:
A pair (vcs, output) or None. Throws exception on error.
"""
try:
out, returncode = RunShellWithReturnCode(command)
if returncode == 0:
return (vcs_type, out.strip())
except OSError, (errcode, message):
if errcode != errno.ENOENT: # command not found code
raise
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
if res != None:
return res
# Subversion from 1.7 has a single centralized .svn folder
# ( see http://subversion.apache.org/docs/release-notes/1.7.html#wc-ng )
# That's why we use 'svn info' instead of checking for .svn dir
res = RunDetectCommand(VCS_SUBVERSION, ["svn", "info"])
if res != None:
return res
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
"--is-inside-work-tree"])
if res != None:
return res
  # Detect CVS repos by running "cvs status" and checking for a zero exit code.
res = RunDetectCommand(VCS_CVS, ["cvs", "status"])
if res != None:
return res
return (VCS_UNKNOWN, None)
def GuessVCS(options):
"""Helper to guess the version control system.
This verifies any user-specified VersionControlSystem (by command line
or environment variable). If the user didn't specify one, this examines
the current directory, guesses which VersionControlSystem we're using,
and returns an instance of the appropriate class. Exit with an error
if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
vcs = options.vcs
if not vcs:
vcs = os.environ.get("CODEREVIEW_VCS")
if vcs:
v = VCS_ABBREVIATIONS.get(vcs.lower())
if v is None:
ErrorExit("Unknown version control system %r specified." % vcs)
(vcs, extra_output) = (v, None)
else:
(vcs, extra_output) = GuessVCSName(options)
if vcs == VCS_MERCURIAL:
if extra_output is None:
extra_output = RunShell(["hg", "root"]).strip()
return MercurialVCS(options, extra_output)
elif vcs == VCS_SUBVERSION:
return SubversionVCS(options)
elif vcs == VCS_PERFORCE:
return PerforceVCS(options)
elif vcs == VCS_GIT:
return GitVCS(options)
elif vcs == VCS_CVS:
return CVSVCS(options)
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def CheckReviewer(reviewer):
  """Validate a reviewer -- either a nickname or an email address.
Args:
reviewer: A nickname or an email address.
Calls ErrorExit() if it is an invalid email address.
"""
if "@" not in reviewer:
return # Assume nickname
parts = reviewer.split("@")
if len(parts) > 2:
ErrorExit("Invalid email address: %r" % reviewer)
assert len(parts) == 2
if "." not in parts[1]:
ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
  """Returns the content of the [auto-props] section of Subversion's config
  file as a dictionary.
  Returns:
    A dictionary whose key-value pairs correspond to the [auto-props]
    section's key-value pairs.
    An empty dictionary is returned in the following cases:
- config file doesn't exist, or
- 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
"""
if os.name == 'nt':
subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
else:
subversion_config = os.path.expanduser("~/.subversion/config")
if not os.path.exists(subversion_config):
return {}
config = ConfigParser.ConfigParser()
config.read(subversion_config)
if (config.has_section("miscellany") and
config.has_option("miscellany", "enable-auto-props") and
config.getboolean("miscellany", "enable-auto-props") and
config.has_section("auto-props")):
props = {}
for file_pattern in config.options("auto-props"):
props[file_pattern] = ParseSubversionPropertyValues(
config.get("auto-props", file_pattern))
return props
else:
return {}
def ParseSubversionPropertyValues(props):
  """Parses the given property value, which comes from the [auto-props]
  section, and returns a list of (svn_prop_key, svn_prop_value) pairs.
See the following doctest for example.
>>> ParseSubversionPropertyValues('svn:eol-style=LF')
[('svn:eol-style', 'LF')]
>>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
[('svn:mime-type', 'image/jpeg')]
>>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
[('svn:eol-style', 'LF'), ('svn:executable', '*')]
"""
key_value_pairs = []
for prop in props.split(";"):
key_value = prop.split("=")
assert len(key_value) <= 2
if len(key_value) == 1:
      # If no value is given, use '*' per Subversion's convention.
key_value_pairs.append((key_value[0], "*"))
else:
key_value_pairs.append((key_value[0], key_value[1]))
return key_value_pairs
def GetSubversionPropertyChanges(filename):
  """Return Subversion's 'Property changes on ...' string, which is used in
the patch file.
Args:
filename: filename whose property might be set by [auto-props] config.
Returns:
    A string like 'Property changes on |filename| ...' if the given |filename|
    matches any entries in the [auto-props] section; None otherwise.
"""
global svn_auto_props_map
if svn_auto_props_map is None:
svn_auto_props_map = LoadSubversionAutoProperties()
all_props = []
for file_pattern, props in svn_auto_props_map.items():
if fnmatch.fnmatch(filename, file_pattern):
all_props.extend(props)
if all_props:
return FormatSubversionPropertyChanges(filename, all_props)
return None
def FormatSubversionPropertyChanges(filename, props):
  """Returns Subversion's 'Property changes on ...' string using the given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
options, args = parser.parse_args(argv[1:])
if options.help:
if options.verbose < 2:
# hide Perforce options
parser.epilog = (
"Use '--help -v' to show additional Perforce options. "
"For more help, see "
"http://code.google.com/p/rietveld/wiki/CodeReviewHelp"
)
parser.option_groups.remove(parser.get_option_group('--p4_port'))
parser.print_help()
sys.exit(0)
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
base = options.base_url
if isinstance(vcs, SubversionVCS):
# Guessing the base field is only supported for Subversion.
# Note: Fetching base files may become deprecated in future releases.
guessed_base = vcs.GuessBase(options.download_base)
if base:
if guessed_base and base != guessed_base:
print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
(base, guessed_base)
else:
base = guessed_base
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
data = vcs.PostProcessDiff(data)
if options.print_diffs:
print "Rietveld diff start:*****"
print data
print "Rietveld diff end:*****"
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.use_oauth2:
options.save_cookies = False
rpc_server = GetRpcServer(options.server,
options.email,
options.host,
options.save_cookies,
options.account_type,
options.use_oauth2,
options.oauth2_port,
options.open_oauth2_local_webbrowser)
form_fields = []
repo_guid = vcs.GetGUID()
if repo_guid:
form_fields.append(("repo_guid", repo_guid))
if base:
b = urlparse.urlparse(base)
username, netloc = urllib.splituser(b.netloc)
if username:
logging.info("Removed username from base URL")
base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
b.query, b.fragment))
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
CheckReviewer(reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
CheckReviewer(cc)
form_fields.append(("cc", options.cc))
# Process --message, --title and --file.
message = options.message or ""
title = options.title or ""
if options.file:
if options.message:
ErrorExit("Can't specify both message and message file options")
file = open(options.file, 'r')
message = file.read()
file.close()
if options.issue:
prompt = "Title describing this patch set: "
else:
prompt = "New issue subject: "
title = (
title or message.split('\n', 1)[0].strip() or raw_input(prompt).strip())
if not title and not options.issue:
ErrorExit("A non-empty title is required for a new issue")
# For existing issues, it's fine to give a patchset an empty name. Rietveld
  # doesn't accept that, so use a single space instead.
title = title or " "
if len(title) > 100:
title = title[:99] + '…'
if title and not options.issue:
message = message or title
form_fields.append(("subject", title))
  # If it's a new issue, send the message as the description. Otherwise a new
  # message is created below on upload_complete.
if message and not options.issue:
form_fields.append(("description", message))
  # Send a hash of all the base files so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
    if info[0] is not None:
checksum = md5(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
if options.private:
if options.issue:
print "Warning: Private flag ignored when updating an existing issue."
else:
form_fields.append(("private", "1"))
if options.send_patch:
options.send_mail = True
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
payload = {} # payload for final request
if options.send_mail:
payload["send_mail"] = "yes"
if options.send_patch:
payload["attach_patch"] = "yes"
if options.issue and message:
payload["message"] = message
payload = urllib.urlencode(payload)
rpc_server.Send("/" + issue + "/upload_complete/" + (patchset or ""),
payload=payload)
return issue, patchset
def main():
try:
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
| wishabi/caja-1 | tools/upload.py | Python | apache-2.0 | 96,830 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq add campus`."""
from aquilon.aqdb.model import Country, Campus
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.location import add_location
from aquilon.worker.processes import DSDBRunner
class CommandAddCampus(BrokerCommand):
required_parameters = ["country", "campus"]
def render(self, session, logger, campus, country, fullname, comments, **_):
dbcountry = Country.get_unique(session, country, compel=True)
add_location(session, Campus, campus, dbcountry, fullname=fullname,
comments=comments)
session.flush()
dsdb_runner = DSDBRunner(logger=logger)
dsdb_runner.add_campus(campus, comments)
dsdb_runner.commit_or_rollback()
return
| quattor/aquilon | lib/aquilon/worker/commands/add_campus.py | Python | apache-2.0 | 1,501 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Cross-platform utilities for creating subprocesses.
For internal use only; no backwards-compatibility guarantees.
"""
from __future__ import absolute_import
import platform
import subprocess
import traceback
# On Windows, we need to use shell=True when creating subprocesses for binary
# paths to be resolved correctly.
force_shell = platform.system() == 'Windows'
# We mimic the interface of the standard Python subprocess module.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
CalledProcessError = subprocess.CalledProcessError
def call(*args, **kwargs):
if force_shell:
kwargs['shell'] = True
try:
out = subprocess.call(*args, **kwargs)
except OSError:
raise RuntimeError("Executable {} not found".format(args[0]))
except subprocess.CalledProcessError as error:
if isinstance(args, tuple) and (args[0][2] == "pip"):
raise RuntimeError( \
"Full traceback: {}\n Pip install failed for package: {} \
\n Output from execution of subprocess: {}" \
          .format(traceback.format_exc(), args[0][6], error.output))
else:
raise RuntimeError("Full trace: {}\
\n Output of the failed child process: {} " \
.format(traceback.format_exc(), error.output))
return out
def check_call(*args, **kwargs):
if force_shell:
kwargs['shell'] = True
try:
out = subprocess.check_call(*args, **kwargs)
except OSError:
raise RuntimeError("Executable {} not found".format(args[0]))
except subprocess.CalledProcessError as error:
if isinstance(args, tuple) and (args[0][2] == "pip"):
raise RuntimeError( \
"Full traceback: {} \n Pip install failed for package: {} \
\n Output from execution of subprocess: {}" \
.format(traceback.format_exc(), args[0][6], error.output))
else:
raise RuntimeError("Full trace: {} \
\n Output of the failed child process: {}" \
.format(traceback.format_exc(), error.output))
return out
def check_output(*args, **kwargs):
if force_shell:
kwargs['shell'] = True
try:
out = subprocess.check_output(*args, **kwargs)
except OSError:
raise RuntimeError("Executable {} not found".format(args[0]))
except subprocess.CalledProcessError as error:
if isinstance(args, tuple) and (args[0][2] == "pip"):
raise RuntimeError( \
"Full traceback: {} \n Pip install failed for package: {} \
\n Output from execution of subprocess: {}" \
.format(traceback.format_exc(), args[0][6], error.output))
else:
raise RuntimeError("Full trace: {}, \
output of the failed child process {} "\
.format(traceback.format_exc(), error.output))
return out
def Popen(*args, **kwargs):
if force_shell:
kwargs['shell'] = True
return subprocess.Popen(*args, **kwargs)
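# Minimal usage sketch (illustrative only, not part of the original module):
# these wrappers mirror the standard subprocess API, so for example
#   check_output(['echo', 'hello'])
# behaves like subprocess.check_output, except that shell=True is forced on
# Windows and any failure is re-raised as a RuntimeError with extra context.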
| RyanSkraba/beam | sdks/python/apache_beam/utils/processes.py | Python | apache-2.0 | 3,589 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gevent_server
import socket
import pytest
import time
from multiprocessing import Process
addr = ("127.0.0.1", 8000)
_CRLF = b'\r\n'
# yield fixtures are demons
# We used to have a yield fixture here and in the server.py tests
# which would start up the server and keep it going, I thought, until
# the end of its scope. Since these server fixtures were scoped to
# module, we believed they would terminate at the end of the module.
# The theory seemed to hold true through the end of Step 3, since we
# only ever started one server throughout the entire testing process.
# Once we created the gevent server, there were within the test suite
# two different server creation fixtures, both scoped to module. We
# falsely believed that each of these fixtures would terminate at the
# end of the module. In practice, it seems that a yield fixture doesn't
# terminate until the end of the entire testing session, regardless
# of defined scope.
# The solution, seen below, is to use just a regular fixture with
# a process-terminating finalizer. The scope behaves properly,
# and autouse also still works.
@pytest.fixture(scope='module', autouse=True)
def gevent_server_setup(request):
process = Process(target=gevent_server.run_gevent_server)
process.daemon = True
process.start()
time.sleep(0.1)
def cleanup():
process.terminate()
request.addfinalizer(cleanup)
return process
@pytest.fixture(scope='function')
def client_setup():
client = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP
)
client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
client.connect(addr)
return client
def verify_response(response):
assert 2 * _CRLF in response
head_and_body = response.split((2 * _CRLF), 1)
head_chunk = head_and_body[0].split(_CRLF)
first_line = head_chunk[0].split()
assert first_line[0] == b"HTTP/1.1"
assert first_line[1].isdigit()
assert first_line[2] is not None
################
# FUNCTIONAL TESTS
################
def test_client_receives_ok_on_image_request(client_setup):
client = client_setup
request = _CRLF.join([
b"GET /images/sample_1.png HTTP/1.1",
b"Host: www.host.com:80",
b""
])
ok_header = b"HTTP/1.1 200 OK"
content_type = b'image'
client.sendall(request)
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert ok_header in server_response
assert content_type in server_response
def test_client_receives_ok_on_textfile_request(client_setup):
client = client_setup
request = _CRLF.join([
b"GET /sample.txt HTTP/1.1",
b"Host: www.host.com:80",
b""
])
ok_header = b"HTTP/1.1 200 OK"
content_type = b'text/plain'
client.sendall(request)
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert ok_header in server_response
assert content_type in server_response
def test_client_receives_sample_txt_on_request(client_setup):
client = client_setup
client.sendall(
b"GET /sample.txt HTTP/1.1\r\n"
b"Host: www.host.com:80\r\n"
b"\r\n"
)
text = (
"This is a very simple text file.\n"
"Just to show that we can server it up.\n"
"It is three lines long."
)
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert text in server_response
def test_client_receives_root_filesystem(client_setup):
client = client_setup
client.sendall(b"GET / HTTP/1.1\r\nHost: www.host.com:80\r\n\r\n")
expected_response = [
'<li>a_web_page.html</li>',
'<li>sample.txt</li>',
'<li>make_time.py</li>',
'<li>images</li>'
]
server_response = client.recv(4096)
client.close()
verify_response(server_response)
for line in expected_response:
assert line in server_response
def test_client_receives_error_on_not_get(client_setup):
client = client_setup
client.sendall(b"POST / HTTP/1.1\r\nHost: www.host.com:80\r\n\r\n")
expected_response = (b"405 Method Not Allowed")
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert expected_response in server_response
def test_client_receives_error_on_bad_request(client_setup):
client = client_setup
client.sendall(b"GET / HTTP/1.9\r\nHost: www.host.com:80\r\n\r\n")
expected_response = (b"400 Bad Request")
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert expected_response in server_response
def test_client_receives_error_on_no_host(client_setup):
client = client_setup
client.sendall(b"GET / HTTP/1.1\r\n\r\n")
expected_response = (b"406 Not Acceptable")
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert expected_response in server_response
def test_client_receives_error_on_bad_uri(client_setup):
client = client_setup
client.sendall(b"GET /sadsge HTTP/1.1\r\nHost: www.host.com:80\r\n\r\n")
expected_response = (b"404 Not Found")
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert expected_response in server_response
| tlake/http-server | test_functests_gevent_server.py | Python | mit | 5,433 |
from kinko.nodes import Number, Keyword
from kinko.utils import split_args
from .base import TestCase, NODE_EQ_PATCHER
class TestUtils(TestCase):
ctx = [NODE_EQ_PATCHER]
def testSplitArgs(self):
self.assertEqual(
split_args([Number(1), Number(2), Keyword('foo'), Number(3)]),
([Number(1), Number(2)], {'foo': Number(3)}),
)
self.assertEqual(
split_args([Keyword('foo'), Number(1), Number(2), Number(3)]),
([Number(2), Number(3)], {'foo': Number(1)}),
)
with self.assertRaises(TypeError):
split_args([Number(1), Keyword('foo')])
| vmagamedov/kinko | tests/test_utils.py | Python | bsd-3-clause | 640 |
"""
A python module for reading and changing status of verisure devices through
verisure app API.
"""
__all__ = [
'Error',
'LoginError',
'ResponseError',
'Session'
]
from .session import ( # NOQA
Error,
LoginError,
ResponseError,
Session
)
ALARM_ARMED_HOME = 'ARMED_HOME'
ALARM_ARMED_AWAY = 'ARMED_AWAY'
ALARM_DISARMED = 'DISARMED'
LOCK_LOCKED = 'LOCKED'
LOCK_UNLOCKED = 'UNLOCKED'
SMARTPLUG_ON = 'on'
SMARTPLUG_OFF = 'off'
| persandstrom/python-verisure | verisure/__init__.py | Python | mit | 460 |
import sys, os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
import gettext
import stat
import imp
nebula_dir = os.getenv('NEBULA_DIR')
modules_dir = nebula_dir + '/modules'
set_visuals = imp.load_source('set_visuals', modules_dir + '/set_visuals.py')
gettext.bindtextdomain('games_nebula', nebula_dir + '/locale')
gettext.textdomain('games_nebula')
_ = gettext.gettext
current_dir = sys.path[0]
config_file_dir = current_dir + '/game/Deus Ex - Invisible War'
config_file_path = config_file_dir + '/user.ini'
class GUI:
def __init__(self):
self.config_load()
self.create_main_window()
def config_load(self):
if os.path.exists(config_file_path):
config_file = open(config_file_path, 'r')
config_content = config_file.readlines()
config_file.close()
for line in config_content:
if 'FullscreenViewportX' in line:
self.custom_width = line.split('=')[1].strip('\r\n')
if 'FullscreenViewportY' in line:
self.custom_height = line.split('=')[1].strip('\r\n')
else:
message_dialog = Gtk.MessageDialog(
None,
0,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
_("Launch the game at least once before using this utility.")
)
content_area = message_dialog.get_content_area()
content_area.set_property('margin-left', 10)
content_area.set_property('margin-right', 10)
content_area.set_property('margin-top', 10)
content_area.set_property('margin-bottom', 10)
message_dialog.run()
message_dialog.destroy()
sys.exit()
def config_save(self):
if os.path.exists(config_file_path):
config_file = open(config_file_path, 'r')
config_content = config_file.readlines()
config_file.close()
config_file = open(config_file_path, 'w')
for line in config_content:
if 'FullscreenViewportY' in line:
config_file.write('FullscreenViewportY=' + self.custom_height + '\r\n')
elif 'FullscreenViewportX' in line:
config_file.write('FullscreenViewportX=' + self.custom_width + '\r\n')
else:
config_file.write(line)
config_file.close()
else:
self.custom_width = '1024'
self.custom_height = '768'
config_file = open(config_file_path, 'w')
            config_file.write('[WinDrv.WindowsClient]\r\n')
            config_file.write('FullscreenViewportY=' + self.custom_height + '\r\n')
            config_file.write('FullscreenViewportX=' + self.custom_width + '\r\n')
config_file.close()
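            # With the (assumed) defaults above, the freshly written user.ini
            # would contain:
            #   [WinDrv.WindowsClient]
            #   FullscreenViewportY=768
            #   FullscreenViewportX=1024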
def quit_app(self, window, event):
Gtk.main_quit()
def create_main_window(self):
self.main_window = Gtk.Window(
title = _("Deus Ex 2: Invisible War"),
type = Gtk.WindowType.TOPLEVEL,
window_position = Gtk.WindowPosition.CENTER_ALWAYS,
resizable = False,
)
self.main_window.connect('delete-event', self.quit_app)
grid = Gtk.Grid(
margin_left = 10,
margin_right = 10,
margin_top = 10,
margin_bottom = 10,
row_spacing = 10,
column_spacing = 10,
column_homogeneous = True,
)
label_custom_res = Gtk.Label(
label = _("Custom resolution:")
)
entry_custom_width = Gtk.Entry(
placeholder_text = _("Width"),
max_length = 4,
xalign = 0.5,
text = self.custom_width
)
entry_custom_width.connect('changed', self.cb_entry_custom_width)
entry_custom_height = Gtk.Entry(
placeholder_text = _("Height"),
max_length = 4,
xalign = 0.5,
text = self.custom_height
)
entry_custom_height.connect('changed', self.cb_entry_custom_height)
st = os.stat(config_file_path)
writable = bool(st.st_mode & stat.S_IWUSR)
protected = not writable
self.button_set = Gtk.Button(
label = _("Set"),
sensitive = writable
)
self.button_set.connect('clicked', self.cb_button_set)
checkbutton = Gtk.CheckButton(
label = _("Protect settings file from overwriting (root)"),
active = protected
)
checkbutton.connect('clicked', self.cb_checkbutton)
grid.attach(label_custom_res, 0, 0, 2, 1)
grid.attach(entry_custom_width, 0, 1, 1, 1)
grid.attach(entry_custom_height, 1, 1, 1, 1)
grid.attach(self.button_set, 0, 2, 2, 1)
grid.attach(checkbutton, 0, 3, 2, 1)
self.main_window.add(grid)
self.main_window.show_all()
def cb_entry_custom_width(self, entry):
text = entry.get_text().strip()
new_text = (''.join([i for i in text if i in '0123456789']))
entry.set_text(new_text)
self.custom_width = new_text
def cb_entry_custom_height(self, entry):
text = entry.get_text().strip()
new_text = (''.join([i for i in text if i in '0123456789']))
entry.set_text(new_text)
self.custom_height = new_text
def cb_checkbutton(self, button):
user = os.getenv('USER')
if button.get_active():
os.system('chmod -w "' + config_file_path +
'" && gksudo chown root:root "' + config_file_path + '"')
self.button_set.set_sensitive(False)
else:
os.system('gksudo chown ' + user + ':' + user + ' "' + config_file_path +
'" && chmod +w "' + config_file_path + '"')
self.button_set.set_sensitive(True)
def cb_button_set(self, button):
if (self.custom_width == '') or (self.custom_height == ''):
message_dialog = Gtk.MessageDialog(
self.main_window,
0,
Gtk.MessageType.ERROR,
Gtk.ButtonsType.OK,
_("Error")
)
message_dialog.format_secondary_text(_("You have to set width and height."))
content_area = message_dialog.get_content_area()
content_area.set_property('margin-left', 10)
content_area.set_property('margin-right', 10)
content_area.set_property('margin-top', 10)
content_area.set_property('margin-bottom', 10)
message_dialog.run()
message_dialog.destroy()
return
self.config_save()
def main():
import sys
app = GUI()
Gtk.main()
if __name__ == '__main__':
sys.exit(main())
| yancharkin/games_nebula_goglib_scripts | deus_ex_invisible_war/settings.py | Python | gpl-3.0 | 6,894 |
#!/usr/bin/env python
# ============================================================================
'''
This file is part of the lenstractor project.
Copyright 2012 David W. Hogg (NYU) and Phil Marshall (Oxford).
'''
# ============================================================================
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
# Fonts, latex:
matplotlib.rc('font',**{'family':'serif', 'serif':['TimesNewRoman'], 'size':18.0})
matplotlib.rc('text', usetex=True)
import os
import logging
import numpy as np
import pyfits
import time
import string
from astrometry.util import util
import tractor
import lenstractor
# ============================================================================
def main():
"""
NAME
LensTractor.py
PURPOSE
Run the Tractor on a deck of single object cutout images.
Read in an image and its weight map, guess the PSF, put an object at the
    centre of the field, and then optimize the catalog and PSF.
COMMENTS
The idea is to identify good lens candidates by principled model
selection: two well-defined models competing against each other, given
multi-epoch imaging data. The Nebula model (1 extended source, plus
N=2, 3 or 4 point sources, with sub-models denoted by "NebulaN") is very
flexible, so should be better at fitting the data in general than the
Lens model (1 extended source, plus 1 background point source). However,
when the Lens provides a good fit, it does so at lower cost (fewer
parameters), so should win by Bayesian information criteria (we use BIC
as a cheap proxy for evidence ratio).
The workflow we probably want to aim for is something like the following:
* Fit PSF images with PSF models; fix PSFs
* Try Nebula2
* Try Nebula4
if Nebula2 beats Nebula4:
Nebula = Nebula2
else:
Nebula = Nebula4
* Try Lens (initialised with Nebula)
if Lens beats Nebula:
Classification = 'Lens'
Return YES
else:
Classification = 'Nebula'
Return NO
Initialisation of Lens via Nebula could be tricky - there is some
parsing to be done, and decisions to be made... In practice we may end
up working just with the Nebula output, which should be at least
easier to interpret than a SExtractor catalog, for example.
Open questions:
Does it make sense to dogmatically associate the extended object with
the deflector?
YES: detection of a deflector galaxy will be needed for a convincing
candidate anyway.
NO: using the extended object to model a high S/N merging image
system should not be punished
To be investigated.
How are we going to interpret the point image positions if we do not
have an estimated deflector position?
OPTIONS
-h --help Print this message
-v --verbose Verbose operation
-s --sample Sample the posterior PDF instead of optimizing
-x --no-plots Do not plot progress
-l --lens Only fit lens model, initialized from scratch
INPUTS
*.fits Deck of postcard images
OPTIONAL INPUTS
-n --nebula K Only fit NebulaK model, initialized from scratch
--manual catalog Initialize model positions from catalog
--optimization-rounds Nr Number of rounds of optimization [2]
--optimization-steps-catalog Nc Number of steps per round spent
optimizing source catalog [10]
--optimization-steps-psf Np Number of steps per round spent
optimizing PSF catalog [2]
-o --output outstem Stem of output catalog filename
--survey name Name of survey (for io formats)
OUTPUTS
stdout Useful information
*.png Plots in png format
To be implemented:
lenstractor_progress.log Logged output
lenstractor_results.txt Model comparison results
lenstractor_lens.cat Lens model parameters, including lightcurves
lenstractor_nebula.cat Nebula model parameters, including lightcurves
EXAMPLES
python LensTractor.py -n 4 \
-o examples/ps1/H1413+117_10x10arcsec \
examples/ps1/H1413+117_10x10arcsec_55*fits > \
examples/ps1/H1413+117_10x10arcsec_Nebula4.log
python LensTractor.py -n 2 \
-o examples/sdss/0951+2635/0951+2635 \
examples/sdss/0951+2635/*sci.fits > \
examples/sdss/0951+2635/0951+2635_Nebula2.log
set id = KIDS_SLID_10058881_SID_8668
python LensTractor.py -v -l -z --survey KIDS \
-o examples/kids/${id} \
examples/kids/${id}_u_???.fits \
examples/kids/${id}_g_???.fits \
examples/kids/${id}_r_???.fits > \
examples/kids/${id}.log &
DEPENDENCIES
* The Tractor astrometry.net/svn/trunk/projects/tractor
* emcee github.com/danfm/emcee
* astrometry.net astrometry.net/svn/trunk/util
BUGS
- SDSS examples show bad WCS treatment...
- Possible problems with image alignment
- Memory leak: restrict no. of sampling iterations :-(
- Header PSF FWHM sometimes NaN, no recovery from this yet
FEATURE REQUESTS
- Lens initialisation, esp source positions, needs careful attention
- StepSizes need optimizing for lens model, esp source position
- Point source mags are not variable
- PSF not being optimized correctly - missing derivatives?
- PhotoCal may need optimizing if zpts are untrustworthy!
HISTORY
2012-07-06 First predicted Lens images Marshall/Hogg (Oxford/NYU)
2013-08- Adapted for KIDS Buddelmeier (Kapteyn)
2014-04- Refactoring for easier experimentation Marshall/Agnello (KIPAC/UCSB)
"""
# --------------------------------------------------------------------
from argparse import ArgumentParser
import sys
# Set available options:
parser = ArgumentParser()
# List of files:
parser.add_argument('inputfiles', metavar='filename', nargs='*') # '*' means there must be 0 or more
# Verbosity:
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Make more verbose')
# Optimizing only:
parser.add_argument('-z', '--optimize', dest='optimize', action='store_true', default=False, help='Optimize posterior PDF')
# Sampling only:
parser.add_argument('-s', '--sample', dest='MCMC', action='store_true', default=False, help='Sample posterior PDF')
# Plotting:
parser.add_argument('-x', '--no-plots', dest='noplots', action='store_true', default=False, help='Skip plotting')
# Lens model only:
parser.add_argument('-l', '--lens', dest='lens', action='store_true', default=False, help='Fit Lens model')
# Nebula model only:
parser.add_argument('-n', '--nebula', dest='K', type=int, default=0, help='Fit NebulaK model, provide K')
# Output filename:
parser.add_argument('-o', '--output', dest='outstem', type=str, default='lenstractor.cat', help='Output catalog filename stem')
# Survey we are working on (affects data read-in):
parser.add_argument('--survey', dest='survey', type=str, default="PS1", help="Survey (SDSS, PS1 or KIDS)")
# Use SDSS sky server to get data:
parser.add_argument('--SDSS', dest='rcfstring', type=str, default="None", help="Use SDSS skyserver to return cutouts, supply run,camcol,field,ra,dec,roi")
# Manual input of model initialization:
parser.add_argument('--manual', dest='catalog', type=str, default="None", help="Catalog of Nebula model parameters, for initializing positions")
# Read in options and arguments - note only sci and wht images are supplied:
args = parser.parse_args()
if (args.rcfstring == 'None' and len(args.inputfiles) < 1):
        # parser.print_help()
        print main.__doc__   # i.e. dump the usage text above (main's docstring)
        sys.exit(-1)
vb = args.verbose
# Workflow:
if args.lens:
# modelnames = ['Nebula2','Lens']
# modelnames = ['Nebula4','Lens']
modelnames = ['Lens']
elif args.K > 0:
modelnames = ['Nebula'+str(args.K)]
else:
# modelnames = ['Nebula1','Nebula2','Nebula4','Lens']
modelnames = ['Nebula2','Nebula4','Lens']
# BIC = dict(zip(modelnames,np.zeros(len(modelnames))))
BIC = dict()
if vb:
print "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *"
print " LensTractor "
print " Fitting",modelnames," models to a deck of FITS postcards"
print "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *"
# Manual initialization:
if args.catalog != 'None':
manual = True
else:
manual = False
# SDSS skyserver:
if args.rcfstring != 'None':
survey = 'SDSS'
source = 'skyserver'
data = [float(x) for x in args.rcfstring.split(',')]
rcf = data[0:5]
rcf[0:3] = [int(x) for x in rcf[0:3]]
roi = data[5]
assert len(rcf) == 5
else:
survey = args.survey
source = 'local'
# -------------------------------------------------------------------------
if survey == 'SDSS' and source == 'skyserver':
# Download images from SDSS skyserver (using IO functions in sdss.py)
datadir = string.join(args.outstem.split('/')[0:-1],'/')
if len(datadir) == 0: datadir = '.'
images,centroids,total_mags,bands = lenstractor.getSDSSdata(rcf,roi,datadir,vb=vb)
else:
# Read in images (using IO functions in dm.py)
# Organise the deck of inputfiles into scifiles and varfiles:
scifiles,varfiles = lenstractor.Riffle(args.inputfiles,vb=vb)
# Read into Tractor Image objects, and see what filters we have:
images,centroids,total_mags,bands = lenstractor.Deal(scifiles,varfiles,SURVEY=args.survey,vb=vb)
assert len(images) > 0
# -------------------------------------------------------------------------
# Estimate object centroid and SED:
position,SED = lenstractor.Turnover(bands,total_mags,centroids,vb=vb)
# Package up:
dataset = list(images)
# -------------------------------------------------------------------------
# Step through all the models in the workflow, initialising and fitting:
previous = None
counter = 0
for modelname in modelnames:
if vb:
print "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *"
print "Initializing model: "+modelname
model = lenstractor.Model(modelname,vb=vb)
if modelname != 'Lens':
if manual:
model.initialize('from_scratch', position=args.catalog, SED=SED)
else:
if previous is None:
model.initialize('from_scratch', position=position, SED=SED)
else:
model.initialize(previous)
else:
if previous is None:
model.initialize('from_scratch', position=position, SED=SED)
else:
model.initialize(previous)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Set up logging to the terminal by The Tractor:
if vb:
# lvl = logging.DEBUG
lvl = logging.INFO
else:
# lvl = logging.INFO
lvl = logging.ERROR
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Start a lenstractor, which will make a catalog one src at a time.
# Pass in a copy of the image list, so that the PSF etc are
# initialised correctly for each model.
LT = lenstractor.LensTractor(dataset,model,args.outstem,args.survey,counter=counter,vb=vb,noplots=args.noplots)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if args.optimize:
LT.drive(by='optimizing')
elif args.MCMC:
LT.drive(by='sampling')
else:
LT.drive(by='cunning_and_guile')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if vb:
print "Fit complete."
print "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Compute BIC for this fit:
BIC[model.flavor] = LT.getBIC()
if vb: print modelname+" results: chisq, K, N, BIC =",LT.minchisq,LT.K,LT.N,BIC[model.flavor]
# Have model print itself:
for component in model.srcs:
if vb: print component
# Write out simple one-line parameter catalog:
outfile = LT.write_catalog()
if vb: print modelname+" parameter values written to: "+outfile
# Save Nebula2 or Nebula4? Depends on BIC...
previous = model.copy()
counter = LT.counter + 1
# -------------------------------------------------------------------------
# Make some decision about the nature of this system.
if len(modelnames) > 1:
assert model.name == 'Lens'
# Compare models and report:
if vb: print "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *"
if vb: print "BIC = ",BIC
logBF = -0.5*(BIC['Lens'] - BIC['Nebula'])
Nimg = model.srcs[0].getMultiplicity()
if vb: print "Hypothesis test result: Bayes factor in favour of Lens is exp[",logBF,"]"
if Nimg < 2:
if vb: print "BUT: Lens predicts only 1 image, so it's not a strong lens."
if vb: print "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *"
# Record result as a "cookie":
if logBF > 0 and Nimg > 1:
result = 'Lens'
else:
result = 'Nebula'
cookie = LT.set_cookie(args.outstem, result)
if vb: print 'Result "'+result+'" written to '+cookie
# -------------------------------------------------------------------------
print "LensTractor stopping."
return
# ============================================================================
if __name__ == '__main__':
main()
| davidwhogg/LensTractor | LensTractor.py | Python | gpl-2.0 | 14,549 |
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function
from datetime import datetime, date
from pandas.compat import range, lzip, map, zip
import pandas.compat as compat
import numpy as np
import traceback
from pandas.core.datetools import format as date_format
from pandas.core.api import DataFrame
#------------------------------------------------------------------------------
# Helper execution function
def execute(sql, con, retry=True, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql: string
Query to be executed
con: database connection instance
Database connection. Must implement PEP249 (Database API v2.0).
retry: bool
Not currently implemented
cur: database cursor, optional
        Must implement PEP249 (Database API v2.0). If cursor is not provided,
one will be obtained from the database connection.
params: list or tuple, optional
List of parameters to pass to execute method.
Returns
-------
Cursor object
"""
try:
if cur is None:
cur = con.cursor()
if params is None:
cur.execute(sql)
else:
cur.execute(sql, params)
return cur
except Exception:
try:
con.rollback()
except Exception: # pragma: no cover
pass
print('Error on sql %s' % sql)
raise
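# Illustrative usage sketch (not part of the original module): execute() only
# needs a PEP 249 connection, so an in-memory sqlite3 database is enough to try
# it. The table and values below are made up for the demo.
#
#   import sqlite3
#   con = sqlite3.connect(':memory:')
#   execute("CREATE TABLE people (name TEXT, age INTEGER)", con)
#   execute("INSERT INTO people VALUES (?, ?)", con, params=('Ada', 36))
#   con.commit()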
def _safe_fetch(cur):
try:
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
except Exception as e: # pragma: no cover
excName = e.__class__.__name__
if excName == 'OperationalError':
return []
def tquery(sql, con=None, cur=None, retry=True):
"""
Returns list of tuples corresponding to each row in given sql
query.
If only one column selected, then plain list is returned.
Parameters
----------
sql: string
SQL query to be executed
con: SQLConnection or DB API 2.0-compliant connection
cur: DB API 2.0 cursor
Provide a specific connection or a specific cursor if you are executing a
lot of sequential statements and want to commit outside.
"""
cur = execute(sql, con, cur=cur)
result = _safe_fetch(cur)
if con is not None:
try:
cur.close()
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName == 'OperationalError': # pragma: no cover
print('Failed to commit, may need to restart interpreter')
else:
raise
traceback.print_exc()
if retry:
return tquery(sql, con=con, retry=False)
if result and len(result[0]) == 1:
# python 3 compat
result = list(lzip(*result)[0])
elif result is None: # pragma: no cover
result = []
return result
def uquery(sql, con=None, cur=None, retry=True, params=None):
"""
Does the same thing as tquery, but instead of returning results, it
returns the number of rows affected. Good for update queries.
"""
cur = execute(sql, con, cur=cur, retry=retry, params=params)
result = cur.rowcount
try:
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName != 'OperationalError':
raise
traceback.print_exc()
if retry:
print('Looks like your connection failed, reconnecting...')
return uquery(sql, con, retry=False)
return result
def read_frame(sql, con, index_col=None, coerce_float=True, params=None):
"""
Returns a DataFrame corresponding to the result set of the query
string.
Optionally provide an index_col parameter to use one of the
columns as the index. Otherwise will be 0 to len(results) - 1.
Parameters
----------
sql: string
SQL query to be executed
con: DB connection object, optional
index_col: string, optional
column name to use for the returned DataFrame object.
coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params: list or tuple, optional
List of parameters to pass to execute method.
"""
cur = execute(sql, con, params=params)
rows = _safe_fetch(cur)
columns = [col_desc[0] for col_desc in cur.description]
cur.close()
con.commit()
result = DataFrame.from_records(rows, columns=columns,
coerce_float=coerce_float)
if index_col is not None:
result = result.set_index(index_col)
return result
frame_query = read_frame
read_sql = read_frame
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: name of SQL table
con: an open SQL database connection object
flavor: {'sqlite', 'mysql', 'oracle'}, default 'sqlite'
if_exists: {'fail', 'replace', 'append'}, default 'fail'
fail: If table exists, do nothing.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if does not exist.
"""
if 'append' in kwargs:
import warnings
warnings.warn("append is deprecated, use if_exists instead",
FutureWarning)
if kwargs['append']:
if_exists = 'append'
else:
if_exists = 'fail'
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'%s' is not valid for if_exists" % if_exists)
exists = table_exists(name, con, flavor)
if if_exists == 'fail' and exists:
raise ValueError("Table '%s' already exists." % name)
# creation/replacement dependent on the table existing and if_exist criteria
create = None
if exists:
if if_exists == 'fail':
raise ValueError("Table '%s' already exists." % name)
elif if_exists == 'replace':
cur = con.cursor()
cur.execute("DROP TABLE %s;" % name)
cur.close()
create = get_schema(frame, name, flavor)
else:
create = get_schema(frame, name, flavor)
if create is not None:
cur = con.cursor()
cur.execute(create)
cur.close()
cur = con.cursor()
# Replace spaces in DataFrame column names with _.
safe_names = [s.replace(' ', '_').strip() for s in frame.columns]
flavor_picker = {'sqlite': _write_sqlite,
'mysql': _write_mysql}
func = flavor_picker.get(flavor, None)
if func is None:
raise NotImplementedError
func(frame, name, safe_names, cur)
cur.close()
con.commit()
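# Rough sketch of the intended call pattern (hypothetical table and column
# names; kept as comments so importing this module stays side-effect free):
#
#   import sqlite3
#   con = sqlite3.connect(':memory:')
#   df = DataFrame({'a': [1, 2], 'b': ['x', 'y']})
#   write_frame(df, 'demo', con, flavor='sqlite', if_exists='replace')
#   read_frame("SELECT * FROM demo", con)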
def _write_sqlite(frame, table, names, cur):
bracketed_names = ['[' + column + ']' for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join(['?'] * len(names))
insert_query = 'INSERT INTO %s (%s) VALUES (%s)' % (
table, col_names, wildcards)
# pandas types are badly handled if there is only 1 column ( Issue #3628 )
if not len(frame.columns) == 1:
data = [tuple(x) for x in frame.values]
else:
data = [tuple(x) for x in frame.values.tolist()]
cur.executemany(insert_query, data)
def _write_mysql(frame, table, names, cur):
bracketed_names = ['`' + column + '`' for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([r'%s'] * len(names))
insert_query = "INSERT INTO %s (%s) VALUES (%s)" % (
table, col_names, wildcards)
data = [tuple(x) for x in frame.values]
cur.executemany(insert_query, data)
def table_exists(name, con, flavor):
flavor_map = {
'sqlite': ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name='%s';") % name,
'mysql': "SHOW TABLES LIKE '%s'" % name}
query = flavor_map.get(flavor, None)
if query is None:
raise NotImplementedError
return len(tquery(query, con)) > 0
def get_sqltype(pytype, flavor):
sqltype = {'mysql': 'VARCHAR (63)',
'sqlite': 'TEXT'}
if issubclass(pytype, np.floating):
sqltype['mysql'] = 'FLOAT'
sqltype['sqlite'] = 'REAL'
if issubclass(pytype, np.integer):
#TODO: Refine integer size.
sqltype['mysql'] = 'BIGINT'
sqltype['sqlite'] = 'INTEGER'
if issubclass(pytype, np.datetime64) or pytype is datetime:
# Caution: np.datetime64 is also a subclass of np.number.
sqltype['mysql'] = 'DATETIME'
sqltype['sqlite'] = 'TIMESTAMP'
    if pytype is date:  # 'date' is the class imported from datetime above
sqltype['mysql'] = 'DATE'
sqltype['sqlite'] = 'TIMESTAMP'
if issubclass(pytype, np.bool_):
sqltype['sqlite'] = 'INTEGER'
return sqltype[flavor]
def get_schema(frame, name, flavor, keys=None):
"Return a CREATE TABLE statement to suit the contents of a DataFrame."
lookup_type = lambda dtype: get_sqltype(dtype.type, flavor)
# Replace spaces in DataFrame column names with _.
safe_columns = [s.replace(' ', '_').strip() for s in frame.dtypes.index]
column_types = lzip(safe_columns, map(lookup_type, frame.dtypes))
if flavor == 'sqlite':
columns = ',\n '.join('[%s] %s' % x for x in column_types)
else:
columns = ',\n '.join('`%s` %s' % x for x in column_types)
keystr = ''
if keys is not None:
if isinstance(keys, compat.string_types):
keys = (keys,)
keystr = ', PRIMARY KEY (%s)' % ','.join(keys)
template = """CREATE TABLE %(name)s (
%(columns)s
%(keystr)s
);"""
create_statement = template % {'name': name, 'columns': columns,
'keystr': keystr}
return create_statement
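# For a frame with an int64 column "a" and a float64 column "b", the sqlite
# flavor would produce roughly the following statement (sketch only; the table
# name "demo" is arbitrary and exact whitespace may differ):
#
#   CREATE TABLE demo (
#     [a] INTEGER,
#     [b] REAL
#   );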
def sequence2dict(seq):
"""Helper function for cx_Oracle.
For each element in the sequence, creates a dictionary item equal
to the element and keyed by the position of the item in the list.
>>> sequence2dict(("Matt", 1))
{'1': 'Matt', '2': 1}
Source:
http://www.gingerandjohn.com/archives/2004/02/26/cx_oracle-executemany-example/
"""
d = {}
for k, v in zip(range(1, 1 + len(seq)), seq):
d[str(k)] = v
return d
| alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/io/sql.py | Python | gpl-3.0 | 10,681 |
from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The multi-layer perceptron used here is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (100, 50, ),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
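# Tiny worked example of the min-max scaling above (numbers are made up):
# for grid = [[0, 10], [5, 20]], setupScales() yields min = [0, 10] and
# max = [5, 20], and scaleGrid() returns [[0.0, 0.0], [1.0, 1.0]] -- each
# column is mapped onto [0, 1] via (cell - min[col]) / (max[col] - min[col]).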
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
'''
Teach a neural net to detect when exactly two of its three inputs are 1.
'''
count22 = DataFrame()
count22.data = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
count22.feature_names = ['a', 'b', 'c']
count22.target = [0, 0, 0, 1,
0, 1, 1, 0]
count22.target_names = ['Two']
countMLPC = MLPClassifier(
hidden_layer_sizes = (3,), # (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
# learning_rate = 'constant',
# power_t = 0.5,
max_iter = 10, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
verbose = True # False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
Examples = {
# 'TrumpDefault': {
# 'frame': trumpECHP,
# },
# 'TrumpSGD': {
# 'frame': trumpECHP,
# 'mlpc': mlpc
# },
# 'TrumpScaled': {
# 'frame': trumpScaled,
# },
'Count to 2': {
'frame': count22,
'mlpc': countMLPC
}
} | armadill-odyssey/aima-python | submissions/aartiste/myNN.py | Python | mit | 6,217 |
from calendaradapter.calendar import Calendar
import os
if __name__ == "__main__":
url = os.getenv('EXCHANGE_URL')
username = os.getenv('EXCHANGE_USERNAME')
password = os.getenv('EXCHANGE_PASSWORD')
calendar = Calendar(url=url, username=username, password=password)
for event in calendar.events:
print ("{start} {stop} - {subject}".format(start=event.start, stop=event.end, subject=event.subject))
| bmcmanus/availability | main.py | Python | gpl-3.0 | 428 |
"""Helper to execute actions on the database independantly from the interface
and output format."""
import logging, datetime, sys, re
from slam import generator, models
from slam.log import DbLogHandler
# set-up logging to the database
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
DBLOGHANDLER = DbLogHandler()
DBLOGHANDLER.setLevel(logging.INFO)
LOGGER.addHandler(DBLOGHANDLER)
STDOUTHANDLER = logging.StreamHandler()
STDOUTHANDLER.setLevel(logging.INFO)
LOGGER.addHandler(STDOUTHANDLER)
class InexistantObjectError(Exception):
"""Exception raised when the given object name was not found in the
database."""
pass
class DuplicateObjectError(Exception):
"""Exception raised when trying to create an object that already exists."""
#def _get_message(self):
# return self._message
#def _set_message(self, message):
# self._message = message
#message = property(_get_message, _set_message)
#def __init__(self, message):
# Call the base class constructor with the parameters it needs
#Exception.__init__(self, message)
pass
class MissingParameterError(Exception):
"""Exception raised when a parameter is missing."""
pass
class ConfigurationFormatError(Exception):
"""Exception raised when the given configuration format is not
supported."""
pass
class PropertyFormatError(Exception):
"""Exception raised if the property format is invalid."""
pass
def isValidHostname(hostname):
    disallowed = re.compile(r"[^a-zA-Z\d\-\.]")
    # One could also validate each dot-separated label, i.e. check
    # len(x) and not disallowed.search(x) for every x in hostname.split("."):
    #return all(map(lambda x: len(x) and not disallowed.search(x), hostname.split(".")))
    return len(hostname) and not disallowed.search(hostname)
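# Illustrative behaviour of the check above (example names are hypothetical):
#   isValidHostname("web-01.example.org")  -> truthy (letters, digits, '-', '.' only)
#   isValidHostname("bad name")            -> falsy  (space is rejected)
#   isValidHostname("")                    -> falsy  (empty string)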
def get_host(host_name=None):
"""Retrieve a host object from the database."""
host = None
if host_name:
if models.Host.objects.filter(name=host_name):
host = models.Host.objects.get(name=host_name)
elif models.Alias.objects.filter(name=host_name):
host = models.Alias.objects.get(name=host_name).host
else:
raise InexistantObjectError("Could not find host named: "
+ str(host_name))
return host
def get_pool(pool_name=None, category=None):
"""Retrieve a pool object from the database."""
if pool_name:
if not models.Pool.objects.filter(name=pool_name).count():
raise InexistantObjectError("Could not find pool named: "
+ str(pool_name))
return models.Pool.objects.get(name=pool_name)
elif category:
for poolobj in models.Pool.objects.exclude(category=""):
if category in poolobj.category.split(","):
return poolobj
raise InexistantObjectError("No pool in category: " + category)
else:
return None
def create_pool(pool_name=None, definition=None, category=None):
"""Try to retrieve the given *pool_name* from the database or a create a
new one with the given *definition* otherwise."""
if models.Pool.objects.filter(name=pool_name):
raise DuplicateObjectError("Pool named \""
+ pool_name + "\" already exists.")
if not pool_name:
raise MissingParameterError("Missing a name for the pool to create.")
if not definition:
raise MissingParameterError("Missing pool definition.")
if category is None:
category = ""
else:
category = ",".join(category)
pool = models.Pool.create(name=pool_name, definition=definition,
category=category)
LOGGER.info("Created pool: " + str(pool))
pool.save()
return pool
def create_generator(name, type_, outputfile, default=False, header=None,
footer=None, checkfile=None, timeout=None, domain=None, pools=None):
"""Create a new generator object."""
if name and models.Config.objects.filter(name=name):
raise DuplicateObjectError("Generator \"" + name
+ "\" already exists.")
if not name:
raise MissingParameterError(
"You must provide a name for the new generator.")
if not type_:
raise MissingParameterError(
"You must provide a type for the new generator.")
if not outputfile:
raise MissingParameterError(
"You must provide an output file for the new generator.")
genobj = None
if type_ == "bind":
genobj = generator.BindConfig.create(name=name, default=default,
outputfile=outputfile, header=header, footer=footer,
checkfile=checkfile, update=True, timeout=timeout)
elif type_ == "revbind":
genobj = generator.RevBindConfig.create(name=name, default=default,
outputfile=outputfile, header=header, footer=footer,
checkfile=checkfile, update=True, timeout=timeout)
elif type_ == "dhcp":
genobj = generator.DhcpdConfig.create(name=name, default=default,
outputfile=outputfile, header=header, footer=footer,
checkfile=checkfile, update=True)
elif type_ == "quattor":
genobj = generator.QuattorConfig.create(name=name, default=default,
outputfile=outputfile, header=header, checkfile=checkfile,
footer=footer, update=True)
elif type_ == "laldns":
genobj = generator.LalDnsConfig.create(name=name, default=default,
outputfile=outputfile, header=header, checkfile=checkfile,
footer=footer, update=True)
else:
raise MissingParameterError("Wrong configuration format: " + type_)
genobj.save()
LOGGER.info("Created new generator: " + str(genobj))
if pools:
for pool in pools:
pool = get_pool(pool)
pool.generator.add(genobj)
genobj.save()
return genobj
def get_generator(name):
"""Get the correct configuration generator object."""
genobj = None
if name:
if not models.Config.objects.filter(name=name):
raise InexistantObjectError("Could not find generator: "
+ name)
confobj = models.Config.objects.get(name=name)
if confobj.conftype == "bind":
genobj = generator.BindConfig.objects.get(name=name)
elif confobj.conftype == "rbind":
genobj = generator.RevBindConfig.objects.get(name=name)
elif confobj.conftype == "dhcp":
genobj = generator.DhcpdConfig.objects.get(name=name)
elif confobj.conftype == "quatt":
genobj = generator.QuattorConfig.objects.get(name=name)
elif confobj.conftype == "laldns":
genobj = generator.LalDnsConfig.objects.get(name=name)
else:
raise InexistantObjectError("Could not find generator: "
+ name)
return genobj
def get_default_generators(conf_type=None):
"""Get every generators marked as default, eventually filtered by
configuration type."""
gens = generator.Config.objects.filter(default=True)
if conf_type:
gens = gens.filter(conftype=conf_type)
res = []
for gen in gens:
tmp = get_generator(gen.name)
if tmp:
res.append(tmp)
return res
def modify_generator(name, default=False, outputfile=None, header=None,
footer=None, checkfile=None, timeout=None, domain=None, pools=None):
"""Modify an existing generator."""
gen = get_generator(name)
if gen is None:
raise InexistantObjectError("Could not find generator: " + name)
logmsg = ""
if default:
gen.default = not gen.default
if gen.default:
logmsg += ", set as default"
else:
logmsg += ", removed default"
if outputfile:
gen.outputfile = outputfile
logmsg += ", new output file (" + str(outputfile) + ")"
if header:
gen.headerfile = header
logmsg += ", new header file (" + str(header) + ")"
if footer:
gen.footerfile = footer
logmsg += ", new footer file (" + str(footer) + ")"
if checkfile:
gen.checkfile = ", ".join(checkfile)
logmsg += ", new check files (" + str(checkfile) + ")"
if timeout:
gen.timeout = timeout
logmsg += ", new timeout (" + str(timeout) + ")"
if domain:
gen.domain = domain
logmsg += ", new domain (" + str(domain) + ")"
if pools:
gen.pool_set.clear()
for pool in pools:
gen.pool_set.add(get_pool(pool))
logmsg += ", new pool (" + str(pool.name) + ")"
# skip the first comma
if logmsg:
logmsg = logmsg[1:]
LOGGER.info("Modified generator " + str(name) + logmsg)
gen.save()
def generate(gen_name=None, pool_name=None, conf_format=None,
output=None, header=None, footer=None, checkfile=None, timeout=None,
domain=None, update=True):
"""Generate a specified configuration file for the addresses in the given
pool. It returns a list of duplicate records found in the checkfile of the
generator."""
pools = []
if pool_name:
for pool in pool_name:
pools.append(get_pool(pool))
gens = []
gen = None
if gen_name:
gen = get_generator(gen_name)
elif not conf_format and not output:
gens = get_default_generators(conf_format)
else:
if conf_format == "bind":
gen = generator.BindConfig.create(outputfile=output, header=header,
footer=footer, checkfile=checkfile, timeout=timeout,
update=update, domain=domain)
elif conf_format == "revbind":
gen = generator.RevBindConfig.create(outputfile=output,
header=header, footer=footer, checkfile=checkfile,
timeout=timeout, update=update, domain=domain)
elif conf_format == "dhcp":
gen = generator.DhcpdConfig.create(outputfile=output,
header=header, footer=footer, checkfile=checkfile,
update=update, domain=domain)
elif conf_format == "quattor":
gen = generator.QuattorConfig.create(outputfile=output,
header=header, footer=footer, checkfile=checkfile,
update=update, domain=domain)
elif conf_format == "laldns":
gen = generator.LalDnsConfig.create(outputfile=output,
header=header, footer=footer, checkfile=checkfile,
update=update, domain=domain)
else:
raise ConfigurationFormatError(
"Unknown configuration format: " + str(conf_format))
# Individual generators are treated as a list of one generator
if (not gens) and gen:
gens = [gen]
duplicates = []
for gen in gens:
if "output" not in gen.__dict__ or not gen.output:
gen.load()
relatedpools = models.Pool.objects.filter(generator__name=gen.name,
generator__conftype=gen.conftype)
if relatedpools:
pools = list(relatedpools)
genpools = []
if not pools:
pools = models.Pool.objects.all()
for pool in pools:
hosts = []
for addr in models.Address.objects.filter(pool=pool):
if addr.host:
mx_record = ""
if models.Property.objects.filter(
name="mx", host=addr.host):
mx_record = models.Property.objects.get(
name="mx", host=addr.host).value
hosts.append((addr.host,
addr, addr.host.alias_set.all(), mx_record))
genpools.append((pool, hosts))
gen.backup()
poolmsg = ""
if pools:
poolmsg = " for pool " + ", ".join([pool.name for pool in pools])
if update:
LOGGER.info("Update configuration with generator " + str(gen)
+ poolmsg)
duplicates.extend(gen.updateconf(genpools))
else:
LOGGER.info("Create new configuration with generator " + str(gen)
+ poolmsg)
duplicates.extend(gen.createconf(genpools))
for dup_host, dup_file, dup_line in duplicates:
LOGGER.warn("Duplicate record: a record already exists for "
+ "host " + str(dup_host) + " in file " + dup_file
+ " at line " + str(dup_line))
return duplicates
def allocate_address(pool, host=None, address=None, random=False,
category=None, duration=None):
"""Allocate a new address from *pool* to *host*."""
if not pool:
if address:
for poolobj in models.Pool.objects.all():
poolobj._update()
if (poolobj.addr_range is not None
and address in poolobj.addr_range):
pool = poolobj
break
elif category:
print("foo")
pool = get_pool(None, category)
else:
raise MissingParameterError("Could not find a pool for the given"
" pool name, category or address.")
addr = None
if host:
if address:
addr = pool.allocate(address, host)
else:
pools = [pool]
if category:
for poolobj in models.Pool.objects.all():
if (poolobj != pool
and category in pool.category.split(",")):
pools.append(pool)
for poolobj in pools:
try:
if random:
addr = poolobj.get_rand()
else:
addr = poolobj.get()
if addr:
break
except models.FullPoolError:
pass
if addr:
LOGGER.info("Assign address " + str(addr) + " to host "
+ str(host))
addr.host = host
emptyaddrs = host.address_set.exclude(macaddr="").filter(addr="")
if emptyaddrs:
addr.macaddr = emptyaddrs[0].macaddr
emptyaddrs[0].delete()
addr.save()
else:
if category:
msg = ("No address available in pools from category "
+ category)
else:
msg = "No address available in pool " + pool.name
LOGGER.error(msg)
raise models.FullPoolError(msg)
else:
addr = pool.allocate(address)
LOGGER.info("Reserve address " + str(addr) + " in pool " + pool.name)
if duration:
addr.duration = (datetime.datetime.now() +
datetime.timedelta(days=duration))
addr.save()
return addr
def create_host(host, pool=None, address=None, mac=None, random=False,
alias=None, category=None, serial="", inventory="", duration=None,
nodns=False):
"""Create a new host and assign it the first element of addesses or
automatically one from the given pool, eventually random."""
#validation
if not host:
raise MissingParameterError(
"You must provide a name for the new host.")
if not isValidHostname(hostname=host):
raise PropertyFormatError(
"You must provide a valid name (without space, special character) for the new host.")
if models.Host.objects.filter(name=host):
raise DuplicateObjectError("Host as the host name [" + host + "] already exists.")
#anomalie9
if models.Alias.objects.filter(name=host):
raise DuplicateObjectError("A alias as the host name [" + host + "] already exists.")
if alias:
for alia in alias:
if not isValidHostname(hostname=alia):
raise PropertyFormatError("You must provide a valid alias name (without space, special character) for the new host.")
if models.Alias.objects.filter(name=alia):
raise DuplicateObjectError("A alias as alias name [" + str(alia) + "] already exists.")
if models.Host.objects.filter(name=alia):
raise DuplicateObjectError("A Host as alias name [" + str(alia) + "] already exists.")
if host==alia:
raise DuplicateObjectError("Host should not be equel to a alias name [" + str(alia) + "].")
#fin anomalie9
hostobj = models.Host(name=host)
if not alias:
alias = []
logmac = ""
if mac:
logmac = " (mac: " + str(mac) + ")"
LOGGER.info("Create new host \"" + str(host) + logmac + "\".")
if category:
hostobj.category = category
if serial:
hostobj.serial = serial
if inventory:
hostobj.inventory = inventory
if nodns:
hostobj.nodns = True
hostobj.save()
for alia in alias:
if models.Alias.objects.filter(name=alia):
LOGGER.warn("Alias " + str(alia) + " already exists and refers to "
+ models.Alias.objects.get(name=alia).host.name)
else:
aliasobj = models.Alias(name=alia, host=hostobj)
aliasobj.save()
if pool or category or address:
addrobj = allocate_address(pool, hostobj, address, random, category)
pool = addrobj.pool
addrres = str(addrobj)
if duration:
addrobj.duration = (
addrobj.date + datetime.timedelta(days=duration))
addrobj.save()
if mac:
if addrobj:
addrobj.macaddr = mac
addrobj.save()
elif hostobj.address_set.all():
first_addr = hostobj.address_set.all()[0]
first_addr.macaddr = mac
first_addr.save()
LOGGER.info("Assigned address " + addrres + " to " + str(host)
+ " from pool " + pool.name)
return str(hostobj), addrres
else:
if mac:
addrobj = models.Address(macaddr=mac, host=hostobj)
addrobj.save()
return str(hostobj), None
def delete(pool=None, addresses=None, hosts=None):
"""Delete objects from the database: address, host or pool."""
if addresses:
for addr in addresses:
if not models.Address.objects.filter(addr=addr):
raise InexistantObjectError("The addresse \"" + addr
+ "\" was not found in the database")
else:
if pool is None:
pool = models.Address.objects.get(addr=addr).pool
addrobj = models.Address.objects.get(addr=addr)
if addrobj.macaddr:
newaddr = models.Address(macaddr=addrobj.macaddr,
host=addrobj.host, allocated=False)
newaddr.save()
LOGGER.info("Delete address " + str(addr) + " from pool "
+ str(pool.name))
pool.free(addr)
elif hosts:
for host in hosts:
hostobj = models.Host.objects.get(name=host)
# addresses are automatically deleted = considered as free
addrs = []
for addr in models.Address.objects.filter(host=hostobj):
if addr.addr:
addrs.append(addr.addr)
LOGGER.info("Delete host " + str(hostobj)
+ ", releasing addresses: " + ", ".join(addrs))
hostobj.delete()
elif pool:
# addresses are automatically deleted
LOGGER.info("Delete pool " + str(pool))
pool.delete()
def modify(pools=None, host=None, category=None, address=None, mac=None,
newname=None, alias=None, serial="", inventory="", duration=None,
lastuse=None, nodns=False, comment="", clearalias=False):
"""Modify the name of an object in the database."""
poolobjs = []
if not alias:
alias = []
if pools:
for pool in pools:
poolobjs.append(get_pool(pool))
hostobj = get_host(host)
addrobj = None
if models.Address.objects.filter(addr=address):
addrobj = models.Address.objects.get(addr=address)
if address and addrobj and (mac or duration or lastuse or comment):
if mac:
addrobj.macaddr = mac
LOGGER.info("Modify address " + str(addrobj) + ": assign MAC "
+ mac)
if duration:
addrobj.duration = (datetime.datetime.now() +
datetime.timedelta(days=duration))
LOGGER.info("Modify address " + str(addrobj)
+ ": new duration untill: " + str(addrobj.duration))
if lastuse:
addrobj.lastuse = datetime.datetime.fromtimestamp(lastuse)
if comment:
addrobj.comment = comment
addrobj.save()
elif host and hostobj:
if not (newname or mac or alias or serial or inventory
or nodns or clearalias):
raise MissingParameterError("Please provide the new name "
+ "or a new information for the host " + hostobj.name)
if mac:
addrs = hostobj.address_set.all()
LOGGER.info("Assign new MAC address " + mac + " to host " + host)
if addrs:
addrs[0].macaddr = mac
else:
addrs = [models.Address(macaddr=mac, host=hostobj)]
addrs[0].save()
if serial:
hostobj.serial = serial
LOGGER.info("Changed host " + hostobj.name + ": new serial: "
+ serial)
hostobj.save()
if inventory:
hostobj.inventory = inventory
LOGGER.info("Changed host " + hostobj.name
+ ": new inventory number: " + inventory)
hostobj.save()
if newname:
            # anomaly: reject new host names containing spaces or other invalid characters
if not isValidHostname(hostname=newname):
raise PropertyFormatError(
"You must provide a valid name (without space, special character) for the new host.")
            # anomaly 9
            # Check that the new host name does not collide with an alias, either in
            # the new alias list passed in or in the existing aliases of any host.
            # The new alias list:
if alias:
for alia in alias:
if alia[0] != '-' and alia[0] != '%':
if (alia == newname):
raise DuplicateObjectError("Host [" + newname + "] is the same as the new alias.")
            # the new host name must not already exist among the aliases of any host
if models.Alias.objects.filter(name=newname):
raise DuplicateObjectError("Host [" + newname + "] already exists in the list of alias.")
            # the new host name must not already exist among the host names
if models.Host.objects.filter(name=newname):
raise DuplicateObjectError("Host [" + newname + "] already exists.")
            # end anomaly 9
LOGGER.info("Changed name of host " + hostobj.name + " to " + newname)
hostobj.name = newname
hostobj.save()
if category:
category = category[0]
LOGGER.info("Changed category of host " + hostobj.name + " to "
+ category)
hostobj.category = category
hostobj.save()
if nodns:
LOGGER.info("Changed NODNS setting of host " + hostobj.name)
hostobj.nodns = not hostobj.nodns
hostobj.save()
if clearalias:
models.Alias.objects.filter(host=hostobj).delete()
LOGGER.info("Cleared all aliases for host " + host)
elif alias:
for alia in alias:
# % was introduced because of argparse, which think some
# argument beginning by - is an option...
# http://bugs.python.org/issue9334
if alia[0] == '-' or alia[0] == '%':
alia = alia[1:]
if models.Alias.objects.filter(name=alia, host=hostobj):
models.Alias.objects.filter(name=alia,
host=hostobj).delete()
LOGGER.info("Deleted alias \"" + alia + "\" for host "
+ host)
else:
if not isValidHostname(hostname=alia):
raise PropertyFormatError(
"You must provide a valid name (without space, special character) for the new host.")
                    # the new alias must not already exist in the alias list
if models.Alias.objects.filter(name=alia):
raise DuplicateObjectError("Alias [" + alia + "] already exists.")
                    # anomaly 9
                    # the new alias must not already exist as a host name
elif models.Host.objects.filter(name=alia):
raise DuplicateObjectError("Host already exists as alias [" + alia + "]")
                    # the new alias must not be identical to the new host name
elif newname and (alia == newname):
raise DuplicateObjectError("The new alias [" + alia + "] is the same as the new host name.")
                    # end anomaly 9
else:
LOGGER.info("New alias " + alia + " for host " + host)
aliasobj = models.Alias(name=alia, host=hostobj)
aliasobj.save()
elif pools and poolobjs:
poolobj = poolobjs[0]
if not category and not newname:
raise MissingParameterError("Please provide the new name or a "
+ "category for the pool " + poolobj.name)
if category:
category = ",".join(category)
LOGGER.info("Changed category of pool " + poolobj.name + " to "
+ category)
poolobj.category = category
if newname:
LOGGER.info("Changed namme of pool " + poolobj.name + " to "
+ newname)
poolobj.name = newname
poolobj.save()
else:
raise InexistantObjectError(
"Could not find the object to modify or wrong action.")
def quick_set_prop(prop=None, pool=None, host=None, del_=False):
"""Parse the property set format value=key and set the property."""
if del_:
set_prop(prop, None, pool, host, del_)
else:
if prop.find("=") < 0:
raise PropertyFormatError("Property format is property=value.")
prop_name = prop[:prop.find("=")]
prop_value = prop[prop.find("=") + 1:]
set_prop(prop_name, prop_value, pool, host, del_)
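# Example of the expected "name=value" syntax (property and host names below are
# hypothetical): everything before the first '=' is the property name, the rest
# is its value.
#
#   quick_set_prop("mx=mail.example.org", host="web-01")    # set property "mx"
#   quick_set_prop("mx", host="web-01", del_=True)          # delete property "mx"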
def set_prop(name=None, value=None, pool=None, host=None, del_=False):
"""Set or change a property."""
hostobj = None
poolobj = None
if not name:
raise MissingParameterError(
"You must specify a name for the property to set or delete.")
if host:
hostobj = get_host(host)
if del_:
LOGGER.info("Deleted property " + name + " of host " + str(host))
else:
LOGGER.info("Changed property " + name + " of host " + str(host) +
" to " + value)
elif pool is not None:
poolobj = get_pool(pool)
if del_:
LOGGER.info("Deleted property " + name + " of pool " + pool)
else:
LOGGER.info("Changed property " + name + " of pool " + pool
+ " to " + value)
else:
raise MissingParameterError(
"You must specify a pool or a host name to set the property of.")
if poolobj:
props = models.Property.objects.filter(pool=poolobj)
elif hostobj:
props = models.Property.objects.filter(host=hostobj)
else:
raise MissingParameterError(
"You must specify a pool or a host name to set the property of.")
if del_:
if props.filter(name=name):
props.get(name=name).delete()
else:
raise InexistantObjectError("Could not find property: " + name)
else:
if props.filter(name=name):
prop_obj = props.get(name=name)
prop_obj.value = value
else:
prop_obj = models.Property(name=name, value=value,
host=hostobj, pool=poolobj)
prop_obj.save()
def sort_addresses(addrs):
"""Sort addresses given as argument in place, all addresses must belong to
the same pool."""
if not addrs or not addrs[0].pool:
return addrs
if not addrs[0].pool.addr_range:
addrs[0].pool._update()
sortablefunc = addrs[0].pool.addr_range.sortable
return sorted(addrs, key=sortablefunc)
def set_log_author(author):
"""Set name of the user for logging."""
DBLOGHANDLER.author = author
def delete_logs(days=0):
"""Delete log entries older than *date*."""
if days != 0:
models.LogEntry.objects.filter(date__lt=datetime.datetime.now()
- datetime.timedelta(days=days)).delete()
def export(cmd):
"""Export SLAM's command allowing to recreate the current database from
scratch."""
res = ""
for pool in models.Pool.objects.all():
option = ""
if pool.addr_range_str:
option += " -p " + pool.addr_range_str
if pool.category:
option += " -c " + pool.category
res += cmd + " -a create -pn " + pool.name + option + "\n"
allocated = []
for host in models.Host.objects.all():
option = ""
if host.category:
option += " -c " + host.category
if host.serial:
option += " --serial " + host.serial
if host.inventory:
option += " --inventory " + host.inventory
for alias in models.Alias.objects.filter(host=host):
option += " --alias " + alias.name
for addr in models.Address.objects.filter(host=host):
if addr.addr:
option += " -A " + addr.addr
allocated.append(addr.addr)
break
res += cmd + " -a create -H " + host.name + option + "\n"
for prop in models.Property.objects.all():
option = ""
if prop.pool:
option += " -pn " + prop.pool.name
elif prop.host:
option += " -H " + prop.host.name
res += (cmd + " -a setprop " + prop.name + "=" + prop.value
+ option + "\n")
for addr in models.Address.objects.all():
if addr.macaddr and addr.host:
res += (cmd + " -a modify -H " + addr.host.name
+ " -m " + addr.macaddr + "\n")
if not addr.addr:
continue
option = ""
if addr.host:
option += " -H " + addr.host.name
if addr.pool:
option += " -pn " + addr.pool.name
if addr.duration:
option += " --duration " + int(
addr.duration - datetime.datetime.now().total_seconds())
if addr.lastuse:
option += " --last-use " + addr.lastuse.strftime("%s")
if addr.addr in allocated:
if addr.duration or addr.lastuse:
res += cmd + " -a modify -A " + addr.addr + option + "\n"
elif not addr.host:
res += cmd + " -a create -A " + addr.addr + option + "\n"
else:
res += cmd + " -a get -A " + addr.addr + option + "\n"
for gen in models.Config.objects.all():
type_ = ""
if not gen.conftype:
continue
if gen.conftype == "bind":
type_ = "bind"
elif gen.conftype == "rbind":
type_ = "revbind"
elif gen.conftype == "dhcp":
type_ = "dhcp"
elif gen.conftype == "quatt":
type_ = "quattor"
elif gen.conftype == "laldns":
type_ = "laldns"
else:
continue
option = ""
if gen.default:
option += "--default"
if gen.outputfile:
option += " -o " + gen.outputfile
if gen.headerfile:
for head in gen.headerfile.split(","):
option += " --header " + head
if gen.footerfile:
for foot in gen.footerfile.split(","):
option += " --footer " + foot
if gen.checkfile:
for check in gen.checkfile.split(","):
option += " --checkfile " + check
if gen.timeout:
option += " --timeout " + gen.timeout
if gen.domain:
option += " --domain " + gen.domain
res += cmd + " -a create " + type_ + "\n"
return res
| LAL/SLAM | src/slam/interface.py | Python | apache-2.0 | 32,758 |
import re
import click
from itertools import imap
from mongors import checks, rs, utils
re_addr_port = re.compile(r"(?P<addr>[^:]*)(?::(?P<port>\d*))?")
def cli():
common(obj={})
def sanitize_instances(ctx, param, value):
"""Sanitize instances: add default port and remove duplicates."""
instances = []
for address, port in (
re_addr_port.match(instance).groups()
for instance in value
):
        port = int(port) if port else 27017
        if not (address, port) in instances:
            instances.append((address, port))
return instances
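# Sketch of what the callback produces (instance names are made up): for
# value = ("db1", "db2:27018", "db1") it returns [("db1", 27017), ("db2", 27018)],
# i.e. the default MongoDB port is filled in and exact duplicates are dropped.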
@click.group()
@click.option("--timeout", "-t", type=click.FLOAT, default=10.0,
help="Timeout to wait for each instance to get up.")
@click.option("instances", '--instance', '-i', multiple=True,
required=True,
callback=sanitize_instances,
help="Replica set instances.")
@click.pass_context
def common(ctx, instances, timeout):
states = checks.wait_for_instances(*instances, timeout=timeout)
if not states[False]:
click.echo(click.style("Instances are ready for replica set.", fg="green"), err=True)
ctx.obj['instances'] = instances
else:
click.echo(
click.style(
"Some instances are not up and running: {}"
.format(', '.join(':'.join(imap(str, state))
for state in states[False])), fg="red"),
err=True
)
exit(1)
@common.command(help="Initiate and return a json status of the replicaset.")
@click.argument("name", required=True)
@click.option("--reconfig", "-r", is_flag=True, help="Reconfig an invalid replica set.")
@click.pass_context
def ensure(ctx, name, reconfig):
replica_set = rs.ReplicaSet(name, *ctx.obj['instances'])
try:
status = replica_set.ensure(reconfig=reconfig)
click.echo(utils.dumps(status))
click.echo(click.style("Replica set up and running", fg="green"), err=True)
except rs.ReplicaSetInvalid:
click.echo(click.style("Replica set is invalid! Try --reconfig option!", fg="red"))
exit(1)
| diefans/MongoRS | src/mongors/scripts/__init__.py | Python | apache-2.0 | 2,158 |
# Lint as: python3
"""A few methods to handle NaNs.
Library functions that take a numpy array.
Assumes dimensions are (location, time).
"""
import itertools
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.ndimage
def _assume_2d(array):
if len(array.shape) != 2:
raise ValueError(
'Functions in this library assume a 2d array with dimensions (location, time).'
)
def fillna_zero(array):
"""Fills all nans with zero."""
return np.nan_to_num(array)
def median_filter(array, filter_days=5):
"""Median filter.
Doesn't guarantee all nans will be filled.
Args:
array: A 2d numpy array with dimensions (location, time)
filter_days: An odd integer >= 1, size of the filter.
Returns:
A numpy array.
"""
_assume_2d(array)
return scipy.ndimage.generic_filter(
array, np.nanmedian, size=(1, filter_days), mode='nearest')
def mean_filter(array, filter_days=5):
"""Mean filter.
Doesn't guarantee all nans will be filled.
Args:
array: A 2d numpy array with dimensions (location, time)
filter_days: An odd integer >= 1, size of the filter.
Returns:
A numpy array.
"""
_assume_2d(array)
return scipy.ndimage.generic_filter(
array, np.nanmean, size=(1, filter_days), mode='nearest')
def fillna_beginning(array, value=0):
"""Fills consecutive NaNs at the beginning of an array.
Args:
array: A 2d numpy array with dimensions (location, time)
value: Float, the value to fill.
Returns:
A numpy array.
"""
def fillna_beginning_1d(array):
array = array.copy()
num_initial_nans = 0
while np.isnan(array[num_initial_nans]):
num_initial_nans += 1
if num_initial_nans == len(array):
break
array[:num_initial_nans] = value
return array
_assume_2d(array)
return np.apply_along_axis(fillna_beginning_1d, 1, array)
def fillna_ffill(array):
"""Forward fills an array.
Args:
array: A 2d numpy array with dimensions (location, time)
Returns:
A numpy array.
"""
_assume_2d(array)
mask = np.isnan(array)
idx = np.where(~mask, np.arange(mask.shape[1]), 0)
max_idx = np.maximum.accumulate(idx, axis=1)
result = array[np.arange(max_idx.shape[0])[:, None], max_idx]
return result
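# Small illustration of the forward fill (values chosen arbitrarily):
#   fillna_ffill(np.array([[np.nan, 1., np.nan, 2.]]))
#   -> array([[nan,  1.,  1.,  2.]])
# A leading NaN stays NaN because there is no earlier value to propagate.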
def fillna_bfill(array):
"""Backward fills an array.
Args:
array: A 2d numpy array with dimensions (location, time)
Returns:
A numpy array.
"""
_assume_2d(array)
flipped = np.flip(array)
filled = fillna_ffill(flipped)
return np.flip(filled)
def fillna_interp(array):
"""Applies 1-d linear interpolation.
Args:
array: A 2d numpy array with dimensions (location, time)
Returns:
A numpy array.
"""
def interp_1d(array):
indices = np.arange(array.shape[0])
values = np.where(np.isfinite(array))
# Interp requires at least 2 non-nan entries
if len(values[0]) < 2:
return array
f = scipy.interpolate.interp1d(
indices[values], array[values], bounds_error=False)
return np.where(np.isfinite(array), array, f(indices))
_assume_2d(array)
return np.apply_along_axis(interp_1d, 1, array)
def plot_nans(array, title, ax=None):
"""Plots nans."""
if ax is None:
plt.figure()
ax = plt.gca()
ax.imshow(np.isnan(array), interpolation='nearest')
ax.set_xlabel('time')
ax.set_ylabel('location')
ax.set_title(title)
def longest_nans(array):
"""Returns the longest consecutive nans in an array.
Args:
array: A 2d numpy array with dimensions (location, time)
Returns:
A numpy array.
"""
def longest_nans_1d(array):
mask = np.isnan(array)
    grouped = [(el, sum(1 for element in group))
               for el, group in itertools.groupby(mask)]
nan_groups = [g[1] for g in grouped if g[0] == 1]
if not nan_groups:
return 0
return max(nan_groups)
_assume_2d(array)
return np.apply_along_axis(longest_nans_1d, 1, array)
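# A minimal usage sketch (not part of the original module), run only when the
# file is executed directly; the tiny (location, time) array is synthetic and
# simply chains the helpers defined above.
if __name__ == '__main__':
  demo = np.array([[np.nan, 1.0, np.nan, 3.0],
                   [2.0, np.nan, np.nan, 5.0]])
  filled = fillna_interp(demo)          # interpolate interior gaps
  filled = fillna_ffill(filled)         # forward fill anything still missing
  filled = fillna_beginning(filled, 0)  # zero out leading gaps
  print(filled)
  print(longest_nans(demo))             # longest NaN run per location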
| HopkinsIDD/EpiForecastStatMech | epi_forecast_stat_mech/datasets/nanhandling.py | Python | apache-2.0 | 3,953 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See: http://effbot.org/tkinterbook/button.htm
import tkinter as tk
root = tk.Tk()
button = tk.Button(root, text="Hello", background="#FF0000", foreground="#000000", activebackground="#0000FF", activeforeground="#FFFFFF")
button.pack()
root.mainloop()
| jeremiedecock/snippets | python/tkinter/python3/button_color.py | Python | mit | 1,422 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
EMAIL_MAX_LENGTH = getattr(settings, 'INVITATIONS_EMAIL_MAX_LENGTH', 254)
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('invitations', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='invitation',
name='inviter',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, on_delete=django.db.models.deletion.CASCADE),
),
migrations.AlterField(
model_name='invitation',
name='email',
field=models.EmailField(unique=True, max_length=EMAIL_MAX_LENGTH, verbose_name='e-mail address'),
),
]
| EnHatch/django-invitations | invitations/migrations/0002_auto_20151126_0426.py | Python | gpl-3.0 | 893 |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import requests
import hashlib
import os
import errno
import shutil
import six
import sys
import importlib
import paddle.dataset
import six.moves.cPickle as pickle
import glob
__all__ = [
'DATA_HOME',
'download',
'md5file',
'split',
'cluster_files_reader',
]
DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')
# When running unit tests, there could be multiple processes that
# trying to create DATA_HOME directory simultaneously, so we cannot
# use a if condition to check for the existence of the directory;
# instead, we use the filesystem as the synchronization mechanism by
# catching returned errors.
def must_mkdirs(path):
try:
        os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
must_mkdirs(DATA_HOME)
def md5file(fname):
hash_md5 = hashlib.md5()
f = open(fname, "rb")
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
f.close()
return hash_md5.hexdigest()
def download(url, module_name, md5sum, save_name=None):
dirname = os.path.join(DATA_HOME, module_name)
if not os.path.exists(dirname):
os.makedirs(dirname)
filename = os.path.join(dirname,
url.split('/')[-1]
if save_name is None else save_name)
if os.path.exists(filename) and md5file(filename) == md5sum:
return filename
retry = 0
retry_limit = 3
while not (os.path.exists(filename) and md5file(filename) == md5sum):
if os.path.exists(filename):
sys.stderr.write("file %s md5 %s" % (md5file(filename), md5sum))
if retry < retry_limit:
retry += 1
else:
raise RuntimeError("Cannot download {0} within retry limit {1}".
format(url, retry_limit))
sys.stderr.write("Cache file %s not found, downloading %s" %
(filename, url))
r = requests.get(url, stream=True)
total_length = r.headers.get('content-length')
if total_length is None:
with open(filename, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
with open(filename, 'wb') as f:
dl = 0
total_length = int(total_length)
for data in r.iter_content(chunk_size=4096):
if six.PY2:
data = six.b(data)
dl += len(data)
f.write(data)
done = int(50 * dl / total_length)
sys.stderr.write("\r[%s%s]" % ('=' * done,
' ' * (50 - done)))
                    sys.stderr.flush()
            sys.stderr.write("\n")
            sys.stderr.flush()
return filename
def fetch_all():
for module_name in [
x for x in dir(paddle.dataset) if not x.startswith("__")
]:
if "fetch" in dir(
importlib.import_module("paddle.dataset.%s" % module_name)):
getattr(
importlib.import_module("paddle.dataset.%s" % module_name),
"fetch")()
def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
"""
you can call the function as:
split(paddle.dataset.cifar.train10(), line_count=1000,
suffix="imikolov-train-%05d.pickle")
the output files as:
|-imikolov-train-00000.pickle
|-imikolov-train-00001.pickle
|- ...
|-imikolov-train-00480.pickle
:param reader: is a reader creator
:param line_count: line count for each file
:param suffix: the suffix for the output files, should contain "%d"
means the id for each file. Default is "%05d.pickle"
:param dumper: is a callable function that dump object to file, this
function will be called as dumper(obj, f) and obj is the object
will be dumped, f is a file object. Default is cPickle.dump.
"""
if not callable(dumper):
raise TypeError("dumper should be callable.")
lines = []
indx_f = 0
for i, d in enumerate(reader()):
lines.append(d)
if i >= line_count and i % line_count == 0:
with open(suffix % indx_f, "w") as f:
dumper(lines, f)
lines = []
indx_f += 1
if lines:
with open(suffix % indx_f, "w") as f:
dumper(lines, f)
def cluster_files_reader(files_pattern,
trainer_count,
trainer_id,
loader=pickle.load):
"""
Create a reader that yield element from the given files, select
a file set according trainer count and trainer_id
:param files_pattern: the files which generating by split(...)
:param trainer_count: total trainer count
:param trainer_id: the trainer rank id
:param loader: is a callable function that load object from file, this
function will be called as loader(f) and f is a file object.
Default is cPickle.load
"""
def reader():
if not callable(loader):
raise TypeError("loader should be callable.")
file_list = glob.glob(files_pattern)
file_list.sort()
my_file_list = []
for idx, fn in enumerate(file_list):
if idx % trainer_count == trainer_id:
print("append file: %s" % fn)
my_file_list.append(fn)
for fn in my_file_list:
with open(fn, "r") as f:
lines = loader(f)
for line in lines:
yield line
return reader
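# Illustrative sketch (not part of the original module) of how split() and
# cluster_files_reader() are meant to be combined; the reader, file names and
# trainer counts below are assumptions made only for this example.
#
#   split(some_reader, line_count=1000, suffix="demo-train-%05d.pickle")
#   reader = cluster_files_reader("demo-train-*.pickle",
#                                 trainer_count=4, trainer_id=0)
#   for sample in reader():
#       process(sample)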
| chengduoZH/Paddle | python/paddle/dataset/common.py | Python | apache-2.0 | 6,339 |
# -*- coding: utf-8 -*-
__author__ = 'chinfeng'
from wsgiref.simple_server import WSGIServer
from wsgiref.util import shift_path_info
from wsgiref import validate
import multiprocessing.pool
from gumpy.deco import *
import functools
import logging
logger = logging.getLogger(__name__)
class ThreadPoolWSGIServer(WSGIServer):
def __init__(self, thread_count, *args, **kwds):
super(self.__class__, self).__init__(*args, **kwds)
self._thread_count = thread_count
self._pool = multiprocessing.pool.ThreadPool(self._thread_count)
def process_request_thread(self, request, client_address):
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
def process_request(self, request, client_address):
self._pool.apply_async(self.process_request_thread, args=(request, client_address))
def serve_forever(self, *args, **kwds):
self._pool.apply_async(super(self.__class__, self).serve_forever, args=args, kwds=kwds)
def shutdown(self):
super(self.__class__, self).shutdown()
self._pool.terminate()
class TaskPoolWSGIServer(WSGIServer):
def __init__(self, executor, *args, **kwds):
super(self.__class__, self).__init__(*args, **kwds)
self._executor = executor
def process_request_thread(self, request, client_address):
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
def process_request(self, request, client_address):
self._executor.submit(self.process_request_thread, request, client_address)
def serve_forever(self, *args):
from threading import Thread
t = Thread(target=super(self.__class__, self).serve_forever)
t.setDaemon(True)
t.start()
@service
class WSGIServer(object):
def __init__(self):
self.daemon = True
self._apps = {}
def on_start(self):
try:
self._conf = self.__context__.configuration
self._port = self._conf.get('port', 8000)
self._mode = self._conf.get('mode', 'coroutine')
from wsgiref.simple_server import make_server
if self._mode == 'coroutine':
sc = functools.partial(TaskPoolWSGIServer, self.__executor__)
else:
sc = functools.partial(ThreadPoolWSGIServer, 20)
self._httpd = make_server('', self._port, self._wsgi_app, server_class=sc)
self._httpd.serve_forever()
except BaseException as e:
logger.error('simple_wsgi_serv httpd fails')
logger.exception(e)
def on_stop(self):
if self._httpd:
self._httpd.shutdown()
del self._httpd
@event
def on_configuration_changed(self, key, value):
if key == 'port':
ctx = self.__context__
ctx.stop().add_done_callback(lambda rt, bdl=ctx: bdl.start())
@bind('wsgi.application')
def wsgi_application(self, app):
validate.validator(app)
if hasattr(app, '__route__'):
self._apps[app.__route__] = app
else:
self._apps[app.__class__.__name__] = app
@wsgi_application.unbind
def unbind_wsgi_application(self, app):
if hasattr(app, '__route__'):
del self._apps[app.__route__]
else:
del self._apps[app.__class__.__name__]
def _wsgi_app(self, environ, start_response):
if self._apps:
app_route = shift_path_info(environ)
if app_route in self._apps:
environ['SCRIPT_NAME'] = ''
return self._apps[app_route](environ, start_response)
start_response('404 NOT FOUND', [('Content-type', 'text/plain'), ])
return ['no application deployed'.encode('utf-8')]
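    # Illustrative sketch (not part of the original module): any WSGI callable
    # bound to 'wsgi.application' is dispatched on the first path segment,
    # taken from its __route__ attribute when present and from its class name
    # otherwise. How such a callable is registered with gumpy is not shown
    # here; the class below only illustrates the shape the dispatcher expects.
    #
    #   class HelloApp(object):
    #       __route__ = 'hello'            # served under /hello/...
    #       def __call__(self, environ, start_response):
    #           start_response('200 OK', [('Content-type', 'text/plain')])
    #           return ['hello'.encode('utf-8')]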
| chinfeng/gumpy | plugins/simple_wsgi_serv.py | Python | lgpl-3.0 | 4,046 |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <[email protected]>
# Modified: Patrick Galbraith <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from oslo.config import cfg
from sqlalchemy import (Column, DateTime, String, Text, Integer, ForeignKey,
Enum, Boolean, Unicode, UniqueConstraint, event)
from sqlalchemy.orm import relationship, backref
from designate.openstack.common import log as logging
from designate.openstack.common import timeutils
from designate.sqlalchemy.types import UUID
from designate.sqlalchemy.models import Base as CommonBase
from designate.sqlalchemy.models import SoftDeleteMixin
from designate import utils
from sqlalchemy.ext.declarative import declarative_base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
RESOURCE_STATUSES = ['ACTIVE', 'PENDING', 'DELETED']
RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS', 'PTR',
'SSHFP']
TSIG_ALGORITHMS = ['hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256',
'hmac-sha384', 'hmac-sha512']
class Base(CommonBase):
id = Column(UUID, default=utils.generate_uuid, primary_key=True)
version = Column(Integer, default=1, nullable=False)
created_at = Column(DateTime, default=timeutils.utcnow)
updated_at = Column(DateTime, onupdate=timeutils.utcnow)
__mapper_args__ = {
'version_id_col': version
}
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
Base = declarative_base(cls=Base)
class Quota(Base):
__tablename__ = 'quotas'
__table_args__ = (
UniqueConstraint('tenant_id', 'resource', name='unique_quota'),
{'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
)
tenant_id = Column(String(36), default=None, nullable=True)
resource = Column(String(32), nullable=False)
hard_limit = Column(Integer(), nullable=False)
class Server(Base):
__tablename__ = 'servers'
name = Column(String(255), nullable=False, unique=True)
class Tld(Base):
__tablename__ = 'tlds'
name = Column(String(255), nullable=False, unique=True)
description = Column(Unicode(160), nullable=True)
class Domain(SoftDeleteMixin, Base):
__tablename__ = 'domains'
__table_args__ = (
UniqueConstraint('name', 'deleted', name='unique_domain_name'),
{'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
)
tenant_id = Column(String(36), default=None, nullable=True)
name = Column(String(255), nullable=False)
email = Column(String(255), nullable=False)
description = Column(Unicode(160), nullable=True)
ttl = Column(Integer, default=CONF.default_ttl, nullable=False)
serial = Column(Integer, default=timeutils.utcnow_ts, nullable=False)
refresh = Column(Integer, default=CONF.default_soa_refresh, nullable=False)
retry = Column(Integer, default=CONF.default_soa_retry, nullable=False)
expire = Column(Integer, default=CONF.default_soa_expire, nullable=False)
minimum = Column(Integer, default=CONF.default_soa_minimum, nullable=False)
status = Column(Enum(name='resource_statuses', *RESOURCE_STATUSES),
nullable=False, server_default='ACTIVE',
default='ACTIVE')
recordsets = relationship('RecordSet',
backref=backref('domain', uselist=False),
cascade="all, delete-orphan",
passive_deletes=True)
parent_domain_id = Column(UUID, ForeignKey('domains.id'), default=None,
nullable=True)
class RecordSet(Base):
__tablename__ = 'recordsets'
__table_args__ = (
UniqueConstraint('domain_id', 'name', 'type', name='unique_recordset'),
{'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
)
tenant_id = Column(String(36), default=None, nullable=True)
domain_id = Column(UUID, ForeignKey('domains.id', ondelete='CASCADE'),
nullable=False)
name = Column(String(255), nullable=False)
type = Column(Enum(name='record_types', *RECORD_TYPES), nullable=False)
ttl = Column(Integer, default=None, nullable=True)
description = Column(Unicode(160), nullable=True)
records = relationship('Record',
backref=backref('recordset', uselist=False),
cascade="all, delete-orphan",
passive_deletes=True)
class Record(Base):
__tablename__ = 'records'
tenant_id = Column(String(36), default=None, nullable=True)
domain_id = Column(UUID, ForeignKey('domains.id', ondelete='CASCADE'),
nullable=False)
recordset_id = Column(UUID,
ForeignKey('recordsets.id', ondelete='CASCADE'),
nullable=False)
data = Column(Text, nullable=False)
priority = Column(Integer, default=None, nullable=True)
description = Column(Unicode(160), nullable=True)
hash = Column(String(32), nullable=False, unique=True)
managed = Column(Boolean, default=False)
managed_extra = Column(Unicode(100), default=None, nullable=True)
managed_plugin_type = Column(Unicode(50), default=None, nullable=True)
managed_plugin_name = Column(Unicode(50), default=None, nullable=True)
managed_resource_type = Column(Unicode(50), default=None, nullable=True)
managed_resource_region = Column(Unicode(100), default=None, nullable=True)
managed_resource_id = Column(UUID, default=None, nullable=True)
managed_tenant_id = Column(Unicode(36), default=None, nullable=True)
status = Column(Enum(name='resource_statuses', *RESOURCE_STATUSES),
nullable=False, server_default='ACTIVE',
default='ACTIVE')
def recalculate_hash(self):
"""
Calculates the hash of the record, used to ensure record uniqueness.
"""
md5 = hashlib.md5()
md5.update("%s:%s:%s" % (self.recordset_id, self.data, self.priority))
self.hash = md5.hexdigest()
@event.listens_for(Record, "before_insert")
def recalculate_record_hash_before_insert(mapper, connection, instance):
instance.recalculate_hash()
@event.listens_for(Record, "before_update")
def recalculate_record_hash_before_update(mapper, connection, instance):
instance.recalculate_hash()
class TsigKey(Base):
__tablename__ = 'tsigkeys'
name = Column(String(255), nullable=False, unique=True)
algorithm = Column(Enum(name='tsig_algorithms', *TSIG_ALGORITHMS),
nullable=False)
secret = Column(String(255), nullable=False)
class Blacklists(Base):
__tablename__ = 'blacklists'
pattern = Column(String(255), nullable=False, unique=True)
description = Column(Unicode(160), nullable=True)
| richm/designate | designate/storage/impl_sqlalchemy/models.py | Python | apache-2.0 | 7,386 |
# ********************************************************************** <====
from .. import InvalidArgumentError, TransportError
# ********************************************************************** ====>
import os.path
# ---------------------------------------------------------------------
# Read-write transports inherit from BaseWTransport. They need to support:
# * get
# * otype
# * init_base
# * put
# * delete
# * rename
# * folder_create
class BaseWTransport( object ):
"""
A parent class providing some common functionality for R/W tranports
"""
def folder_ensure( self, folder ):
"""Ensure a path exists in the repository. Create it if not"""
if not folder:
raise InvalidArgumentError('empty folder')
t = self.otype( folder )
if t == 'D':
return
elif t == 'F':
raise TransportError( "can't make folder: %s is a file" % folder )
(parent,name) = os.path.split( folder )
if parent:
self.folder_ensure( parent )
self.folder_create( folder )
def exists( self, path ):
"""
Test if a path exists in the repository
"""
check = self.otype( path )
return check in ('F','D')
def update( self, source, destname ):
"""
Create or update a file in the repository, as atomically as possible
"""
self.put( source, destname + '.new' )
self.rename( destname + '.new', destname )
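    # Illustrative sketch (not part of the original class): a concrete
    # transport is expected to supply get/otype/init_base/put/delete/rename/
    # folder_create (see the header comment); with those in place the helpers
    # above compose, e.g.
    #
    #   transport.folder_ensure('releases/1.2')   # creates missing parents
    #   transport.update(local_path, 'releases/1.2/artifact.tar.gz')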
| paulovn/artifact-manager | lib/artmgr/transport/basew.py | Python | gpl-2.0 | 1,523 |
# Copyright 2012, Contrail Systems, Inc.
#
"""
.. attention:: Fix the license string
"""
import requests
import re
import uuid
import json
import time
import socket
import netaddr
from netaddr import IPNetwork, IPSet, IPAddress
import gevent
import bottle
from neutron.common import constants
from neutron.common import exceptions
from neutron.api.v2 import attributes as attr
from cfgm_common import exceptions as vnc_exc
from vnc_api.vnc_api import *
_DEFAULT_HEADERS = {
'Content-type': 'application/json; charset="UTF-8"', }
# TODO find if there is a common definition
CREATE = 1
READ = 2
UPDATE = 3
DELETE = 4
IP_PROTOCOL_MAP = {constants.PROTO_NUM_TCP: constants.PROTO_NAME_TCP,
constants.PROTO_NUM_UDP: constants.PROTO_NAME_UDP,
constants.PROTO_NUM_ICMP: constants.PROTO_NAME_ICMP}
# SNAT defines
SNAT_SERVICE_TEMPLATE_FQ_NAME = ['default-domain', 'netns-snat-template']
_IFACE_ROUTE_TABLE_NAME_PREFIX = 'NEUTRON_IFACE_RT'
class DBInterface(object):
"""
An instance of this class forwards requests to vnc cfg api (web)server
"""
Q_URL_PREFIX = '/extensions/ct'
def __init__(self, admin_name, admin_password, admin_tenant_name,
api_srvr_ip, api_srvr_port, user_info=None,
contrail_extensions_enabled=True,
list_optimization_enabled=False,
apply_subnet_host_routes=False):
self._api_srvr_ip = api_srvr_ip
self._api_srvr_port = api_srvr_port
self._apply_subnet_host_routes = apply_subnet_host_routes
self._contrail_extensions_enabled = contrail_extensions_enabled
self._list_optimization_enabled = list_optimization_enabled
# Retry till a api-server is up
connected = False
while not connected:
try:
# TODO remove hardcode
self._vnc_lib = VncApi(admin_name, admin_password,
admin_tenant_name, api_srvr_ip,
api_srvr_port, '/', user_info=user_info)
connected = True
except requests.exceptions.RequestException as e:
gevent.sleep(3)
# TODO remove this backward compat code eventually
# changes 'net_fq_name_str pfx/len' key to 'net_id pfx/len' key
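        # e.g. 'default-domain:proj:net 10.0.0.0/24' -> '<net uuid> 10.0.0.0/24'
        # (the fq-name and prefix are only an illustration of the key shapes)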
subnet_map = self._vnc_lib.kv_retrieve(key=None)
for kv_dict in subnet_map:
key = kv_dict['key']
if len(key.split()) == 1:
subnet_id = key
# uuid key, fixup value portion to 'net_id pfx/len' format
# if not already so
if len(kv_dict['value'].split(':')) == 1:
# new format already, skip
continue
net_fq_name = kv_dict['value'].split()[0].split(':')
try:
net_obj = self._virtual_network_read(fq_name=net_fq_name)
except NoIdError:
self._vnc_lib.kv_delete(subnet_id)
continue
new_subnet_key = '%s %s' % (net_obj.uuid,
kv_dict['value'].split()[1])
self._vnc_lib.kv_store(subnet_id, new_subnet_key)
else: # subnet key
if len(key.split()[0].split(':')) == 1:
# new format already, skip
continue
# delete old key, convert to new key format and save
old_subnet_key = key
self._vnc_lib.kv_delete(old_subnet_key)
subnet_id = kv_dict['value']
net_fq_name = key.split()[0].split(':')
try:
net_obj = self._virtual_network_read(fq_name=net_fq_name)
except NoIdError:
continue
new_subnet_key = '%s %s' % (net_obj.uuid, key.split()[1])
self._vnc_lib.kv_store(new_subnet_key, subnet_id)
#end __init__
# Helper routines
def _request_api_server(self, url, method, data=None, headers=None):
if method == 'GET':
return requests.get(url)
if method == 'POST':
return requests.post(url, data=data, headers=headers)
if method == 'DELETE':
return requests.delete(url)
#end _request_api_server
def _relay_request(self, request):
"""
Send received request to api server
"""
# chop neutron parts of url and add api server address
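        # e.g. a request for '/extensions/ct/virtual-networks' is forwarded to
        # 'http://<api_srvr_ip>:<api_srvr_port>/virtual-networks'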
url_path = re.sub(self.Q_URL_PREFIX, '', request.environ['PATH_INFO'])
url = "http://%s:%s%s" % (self._api_srvr_ip, self._api_srvr_port,
url_path)
return self._request_api_server(
url, request.environ['REQUEST_METHOD'],
request.body, {'Content-type': request.environ['CONTENT_TYPE']})
#end _relay_request
def _validate_project_ids(self, context, project_ids):
if context and not context['is_admin']:
return [context['tenant']]
return_project_ids = []
for project_id in project_ids:
try:
return_project_ids.append(str(uuid.UUID(project_id)))
except ValueError:
continue
return return_project_ids
def _obj_to_dict(self, obj):
return self._vnc_lib.obj_to_dict(obj)
#end _obj_to_dict
def _get_plugin_property(self, property_in):
fq_name=['default-global-system-config'];
gsc_obj = self._vnc_lib.global_system_config_read(fq_name);
plugin_settings = gsc_obj.plugin_tuning.plugin_property
for each_setting in plugin_settings:
if each_setting.property == property_in:
return each_setting.value
return None
#end _get_plugin_property
def _ensure_instance_exists(self, instance_id):
instance_name = instance_id
instance_obj = VirtualMachine(instance_name)
try:
id = self._vnc_lib.obj_to_id(instance_obj)
instance_obj = self._vnc_lib.virtual_machine_read(id=id)
except NoIdError: # instance doesn't exist, create it
# check if instance_id is a uuid value or not
try:
uuid.UUID(instance_id)
instance_obj.uuid = instance_id
except ValueError:
# if instance_id is not a valid uuid, let
# virtual_machine_create generate uuid for the vm
pass
self._vnc_lib.virtual_machine_create(instance_obj)
return instance_obj
#end _ensure_instance_exists
def _ensure_default_security_group_exists(self, proj_id):
# check in api server
proj_obj = self._vnc_lib.project_read(id=proj_id)
sg_groups = proj_obj.get_security_groups()
for sg_group in sg_groups or []:
if sg_group['to'][-1] == 'default':
return
# does not exist hence create and add cache
sg_uuid = str(uuid.uuid4())
sg_obj = SecurityGroup(name='default', parent_obj=proj_obj)
sg_obj.uuid = sg_uuid
self._vnc_lib.security_group_create(sg_obj)
#allow all egress traffic
def_rule = {}
def_rule['port_range_min'] = 0
def_rule['port_range_max'] = 65535
def_rule['direction'] = 'egress'
def_rule['remote_ip_prefix'] = '0.0.0.0/0'
def_rule['remote_group_id'] = None
def_rule['protocol'] = 'any'
rule = self._security_group_rule_neutron_to_vnc(def_rule, CREATE)
self._security_group_rule_create(sg_obj.uuid, rule)
        #allow all ingress traffic
def_rule = {}
def_rule['port_range_min'] = 0
def_rule['port_range_max'] = 65535
def_rule['direction'] = 'ingress'
def_rule['remote_ip_prefix'] = '0.0.0.0/0'
def_rule['remote_group_id'] = None
def_rule['protocol'] = 'any'
rule = self._security_group_rule_neutron_to_vnc(def_rule, CREATE)
self._security_group_rule_create(sg_obj.uuid, rule)
#end _ensure_default_security_group_exists
def _get_obj_tenant_id(self, q_type, obj_uuid):
# Seed the cache and return
if q_type == 'port':
port_obj = self._virtual_machine_interface_read(obj_uuid)
if port_obj.parent_type != "project":
net_id = port_obj.get_virtual_network_refs()[0]['uuid']
# recurse up type-hierarchy
tenant_id = self._get_obj_tenant_id('network', net_id)
else:
tenant_id = port_obj.parent_uuid.replace('-', '')
return tenant_id
if q_type == 'network':
net_obj = self._virtual_network_read(net_id=obj_uuid)
tenant_id = net_obj.parent_uuid.replace('-', '')
return tenant_id
return None
#end _get_obj_tenant_id
def _project_read(self, proj_id=None, fq_name=None):
proj_obj = self._vnc_lib.project_read(id=proj_id, fq_name=fq_name)
return proj_obj
#end _project_read
def _get_tenant_id_for_create(self, context, resource):
if context['is_admin'] and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context['tenant_id']):
reason = _('Cannot create resource for another tenant')
self._raise_contrail_exception('AdminRequired', reason=reason)
else:
tenant_id = context['tenant_id']
return tenant_id
# Encode and send an excption information to neutron. exc must be a
# valid exception class name in neutron, kwargs must contain all
# necessary arguments to create that exception
def _raise_contrail_exception(self, exc, **kwargs):
exc_info = {'exception': exc}
exc_info.update(kwargs)
bottle.abort(400, json.dumps(exc_info))
#end _raise_contrail_exception
def _security_group_rule_create(self, sg_id, sg_rule):
try:
sg_vnc = self._vnc_lib.security_group_read(id=sg_id)
except NoIdError:
self._raise_contrail_exception('SecurityGroupNotFound', id=sg_id)
rules = sg_vnc.get_security_group_entries()
if rules is None:
rules = PolicyEntriesType([sg_rule])
else:
rules.add_policy_rule(sg_rule)
sg_vnc.set_security_group_entries(rules)
self._vnc_lib.security_group_update(sg_vnc)
return
#end _security_group_rule_create
def _security_group_rule_find(self, sgr_id):
dom_projects = self._project_list_domain(None)
for project in dom_projects:
proj_id = project['uuid']
project_sgs = self._security_group_list_project(proj_id)
for sg_obj in project_sgs:
sgr_entries = sg_obj.get_security_group_entries()
if sgr_entries == None:
continue
for sg_rule in sgr_entries.get_policy_rule():
if sg_rule.get_rule_uuid() == sgr_id:
return sg_obj, sg_rule
return None, None
#end _security_group_rule_find
def _security_group_rule_delete(self, sg_obj, sg_rule):
rules = sg_obj.get_security_group_entries()
rules.get_policy_rule().remove(sg_rule)
sg_obj.set_security_group_entries(rules)
self._vnc_lib.security_group_update(sg_obj)
return
#end _security_group_rule_delete
def _security_group_delete(self, sg_id):
self._vnc_lib.security_group_delete(id=sg_id)
#end _security_group_delete
def _svc_instance_create(self, si_obj):
try:
si_uuid = self._vnc_lib.service_instance_create(si_obj)
except RefsExistError as e:
self._raise_contrail_exception('BadRequest',
resource='svc_instance', msg=str(e))
st_fq_name = ['default-domain', 'nat-template']
st_obj = self._vnc_lib.service_template_read(fq_name=st_fq_name)
si_obj.set_service_template(st_obj)
self._vnc_lib.service_instance_update(si_obj)
return si_uuid
#end _svc_instance_create
def _svc_instance_delete(self, si_id):
self._vnc_lib.service_instance_delete(id=si_id)
#end _svc_instance_delete
def _route_table_create(self, rt_obj):
rt_uuid = self._vnc_lib.route_table_create(rt_obj)
return rt_uuid
#end _route_table_create
def _route_table_delete(self, rt_id):
self._vnc_lib.route_table_delete(id=rt_id)
#end _route_table_delete
def _resource_create(self, resource_type, obj):
create_method = getattr(self._vnc_lib, resource_type + '_create')
try:
obj_uuid = create_method(obj)
except RefsExistError:
obj.uuid = str(uuid.uuid4())
obj.name += '-' + obj.uuid
obj.fq_name[-1] += '-' + obj.uuid
obj_uuid = create_method(obj)
except PermissionDenied as e:
self._raise_contrail_exception('BadRequest',
resource=resource_type, msg=str(e))
return obj_uuid
#end _resource_create
def _virtual_network_read(self, net_id=None, fq_name=None, fields=None):
net_obj = self._vnc_lib.virtual_network_read(id=net_id,
fq_name=fq_name,
fields=fields)
return net_obj
#end _virtual_network_read
def _virtual_network_update(self, net_obj):
try:
self._vnc_lib.virtual_network_update(net_obj)
except (PermissionDenied, RefsExistError) as e:
self._raise_contrail_exception('BadRequest',
resource='network', msg=str(e))
# read back to get subnet gw allocated by api-server
fq_name_str = json.dumps(net_obj.get_fq_name())
#end _virtual_network_update
def _virtual_network_delete(self, net_id):
fq_name_str = None
try:
net_obj = self._vnc_lib.virtual_network_read(id=net_id)
fq_name_str = json.dumps(net_obj.get_fq_name())
except NoIdError:
return
try:
if net_obj.get_floating_ip_pools():
fip_pools = net_obj.get_floating_ip_pools()
for fip_pool in fip_pools:
self._floating_ip_pool_delete(fip_pool_id=fip_pool['uuid'])
self._vnc_lib.virtual_network_delete(id=net_id)
except RefsExistError:
self._raise_contrail_exception('NetworkInUse', net_id=net_id)
#end _virtual_network_delete
def _virtual_network_list(self, parent_id=None, obj_uuids=None,
fields=None, detail=False, count=False):
return self._vnc_lib.virtual_networks_list(
parent_id=parent_id,
obj_uuids=obj_uuids,
fields=fields,
detail=detail,
count=count)
#end _virtual_network_list
def _virtual_machine_interface_read(self, port_id=None, fq_name=None,
fields=None):
back_ref_fields = ['logical_router_back_refs', 'instance_ip_back_refs', 'floating_ip_back_refs']
if fields:
n_extra_fields = list(set(fields + back_ref_fields))
else:
n_extra_fields = back_ref_fields
port_obj = self._vnc_lib.virtual_machine_interface_read(
id=port_id, fq_name=fq_name, fields=n_extra_fields)
return port_obj
#end _virtual_machine_interface_read
def _virtual_machine_interface_update(self, port_obj):
self._vnc_lib.virtual_machine_interface_update(port_obj)
#end _virtual_machine_interface_update
def _virtual_machine_interface_delete(self, port_id):
self._vnc_lib.virtual_machine_interface_delete(id=port_id)
#end _virtual_machine_interface_delete
def _virtual_machine_interface_list(self, parent_id=None, back_ref_id=None,
obj_uuids=None, fields=None):
back_ref_fields = ['logical_router_back_refs', 'instance_ip_back_refs', 'floating_ip_back_refs']
if fields:
n_extra_fields = list(set(fields + back_ref_fields))
else:
n_extra_fields = back_ref_fields
vmi_objs = self._vnc_lib.virtual_machine_interfaces_list(
parent_id=parent_id,
back_ref_id=back_ref_id,
obj_uuids=obj_uuids,
detail=True,
fields=n_extra_fields)
return vmi_objs
#end _virtual_machine_interface_list
def _instance_ip_create(self, iip_obj):
iip_uuid = self._vnc_lib.instance_ip_create(iip_obj)
return iip_uuid
#end _instance_ip_create
def _instance_ip_read(self, instance_ip_id=None, fq_name=None):
iip_obj = self._vnc_lib.instance_ip_read(id=instance_ip_id,
fq_name=fq_name)
return iip_obj
#end _instance_ip_read
def _instance_ip_update(self, iip_obj):
self._vnc_lib.instance_ip_update(iip_obj)
#end _instance_ip_update
def _instance_ip_delete(self, instance_ip_id):
self._vnc_lib.instance_ip_delete(id=instance_ip_id)
#end _instance_ip_delete
def _instance_ip_list(self, back_ref_id=None, obj_uuids=None, fields=None):
iip_objs = self._vnc_lib.instance_ips_list(detail=True,
back_ref_id=back_ref_id,
obj_uuids=obj_uuids,
fields=fields)
return iip_objs
#end _instance_ip_list
def _floating_ip_pool_create(self, fip_pool_obj):
fip_pool_uuid = self._vnc_lib.floating_ip_pool_create(fip_pool_obj)
return fip_pool_uuid
# end _floating_ip_pool_create
def _floating_ip_pool_delete(self, fip_pool_id):
fip_pool_uuid = self._vnc_lib.floating_ip_pool_delete(id=fip_pool_id)
# end _floating_ip_pool_delete
# find projects on a given domain
def _project_list_domain(self, domain_id):
# TODO till domain concept is not present in keystone
fq_name = ['default-domain']
resp_dict = self._vnc_lib.projects_list(parent_fq_name=fq_name)
return resp_dict['projects']
#end _project_list_domain
# find network ids on a given project
def _network_list_project(self, project_id, count=False):
if project_id:
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
else:
project_uuid = None
if count:
ret_val = self._virtual_network_list(parent_id=project_uuid,
count=True)
else:
ret_val = self._virtual_network_list(parent_id=project_uuid,
detail=True)
return ret_val
#end _network_list_project
# find router ids on a given project
def _router_list_project(self, project_id=None, detail=False):
if project_id:
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
return []
else:
project_uuid = None
resp = self._vnc_lib.logical_routers_list(parent_id=project_uuid,
detail=detail)
if detail:
return resp
return resp['logical-routers']
#end _router_list_project
def _ipam_list_project(self, project_id):
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
resp_dict = self._vnc_lib.network_ipams_list(parent_id=project_uuid)
return resp_dict['network-ipams']
#end _ipam_list_project
def _security_group_list_project(self, project_id):
if project_id:
try:
project_uuid = str(uuid.UUID(project_id))
# Trigger a project read to ensure project sync
project_obj = self._project_read(proj_id=project_uuid)
except Exception:
print "Error in converting uuid %s" % (project_id)
else:
project_uuid = None
sg_objs = self._vnc_lib.security_groups_list(parent_id=project_uuid,
detail=True)
return sg_objs
#end _security_group_list_project
def _security_group_entries_list_sg(self, sg_id):
try:
sg_uuid = str(uuid.UUID(sg_id))
except Exception:
print "Error in converting SG uuid %s" % (sg_id)
resp_dict = self._vnc_lib.security_groups_list(obj_uuids=[sg_uuid])
return resp_dict['security-groups']
#end _security_group_entries_list_sg
def _route_table_list_project(self, project_id):
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
resp_dict = self._vnc_lib.route_tables_list(parent_id=project_uuid)
return resp_dict['route-tables']
#end _route_table_list_project
def _svc_instance_list_project(self, project_id):
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
        resp_dict = self._vnc_lib.service_instances_list(parent_id=project_uuid)
return resp_dict['service-instances']
#end _svc_instance_list_project
def _policy_list_project(self, project_id):
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
resp_dict = self._vnc_lib.network_policys_list(parent_id=project_uuid)
return resp_dict['network-policys']
#end _policy_list_project
def _logical_router_read(self, rtr_id=None, fq_name=None):
rtr_obj = self._vnc_lib.logical_router_read(id=rtr_id, fq_name=fq_name)
return rtr_obj
#end _logical_router_read
def _logical_router_update(self, rtr_obj):
self._vnc_lib.logical_router_update(rtr_obj)
fq_name_str = json.dumps(rtr_obj.get_fq_name())
#end _logical_router_update
def _logical_router_delete(self, rtr_id):
try:
self._vnc_lib.logical_router_delete(id=rtr_id)
except RefsExistError:
self._raise_contrail_exception('RouterInUse', router_id=rtr_id)
#end _logical_router_delete
def _floatingip_list(self, back_ref_id=None):
return self._vnc_lib.floating_ips_list(back_ref_id=back_ref_id,
detail=True)
#end _floatingip_list
# find floating ip pools a project has access to
def _fip_pool_refs_project(self, project_id):
project_obj = self._project_read(proj_id=project_id)
return project_obj.get_floating_ip_pool_refs()
#end _fip_pool_refs_project
def _network_list_shared_and_ext(self):
ret_list = []
nets = self._network_list_project(project_id=None)
for net in nets:
if net.get_router_external() and net.get_is_shared():
ret_list.append(net)
return ret_list
    # end _network_list_shared_and_ext
def _network_list_router_external(self):
ret_list = []
nets = self._network_list_project(project_id=None)
for net in nets:
if not net.get_router_external():
continue
ret_list.append(net)
return ret_list
# end _network_list_router_external
def _network_list_shared(self):
ret_list = []
nets = self._network_list_project(project_id=None)
for net in nets:
if not net.get_is_shared():
continue
ret_list.append(net)
return ret_list
# end _network_list_shared
# find networks of floating ip pools project has access to
def _fip_pool_ref_networks(self, project_id):
ret_net_objs = self._network_list_shared()
proj_fip_pool_refs = self._fip_pool_refs_project(project_id)
if not proj_fip_pool_refs:
return ret_net_objs
for fip_pool_ref in proj_fip_pool_refs:
fip_uuid = fip_pool_ref['uuid']
fip_pool_obj = self._vnc_lib.floating_ip_pool_read(id=fip_uuid)
net_uuid = fip_pool_obj.parent_uuid
net_obj = self._virtual_network_read(net_id=net_uuid)
ret_net_objs.append(net_obj)
return ret_net_objs
#end _fip_pool_ref_networks
# find floating ip pools defined by network
def _fip_pool_list_network(self, net_id):
resp_dict = self._vnc_lib.floating_ip_pools_list(parent_id=net_id)
return resp_dict['floating-ip-pools']
#end _fip_pool_list_network
def _port_list(self, net_objs, port_objs, iip_objs):
ret_q_ports = []
memo_req = {'networks': {},
'subnets': {},
'instance-ips': {}}
for net_obj in net_objs:
            # dictionary of net_uuid to net_obj
memo_req['networks'][net_obj.uuid] = net_obj
subnets_info = self._virtual_network_to_subnets(net_obj)
memo_req['subnets'][net_obj.uuid] = subnets_info
for iip_obj in iip_objs:
# dictionary of iip_uuid to iip_obj
memo_req['instance-ips'][iip_obj.uuid] = iip_obj
for port_obj in port_objs:
port_info = self._port_vnc_to_neutron(port_obj, memo_req)
ret_q_ports.append(port_info)
return ret_q_ports
#end _port_list
def _port_list_network(self, network_ids, count=False):
ret_list = []
net_objs = self._virtual_network_list(obj_uuids=network_ids,
fields=['virtual_machine_interface_back_refs'],
detail=True)
if not net_objs:
return ret_list
net_ids = [net_obj.uuid for net_obj in net_objs]
port_objs = self._virtual_machine_interface_list(back_ref_id=net_ids)
iip_objs = self._instance_ip_list(back_ref_id=net_ids)
return self._port_list(net_objs, port_objs, iip_objs)
#end _port_list_network
# find port ids on a given project
def _port_list_project(self, project_id, count=False):
if self._list_optimization_enabled:
port_objs = self._virtual_machine_interface_list(parent_id=project_id,
fields=['instance_ip_back_refs'])
if count:
return len(port_objs)
iip_objs = self._instance_ip_list()
return self._port_list([], port_objs, iip_objs)
else:
if count:
ret_val = 0
else:
ret_val = []
net_objs = self._virtual_network_list(project_id,
fields=['virtual_machine_interface_back_refs'],
detail=True)
if not net_objs:
return ret_val
if count:
for net_obj in net_objs:
port_back_refs = (
net_obj.get_virtual_machine_interface_back_refs() or [])
ret_val = ret_val + len(port_back_refs)
return ret_val
net_ids = [net_obj.uuid for net_obj in net_objs]
port_objs = self._virtual_machine_interface_list(back_ref_id=net_ids)
iip_objs = self._instance_ip_list(back_ref_id=net_ids)
return self._port_list(net_objs, port_objs, iip_objs)
#end _port_list_project
# Returns True if
# * no filter is specified
# OR
# * search-param is not present in filters
# OR
# * 1. search-param is present in filters AND
# 2. resource matches param-list AND
# 3. shared parameter in filters is False
def _filters_is_present(self, filters, key_name, match_value):
if filters:
if key_name in filters:
try:
if key_name == 'tenant_id':
filter_value = [str(uuid.UUID(t_id)) \
for t_id in filters[key_name]]
else:
filter_value = filters[key_name]
idx = filter_value.index(match_value)
except ValueError: # not in requested list
return False
return True
#end _filters_is_present
def _network_read(self, net_uuid):
net_obj = self._virtual_network_read(net_id=net_uuid)
return net_obj
#end _network_read
def _subnet_vnc_create_mapping(self, subnet_id, subnet_key):
self._vnc_lib.kv_store(subnet_id, subnet_key)
self._vnc_lib.kv_store(subnet_key, subnet_id)
#end _subnet_vnc_create_mapping
def _subnet_vnc_read_mapping(self, id=None, key=None):
if id:
try:
subnet_key = self._vnc_lib.kv_retrieve(id)
return subnet_key
except NoIdError:
self._raise_contrail_exception('SubnetNotFound',
subnet_id=id)
if key:
subnet_id = self._vnc_lib.kv_retrieve(key)
return subnet_id
#end _subnet_vnc_read_mapping
def _subnet_vnc_read_or_create_mapping(self, id, key):
# if subnet was created outside of neutron handle it and create
# neutron representation now (lazily)
try:
return self._subnet_vnc_read_mapping(key=key)
except NoIdError:
self._subnet_vnc_create_mapping(id, key)
return self._subnet_vnc_read_mapping(key=key)
#end _subnet_vnc_read_or_create_mapping
def _subnet_vnc_delete_mapping(self, subnet_id, subnet_key):
self._vnc_lib.kv_delete(subnet_id)
self._vnc_lib.kv_delete(subnet_key)
#end _subnet_vnc_delete_mapping
def _subnet_vnc_get_key(self, subnet_vnc, net_id):
pfx = subnet_vnc.subnet.get_ip_prefix()
pfx_len = subnet_vnc.subnet.get_ip_prefix_len()
network = IPNetwork('%s/%s' % (pfx, pfx_len))
return '%s %s/%s' % (net_id, str(network.ip), pfx_len)
#end _subnet_vnc_get_key
def _subnet_read(self, net_uuid, subnet_key):
try:
net_obj = self._virtual_network_read(net_id=net_uuid)
except NoIdError:
return None
ipam_refs = net_obj.get_network_ipam_refs()
if not ipam_refs:
return None
# TODO scope for optimization
for ipam_ref in ipam_refs:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
if self._subnet_vnc_get_key(subnet_vnc,
net_uuid) == subnet_key:
return subnet_vnc
return None
#end _subnet_read
def _ip_address_to_subnet_id(self, ip_addr, net_obj):
# find subnet-id for ip-addr, called when instance-ip created
ipam_refs = net_obj.get_network_ipam_refs()
if ipam_refs:
for ipam_ref in ipam_refs:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
if IPAddress(ip_addr) in IPSet([cidr]):
subnet_id = subnet_vnc.subnet_uuid
return subnet_id
return None
#end _ip_address_to_subnet_id
# Returns a list of dicts of subnet-id:cidr for a VN
def _virtual_network_to_subnets(self, net_obj):
ret_subnets = []
ipam_refs = net_obj.get_network_ipam_refs()
if ipam_refs:
for ipam_ref in ipam_refs:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
subnet_id = subnet_vnc.subnet_uuid
cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
ret_subnets.append({'id': subnet_id, 'cidr': cidr})
return ret_subnets
# end _virtual_network_to_subnets
# Conversion routines between VNC and Quantum objects
def _svc_instance_neutron_to_vnc(self, si_q, oper):
if oper == CREATE:
project_id = str(uuid.UUID(si_q['tenant_id']))
project_obj = self._project_read(proj_id=project_id)
net_id = si_q['external_net']
ext_vn = self._vnc_lib.virtual_network_read(id=net_id)
scale_out = ServiceScaleOutType(max_instances=1, auto_scale=False)
si_prop = ServiceInstanceType(
auto_policy=True,
left_virtual_network="",
right_virtual_network=ext_vn.get_fq_name_str(),
scale_out=scale_out)
si_prop.set_scale_out(scale_out)
si_vnc = ServiceInstance(name=si_q['name'],
parent_obj=project_obj,
service_instance_properties=si_prop)
return si_vnc
#end _svc_instance_neutron_to_vnc
def _svc_instance_vnc_to_neutron(self, si_obj):
si_q_dict = self._obj_to_dict(si_obj)
# replace field names
si_q_dict['id'] = si_obj.uuid
si_q_dict['tenant_id'] = si_obj.parent_uuid.replace('-', '')
si_q_dict['name'] = si_obj.name
si_props = si_obj.get_service_instance_properties()
if si_props:
vn_fq_name = si_props.get_right_virtual_network()
vn_obj = self._vnc_lib.virtual_network_read(fq_name_str=vn_fq_name)
si_q_dict['external_net'] = str(vn_obj.uuid) + ' ' + vn_obj.name
si_q_dict['internal_net'] = ''
return si_q_dict
    #end _svc_instance_vnc_to_neutron
def _route_table_neutron_to_vnc(self, rt_q, oper):
if oper == CREATE:
project_id = str(uuid.UUID(rt_q['tenant_id']))
project_obj = self._project_read(proj_id=project_id)
rt_vnc = RouteTable(name=rt_q['name'],
parent_obj=project_obj)
if not rt_q['routes']:
return rt_vnc
for route in rt_q['routes']['route']:
try:
vm_obj = self._vnc_lib.virtual_machine_read(
id=route['next_hop'])
si_list = vm_obj.get_service_instance_refs()
if si_list:
fq_name = si_list[0]['to']
si_obj = self._vnc_lib.service_instance_read(
fq_name=fq_name)
route['next_hop'] = si_obj.get_fq_name_str()
except Exception as e:
pass
rt_vnc.set_routes(RouteTableType.factory(**rt_q['routes']))
else:
rt_vnc = self._vnc_lib.route_table_read(id=rt_q['id'])
for route in rt_q['routes']['route']:
try:
vm_obj = self._vnc_lib.virtual_machine_read(
id=route['next_hop'])
si_list = vm_obj.get_service_instance_refs()
if si_list:
fq_name = si_list[0]['to']
si_obj = self._vnc_lib.service_instance_read(
fq_name=fq_name)
route['next_hop'] = si_obj.get_fq_name_str()
except Exception as e:
pass
rt_vnc.set_routes(RouteTableType.factory(**rt_q['routes']))
return rt_vnc
#end _route_table_neutron_to_vnc
def _route_table_vnc_to_neutron(self, rt_obj):
rt_q_dict = self._obj_to_dict(rt_obj)
# replace field names
rt_q_dict['id'] = rt_obj.uuid
rt_q_dict['tenant_id'] = rt_obj.parent_uuid.replace('-', '')
rt_q_dict['name'] = rt_obj.name
rt_q_dict['fq_name'] = rt_obj.fq_name
# get route table routes
rt_q_dict['routes'] = rt_q_dict.pop('routes', None)
if rt_q_dict['routes']:
for route in rt_q_dict['routes']['route']:
if route['next_hop_type']:
route['next_hop'] = route['next_hop_type']
return rt_q_dict
#end _route_table_vnc_to_neutron
def _security_group_vnc_to_neutron(self, sg_obj):
sg_q_dict = {}
extra_dict = {}
extra_dict['contrail:fq_name'] = sg_obj.get_fq_name()
# replace field names
sg_q_dict['id'] = sg_obj.uuid
sg_q_dict['tenant_id'] = sg_obj.parent_uuid.replace('-', '')
if not sg_obj.display_name:
# for security groups created directly via vnc_api
sg_q_dict['name'] = sg_obj.get_fq_name()[-1]
else:
sg_q_dict['name'] = sg_obj.display_name
sg_q_dict['description'] = sg_obj.get_id_perms().get_description()
# get security group rules
sg_q_dict['security_group_rules'] = []
rule_list = self.security_group_rules_read(sg_obj.uuid, sg_obj)
if rule_list:
for rule in rule_list:
sg_q_dict['security_group_rules'].append(rule)
if self._contrail_extensions_enabled:
sg_q_dict.update(extra_dict)
return sg_q_dict
#end _security_group_vnc_to_neutron
def _security_group_neutron_to_vnc(self, sg_q, oper):
if oper == CREATE:
project_id = str(uuid.UUID(sg_q['tenant_id']))
def project_read(id):
try:
return self._project_read(proj_id=id)
except NoIdError:
return None
for i in range(10):
project_obj = project_read(project_id)
if project_obj:
break
gevent.sleep(2)
id_perms = IdPermsType(enable=True,
description=sg_q.get('description'))
sg_vnc = SecurityGroup(name=sg_q['name'],
parent_obj=project_obj,
id_perms=id_perms)
else:
sg_vnc = self._vnc_lib.security_group_read(id=sg_q['id'])
if 'name' in sg_q and sg_q['name']:
sg_vnc.display_name = sg_q['name']
if 'description' in sg_q:
id_perms = sg_vnc.get_id_perms()
id_perms.set_description(sg_q['description'])
sg_vnc.set_id_perms(id_perms)
return sg_vnc
#end _security_group_neutron_to_vnc
def _security_group_rule_vnc_to_neutron(self, sg_id, sg_rule, sg_obj=None):
sgr_q_dict = {}
if sg_id == None:
return sgr_q_dict
if not sg_obj:
try:
sg_obj = self._vnc_lib.security_group_read(id=sg_id)
except NoIdError:
self._raise_contrail_exception('SecurityGroupNotFound',
id=sg_id)
remote_cidr = None
remote_sg_uuid = None
saddr = sg_rule.get_src_addresses()[0]
daddr = sg_rule.get_dst_addresses()[0]
if saddr.get_security_group() == 'local':
direction = 'egress'
addr = daddr
elif daddr.get_security_group() == 'local':
direction = 'ingress'
addr = saddr
else:
self._raise_contrail_exception('SecurityGroupRuleNotFound',
id=sg_rule.get_rule_uuid())
if addr.get_subnet():
remote_cidr = '%s/%s' % (addr.get_subnet().get_ip_prefix(),
addr.get_subnet().get_ip_prefix_len())
elif addr.get_security_group():
if addr.get_security_group() != 'any' and \
addr.get_security_group() != 'local':
remote_sg = addr.get_security_group()
try:
if remote_sg != ':'.join(sg_obj.get_fq_name()):
remote_sg_obj = self._vnc_lib.security_group_read(fq_name_str=remote_sg)
else:
remote_sg_obj = sg_obj
remote_sg_uuid = remote_sg_obj.uuid
except NoIdError:
pass
sgr_q_dict['id'] = sg_rule.get_rule_uuid()
sgr_q_dict['tenant_id'] = sg_obj.parent_uuid.replace('-', '')
sgr_q_dict['security_group_id'] = sg_obj.uuid
sgr_q_dict['ethertype'] = 'IPv4'
sgr_q_dict['direction'] = direction
sgr_q_dict['protocol'] = sg_rule.get_protocol()
sgr_q_dict['port_range_min'] = sg_rule.get_dst_ports()[0].\
get_start_port()
sgr_q_dict['port_range_max'] = sg_rule.get_dst_ports()[0].\
get_end_port()
sgr_q_dict['remote_ip_prefix'] = remote_cidr
sgr_q_dict['remote_group_id'] = remote_sg_uuid
return sgr_q_dict
#end _security_group_rule_vnc_to_neutron
def _security_group_rule_neutron_to_vnc(self, sgr_q, oper):
if oper == CREATE:
port_min = 0
port_max = 65535
if sgr_q['port_range_min'] is not None:
port_min = sgr_q['port_range_min']
if sgr_q['port_range_max'] is not None:
port_max = sgr_q['port_range_max']
endpt = [AddressType(security_group='any')]
if sgr_q['remote_ip_prefix']:
cidr = sgr_q['remote_ip_prefix'].split('/')
pfx = cidr[0]
pfx_len = int(cidr[1])
endpt = [AddressType(subnet=SubnetType(pfx, pfx_len))]
elif sgr_q['remote_group_id']:
sg_obj = self._vnc_lib.security_group_read(
id=sgr_q['remote_group_id'])
endpt = [AddressType(security_group=sg_obj.get_fq_name_str())]
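            # VNC security-group rules are always stored as unidirectional
            # ('>') entries; the neutron direction is encoded by which side of
            # the rule carries the special 'local' address.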
if sgr_q['direction'] == 'ingress':
dir = '>'
local = endpt
remote = [AddressType(security_group='local')]
else:
dir = '>'
remote = endpt
local = [AddressType(security_group='local')]
if not sgr_q['protocol']:
sgr_q['protocol'] = 'any'
sgr_uuid = str(uuid.uuid4())
rule = PolicyRuleType(rule_uuid=sgr_uuid, direction=dir,
protocol=sgr_q['protocol'],
src_addresses=local,
src_ports=[PortType(0, 65535)],
dst_addresses=remote,
dst_ports=[PortType(port_min, port_max)])
return rule
#end _security_group_rule_neutron_to_vnc
def _network_neutron_to_vnc(self, network_q, oper):
net_name = network_q.get('name', None)
try:
external_attr = network_q['router:external']
except KeyError:
external_attr = attr.ATTR_NOT_SPECIFIED
if oper == CREATE:
project_id = str(uuid.UUID(network_q['tenant_id']))
def project_read(id):
try:
return self._project_read(proj_id=id)
except NoIdError:
return None
for i in range(10):
project_obj = project_read(project_id)
if project_obj:
break
gevent.sleep(2)
id_perms = IdPermsType(enable=True)
net_obj = VirtualNetwork(net_name, project_obj, id_perms=id_perms)
if external_attr == attr.ATTR_NOT_SPECIFIED:
net_obj.router_external = False
else:
net_obj.router_external = external_attr
if 'shared' in network_q:
net_obj.is_shared = network_q['shared']
else:
net_obj.is_shared = False
else: # READ/UPDATE/DELETE
net_obj = self._virtual_network_read(net_id=network_q['id'])
if oper == UPDATE:
if 'shared' in network_q:
net_obj.is_shared = network_q['shared']
if external_attr is not attr.ATTR_NOT_SPECIFIED:
net_obj.router_external = external_attr
if 'name' in network_q and network_q['name']:
net_obj.display_name = network_q['name']
id_perms = net_obj.get_id_perms()
if 'admin_state_up' in network_q:
id_perms.enable = network_q['admin_state_up']
net_obj.set_id_perms(id_perms)
if 'contrail:policys' in network_q:
policy_fq_names = network_q['contrail:policys']
# reset and add with newly specified list
net_obj.set_network_policy_list([], [])
seq = 0
for p_fq_name in policy_fq_names:
domain_name, project_name, policy_name = p_fq_name
domain_obj = Domain(domain_name)
project_obj = Project(project_name, domain_obj)
policy_obj = NetworkPolicy(policy_name, project_obj)
net_obj.add_network_policy(policy_obj,
VirtualNetworkPolicyType(
sequence=SequenceType(seq, 0)))
seq = seq + 1
if 'vpc:route_table' in network_q:
rt_fq_name = network_q['vpc:route_table']
if rt_fq_name:
try:
rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
net_obj.set_route_table(rt_obj)
except NoIdError:
# TODO add route table specific exception
self._raise_contrail_exception('NetworkNotFound',
net_id=net_obj.uuid)
return net_obj
#end _network_neutron_to_vnc
def _network_vnc_to_neutron(self, net_obj, net_repr='SHOW'):
net_q_dict = {}
extra_dict = {}
id_perms = net_obj.get_id_perms()
perms = id_perms.permissions
net_q_dict['id'] = net_obj.uuid
if not net_obj.display_name:
# for nets created directly via vnc_api
net_q_dict['name'] = net_obj.get_fq_name()[-1]
else:
net_q_dict['name'] = net_obj.display_name
extra_dict['contrail:fq_name'] = net_obj.get_fq_name()
net_q_dict['tenant_id'] = net_obj.parent_uuid.replace('-', '')
net_q_dict['admin_state_up'] = id_perms.enable
if net_obj.is_shared:
net_q_dict['shared'] = True
else:
net_q_dict['shared'] = False
net_q_dict['status'] = (constants.NET_STATUS_ACTIVE if id_perms.enable
else constants.NET_STATUS_DOWN)
if net_obj.router_external:
net_q_dict['router:external'] = True
else:
net_q_dict['router:external'] = False
if net_repr == 'SHOW' or net_repr == 'LIST':
extra_dict['contrail:instance_count'] = 0
net_policy_refs = net_obj.get_network_policy_refs()
if net_policy_refs:
sorted_refs = sorted(
net_policy_refs,
key=lambda t:(t['attr'].sequence.major,
t['attr'].sequence.minor))
extra_dict['contrail:policys'] = \
[np_ref['to'] for np_ref in sorted_refs]
rt_refs = net_obj.get_route_table_refs()
if rt_refs:
extra_dict['vpc:route_table'] = \
[rt_ref['to'] for rt_ref in rt_refs]
ipam_refs = net_obj.get_network_ipam_refs()
net_q_dict['subnets'] = []
if ipam_refs:
extra_dict['contrail:subnet_ipam'] = []
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
sn_dict = self._subnet_vnc_to_neutron(subnet, net_obj,
ipam_ref['to'])
net_q_dict['subnets'].append(sn_dict['id'])
sn_ipam = {}
sn_ipam['subnet_cidr'] = sn_dict['cidr']
sn_ipam['ipam_fq_name'] = ipam_ref['to']
extra_dict['contrail:subnet_ipam'].append(sn_ipam)
if self._contrail_extensions_enabled:
net_q_dict.update(extra_dict)
return net_q_dict
#end _network_vnc_to_neutron
def _subnet_neutron_to_vnc(self, subnet_q):
cidr = IPNetwork(subnet_q['cidr'])
pfx = str(cidr.network)
pfx_len = int(cidr.prefixlen)
if cidr.version != 4 and cidr.version != 6:
self._raise_contrail_exception('BadRequest',
resource='subnet', msg='Unknown IP family')
elif cidr.version != int(subnet_q['ip_version']):
msg = _("cidr '%s' does not match the ip_version '%s'") \
%(subnet_q['cidr'], subnet_q['ip_version'])
self._raise_contrail_exception('InvalidInput', error_message=msg)
if 'gateway_ip' in subnet_q:
default_gw = subnet_q['gateway_ip']
else:
# Assigned first+1 from cidr
default_gw = str(IPAddress(cidr.first + 1))
if 'allocation_pools' in subnet_q:
alloc_pools = subnet_q['allocation_pools']
else:
# Assigned by address manager
alloc_pools = None
dhcp_option_list = None
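# Neutron dns_nameservers are stored on the subnet as DHCP option 6
# (domain-name-servers), with the server list joined into a single
# space-separated value.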
if 'dns_nameservers' in subnet_q and subnet_q['dns_nameservers']:
dhcp_options = []
dns_servers = " ".join(subnet_q['dns_nameservers'])
if dns_servers:
dhcp_options.append(DhcpOptionType(dhcp_option_name='6',
dhcp_option_value=dns_servers))
if dhcp_options:
dhcp_option_list = DhcpOptionsListType(dhcp_options)
host_route_list = None
if 'host_routes' in subnet_q and subnet_q['host_routes']:
host_routes = []
for host_route in subnet_q['host_routes']:
host_routes.append(RouteType(prefix=host_route['destination'],
next_hop=host_route['nexthop']))
if host_routes:
host_route_list = RouteTableType(host_routes)
if 'enable_dhcp' in subnet_q:
dhcp_config = subnet_q['enable_dhcp']
else:
dhcp_config = None
sn_name = subnet_q.get('name')
subnet_vnc = IpamSubnetType(subnet=SubnetType(pfx, pfx_len),
default_gateway=default_gw,
enable_dhcp=dhcp_config,
dns_nameservers=None,
allocation_pools=alloc_pools,
addr_from_start=True,
dhcp_option_list=dhcp_option_list,
host_routes=host_route_list,
subnet_name=sn_name,
subnet_uuid=str(uuid.uuid4()))
return subnet_vnc
#end _subnet_neutron_to_vnc
def _subnet_vnc_to_neutron(self, subnet_vnc, net_obj, ipam_fq_name):
sn_q_dict = {}
sn_name = subnet_vnc.get_subnet_name()
if sn_name is not None:
sn_q_dict['name'] = sn_name
else:
sn_q_dict['name'] = ''
sn_q_dict['tenant_id'] = net_obj.parent_uuid.replace('-', '')
sn_q_dict['network_id'] = net_obj.uuid
sn_q_dict['ipv6_ra_mode'] = None
sn_q_dict['ipv6_address_mode'] = None
cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
sn_q_dict['cidr'] = cidr
sn_q_dict['ip_version'] = IPNetwork(cidr).version # 4 or 6
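# Contrail keeps subnets inside the virtual-network object; map the
# (network, subnet) key to a stable Neutron subnet UUID, creating the
# mapping on first use.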
subnet_key = self._subnet_vnc_get_key(subnet_vnc, net_obj.uuid)
sn_id = self._subnet_vnc_read_or_create_mapping(id=subnet_vnc.subnet_uuid,
key=subnet_key)
sn_q_dict['id'] = sn_id
sn_q_dict['gateway_ip'] = subnet_vnc.default_gateway
alloc_obj_list = subnet_vnc.get_allocation_pools()
allocation_pools = []
for alloc_obj in alloc_obj_list:
first_ip = alloc_obj.get_start()
last_ip = alloc_obj.get_end()
alloc_dict = {'first_ip':first_ip, 'last_ip':last_ip}
allocation_pools.append(alloc_dict)
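# No allocation pools defined: derive one spanning the usable range of
# the CIDR, skipping the first host address when it is the gateway.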
if not allocation_pools:
if (int(IPNetwork(sn_q_dict['gateway_ip']).network) ==
int(IPNetwork(cidr).network+1)):
first_ip = str(IPNetwork(cidr).network + 2)
else:
first_ip = str(IPNetwork(cidr).network + 1)
last_ip = str(IPNetwork(cidr).broadcast - 1)
cidr_pool = {'first_ip':first_ip, 'last_ip':last_ip}
allocation_pools.append(cidr_pool)
sn_q_dict['allocation_pools'] = allocation_pools
sn_q_dict['enable_dhcp'] = subnet_vnc.get_enable_dhcp()
nameserver_dict_list = list()
dhcp_option_list = subnet_vnc.get_dhcp_option_list()
if dhcp_option_list:
for dhcp_option in dhcp_option_list.dhcp_option:
if dhcp_option.get_dhcp_option_name() == '6':
dns_servers = dhcp_option.get_dhcp_option_value().split()
for dns_server in dns_servers:
nameserver_entry = {'address': dns_server,
'subnet_id': sn_id}
nameserver_dict_list.append(nameserver_entry)
sn_q_dict['dns_nameservers'] = nameserver_dict_list
host_route_dict_list = list()
host_routes = subnet_vnc.get_host_routes()
if host_routes:
for host_route in host_routes.route:
host_route_entry = {'destination': host_route.get_prefix(),
'nexthop': host_route.get_next_hop(),
'subnet_id': sn_id}
host_route_dict_list.append(host_route_entry)
sn_q_dict['routes'] = host_route_dict_list
if net_obj.is_shared:
sn_q_dict['shared'] = True
else:
sn_q_dict['shared'] = False
return sn_q_dict
#end _subnet_vnc_to_neutron
def _ipam_neutron_to_vnc(self, ipam_q, oper):
ipam_name = ipam_q.get('name', None)
if oper == CREATE:
project_id = str(uuid.UUID(ipam_q['tenant_id']))
project_obj = self._project_read(proj_id=project_id)
ipam_obj = NetworkIpam(ipam_name, project_obj)
else: # READ/UPDATE/DELETE
ipam_obj = self._vnc_lib.network_ipam_read(id=ipam_q['id'])
options_vnc = DhcpOptionsListType()
if ipam_q['mgmt']:
#for opt_q in ipam_q['mgmt'].get('options', []):
# options_vnc.add_dhcp_option(DhcpOptionType(opt_q['option'],
# opt_q['value']))
#ipam_mgmt_vnc = IpamType.factory(
# ipam_method = ipam_q['mgmt']['method'],
# dhcp_option_list = options_vnc)
ipam_obj.set_network_ipam_mgmt(IpamType.factory(**ipam_q['mgmt']))
return ipam_obj
#end _ipam_neutron_to_vnc
def _ipam_vnc_to_neutron(self, ipam_obj):
ipam_q_dict = self._obj_to_dict(ipam_obj)
# replace field names
ipam_q_dict['id'] = ipam_q_dict.pop('uuid')
ipam_q_dict['name'] = ipam_obj.name
ipam_q_dict['tenant_id'] = ipam_obj.parent_uuid.replace('-', '')
ipam_q_dict['mgmt'] = ipam_q_dict.pop('network_ipam_mgmt', None)
net_back_refs = ipam_q_dict.pop('virtual_network_back_refs', None)
if net_back_refs:
ipam_q_dict['nets_using'] = []
for net_back_ref in net_back_refs:
net_fq_name = net_back_ref['to']
ipam_q_dict['nets_using'].append(net_fq_name)
return ipam_q_dict
#end _ipam_vnc_to_neutron
def _policy_neutron_to_vnc(self, policy_q, oper):
policy_name = policy_q.get('name', None)
if oper == CREATE:
project_id = str(uuid.UUID(policy_q['tenant_id']))
project_obj = self._project_read(proj_id=project_id)
policy_obj = NetworkPolicy(policy_name, project_obj)
else: # READ/UPDATE/DELETE
policy_obj = self._vnc_lib.network_policy_read(id=policy_q['id'])
policy_obj.set_network_policy_entries(
PolicyEntriesType.factory(**policy_q['entries']))
return policy_obj
#end _policy_neutron_to_vnc
def _policy_vnc_to_neutron(self, policy_obj):
policy_q_dict = self._obj_to_dict(policy_obj)
# replace field names
policy_q_dict['id'] = policy_q_dict.pop('uuid')
policy_q_dict['name'] = policy_obj.name
policy_q_dict['tenant_id'] = policy_obj.parent_uuid.replace('-', '')
policy_q_dict['entries'] = policy_q_dict.pop('network_policy_entries',
None)
net_back_refs = policy_obj.get_virtual_network_back_refs()
if net_back_refs:
policy_q_dict['nets_using'] = []
for net_back_ref in net_back_refs:
net_fq_name = net_back_ref['to']
policy_q_dict['nets_using'].append(net_fq_name)
return policy_q_dict
#end _policy_vnc_to_neutron
def _router_neutron_to_vnc(self, router_q, oper):
rtr_name = router_q.get('name', None)
if oper == CREATE:
project_id = str(uuid.UUID(router_q['tenant_id']))
def project_read(id):
try:
return self._project_read(proj_id=id)
except NoIdError:
return None
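# The project may not be visible yet (likely still being synced from
# keystone), so retry for roughly 20 seconds before giving up.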
for i in range(10):
project_obj = project_read(project_id)
if project_obj:
break
gevent.sleep(2)
id_perms = IdPermsType(enable=True)
rtr_obj = LogicalRouter(rtr_name, project_obj, id_perms=id_perms)
else: # READ/UPDATE/DELETE
rtr_obj = self._logical_router_read(rtr_id=router_q['id'])
id_perms = rtr_obj.get_id_perms()
if 'admin_state_up' in router_q:
id_perms.enable = router_q['admin_state_up']
rtr_obj.set_id_perms(id_perms)
if 'name' in router_q and router_q['name']:
rtr_obj.display_name = router_q['name']
return rtr_obj
#end _router_neutron_to_vnc
def _router_vnc_to_neutron(self, rtr_obj, rtr_repr='SHOW'):
rtr_q_dict = {}
extra_dict = {}
extra_dict['contrail:fq_name'] = rtr_obj.get_fq_name()
rtr_q_dict['id'] = rtr_obj.uuid
if not rtr_obj.display_name:
rtr_q_dict['name'] = rtr_obj.get_fq_name()[-1]
else:
rtr_q_dict['name'] = rtr_obj.display_name
rtr_q_dict['tenant_id'] = rtr_obj.parent_uuid.replace('-', '')
rtr_q_dict['admin_state_up'] = rtr_obj.get_id_perms().enable
rtr_q_dict['shared'] = False
rtr_q_dict['status'] = constants.NET_STATUS_ACTIVE
rtr_q_dict['gw_port_id'] = None
rtr_q_dict['external_gateway_info'] = None
vn_refs = rtr_obj.get_virtual_network_refs()
if vn_refs:
rtr_q_dict['external_gateway_info'] = {'network_id':
vn_refs[0]['uuid']}
if self._contrail_extensions_enabled:
rtr_q_dict.update(extra_dict)
return rtr_q_dict
#end _router_vnc_to_neutron
def _floatingip_neutron_to_vnc(self, fip_q, oper):
if oper == CREATE:
# TODO for now create from default pool, later
# use first available pool on net
net_id = fip_q['floating_network_id']
try:
fq_name = self._fip_pool_list_network(net_id)[0]['fq_name']
except IndexError:
# IndexError can happen when attempting to retrieve a
# floating ip pool from a private network.
msg = "Network %s doesn't provide a floatingip pool" % net_id
self._raise_contrail_exception('BadRequest',
resource="floatingip", msg=msg)
fip_pool_obj = self._vnc_lib.floating_ip_pool_read(fq_name=fq_name)
fip_name = str(uuid.uuid4())
fip_obj = FloatingIp(fip_name, fip_pool_obj)
fip_obj.uuid = fip_name
proj_id = str(uuid.UUID(fip_q['tenant_id']))
proj_obj = self._project_read(proj_id=proj_id)
fip_obj.set_project(proj_obj)
else: # READ/UPDATE/DELETE
fip_obj = self._vnc_lib.floating_ip_read(id=fip_q['id'])
if fip_q.get('port_id'):
try:
port_obj = self._virtual_machine_interface_read(
port_id=fip_q['port_id'])
except NoIdError:
self._raise_contrail_exception('PortNotFound',
resource='floatingip',
port_id=fip_q['port_id'])
fip_obj.set_virtual_machine_interface(port_obj)
else:
fip_obj.set_virtual_machine_interface_list([])
if fip_q.get('fixed_ip_address'):
fip_obj.set_floating_ip_fixed_ip_address(fip_q['fixed_ip_address'])
else:
# fixed_ip_address not specified, pick from port_obj in create,
# reset in case of disassociate
port_refs = fip_obj.get_virtual_machine_interface_refs()
if not port_refs:
fip_obj.set_floating_ip_fixed_ip_address(None)
else:
port_obj = self._virtual_machine_interface_read(
port_id=port_refs[0]['uuid'], fields=['instance_ip_back_refs'])
iip_refs = port_obj.get_instance_ip_back_refs()
if iip_refs:
iip_obj = self._instance_ip_read(instance_ip_id=iip_refs[0]['uuid'])
fip_obj.set_floating_ip_fixed_ip_address(iip_obj.get_instance_ip_address())
return fip_obj
#end _floatingip_neutron_to_vnc
def _floatingip_vnc_to_neutron(self, fip_obj):
fip_q_dict = {}
floating_net_id = self._vnc_lib.fq_name_to_id('virtual-network',
fip_obj.get_fq_name()[:-2])
tenant_id = fip_obj.get_project_refs()[0]['uuid'].replace('-', '')
port_id = None
fixed_ip = None
router_id = None
port_refs = fip_obj.get_virtual_machine_interface_refs()
if port_refs:
port_id = port_refs[0]['uuid']
internal_net_id = None
# find router_id from port
router_list = self._router_list_project(
fip_obj.get_project_refs()[0]['uuid'], detail=True)
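# A router is treated as this floating IP's router when it has a
# gateway on the floating network and an interface on the same
# internal network as the associated port.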
for router_obj in router_list or []:
for net in router_obj.get_virtual_network_refs() or []:
if net['uuid'] != floating_net_id:
continue
for vmi in (router_obj.get_virtual_machine_interface_refs()
or []):
vmi_obj = self._vnc_lib.virtual_machine_interface_read(
id=vmi['uuid'])
if internal_net_id is None:
port_obj = self._virtual_machine_interface_read(port_id=port_id)
internal_net_id = port_obj.get_virtual_network_refs()[0]['uuid']
if (vmi_obj.get_virtual_network_refs()[0]['uuid'] ==
internal_net_id):
router_id = router_obj.uuid
break
if router_id:
break
if router_id:
break
fip_q_dict['id'] = fip_obj.uuid
fip_q_dict['tenant_id'] = tenant_id
fip_q_dict['floating_ip_address'] = fip_obj.get_floating_ip_address()
fip_q_dict['floating_network_id'] = floating_net_id
fip_q_dict['router_id'] = router_id
fip_q_dict['port_id'] = port_id
fip_q_dict['fixed_ip_address'] = fip_obj.get_floating_ip_fixed_ip_address()
fip_q_dict['status'] = constants.PORT_STATUS_ACTIVE
return fip_q_dict
#end _floatingip_vnc_to_neutron
def _port_neutron_to_vnc(self, port_q, net_obj, oper):
if oper == CREATE:
project_id = str(uuid.UUID(port_q['tenant_id']))
proj_obj = self._project_read(proj_id=project_id)
id_perms = IdPermsType(enable=True)
port_uuid = str(uuid.uuid4())
if port_q.get('name'):
port_name = port_q['name']
else:
port_name = port_uuid
port_obj = VirtualMachineInterface(port_name, proj_obj,
id_perms=id_perms)
port_obj.uuid = port_uuid
port_obj.set_virtual_network(net_obj)
if ('mac_address' in port_q and port_q['mac_address']):
mac_addrs_obj = MacAddressesType()
mac_addrs_obj.set_mac_address([port_q['mac_address']])
port_obj.set_virtual_machine_interface_mac_addresses(mac_addrs_obj)
port_obj.set_security_group_list([])
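# When security_groups is absent or is a bare object() sentinel
# (presumably Neutron's ATTR_NOT_SPECIFIED), fall back to the
# project's 'default' security group.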
if ('security_groups' not in port_q or
port_q['security_groups'].__class__ is object):
sg_obj = SecurityGroup("default", proj_obj)
port_obj.add_security_group(sg_obj)
else: # READ/UPDATE/DELETE
port_obj = self._virtual_machine_interface_read(port_id=port_q['id'])
if 'name' in port_q and port_q['name']:
port_obj.display_name = port_q['name']
if port_q.get('device_owner') != constants.DEVICE_OWNER_ROUTER_INTF:
instance_name = port_q.get('device_id')
if instance_name:
try:
instance_obj = self._ensure_instance_exists(instance_name)
port_obj.set_virtual_machine(instance_obj)
except RefsExistError as e:
self._raise_contrail_exception('BadRequest',
resource='port', msg=str(e))
if 'device_owner' in port_q:
port_obj.set_virtual_machine_interface_device_owner(port_q.get('device_owner'))
if 'security_groups' in port_q:
port_obj.set_security_group_list([])
for sg_id in port_q.get('security_groups') or []:
# TODO optimize to not read sg (only uuid/fqn needed)
sg_obj = self._vnc_lib.security_group_read(id=sg_id)
port_obj.add_security_group(sg_obj)
id_perms = port_obj.get_id_perms()
if 'admin_state_up' in port_q:
id_perms.enable = port_q['admin_state_up']
port_obj.set_id_perms(id_perms)
if ('extra_dhcp_opts' in port_q):
dhcp_options = []
if port_q['extra_dhcp_opts']:
for option_pair in port_q['extra_dhcp_opts']:
option = \
DhcpOptionType(dhcp_option_name=option_pair['opt_name'],
dhcp_option_value=option_pair['opt_value'])
dhcp_options.append(option)
if dhcp_options:
olist = DhcpOptionsListType(dhcp_options)
port_obj.set_virtual_machine_interface_dhcp_option_list(olist)
else:
port_obj.set_virtual_machine_interface_dhcp_option_list(None)
if ('allowed_address_pairs' in port_q):
aap_array = []
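# For each allowed address pair: default the MAC to the port's own MAC
# when none is given, and treat a bare IP (no prefix length) as a /32.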
if port_q['allowed_address_pairs']:
for address_pair in port_q['allowed_address_pairs']:
mac_refs = \
port_obj.get_virtual_machine_interface_mac_addresses()
mode = u'active-standby'
if 'mac_address' not in address_pair:
if mac_refs:
address_pair['mac_address'] = mac_refs.mac_address[0]
cidr = address_pair['ip_address'].split('/')
if len(cidr) == 1:
subnet = SubnetType(cidr[0], 32)
elif len(cidr) == 2:
subnet = SubnetType(cidr[0], int(cidr[1]))
else:
self._raise_contrail_exception(
'BadRequest', resource='port',
msg='Invalid address pair argument')
ip_back_refs = port_obj.get_instance_ip_back_refs()
if ip_back_refs:
for ip_back_ref in ip_back_refs:
iip_uuid = ip_back_ref['uuid']
try:
ip_obj = self._instance_ip_read(instance_ip_id=\
ip_back_ref['uuid'])
except NoIdError:
continue
ip_addr = ip_obj.get_instance_ip_address()
if ((ip_addr == address_pair['ip_address']) and
(mac_refs.mac_address[0] == address_pair['mac_address'])):
self._raise_contrail_exception(
'AddressPairMatchesPortFixedIPAndMac')
aap = AllowedAddressPair(subnet,
address_pair['mac_address'], mode)
aap_array.append(aap)
if aap_array:
aaps = AllowedAddressPairs()
aaps.set_allowed_address_pair(aap_array)
port_obj.set_virtual_machine_interface_allowed_address_pairs(aaps)
else:
port_obj.set_virtual_machine_interface_allowed_address_pairs(None)
if 'fixed_ips' in port_q:
net_id = (port_q.get('network_id') or
port_obj.get_virtual_network_refs()[0]['uuid'])
port_obj_ips = None
for fixed_ip in port_q.get('fixed_ips', []):
if 'ip_address' in fixed_ip:
# read instance ip addrs on port only once
if port_obj_ips is None:
port_obj_ips = []
ip_back_refs = getattr(port_obj, 'instance_ip_back_refs', None)
if ip_back_refs:
for ip_back_ref in ip_back_refs:
try:
ip_obj = self._instance_ip_read(
instance_ip_id=ip_back_ref['uuid'])
except NoIdError:
continue
port_obj_ips.append(ip_obj.get_instance_ip_address())
ip_addr = fixed_ip['ip_address']
if ip_addr in port_obj_ips:
continue
if self._ip_addr_in_net_id(ip_addr, net_id):
self._raise_contrail_exception(
'IpAddressInUse', net_id=net_id,
ip_address=ip_addr)
return port_obj
#end _port_neutron_to_vnc
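# port_req_memo caches network, subnet and instance-ip lookups so that
# list operations do not re-read the same objects for every port.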
def _port_vnc_to_neutron(self, port_obj, port_req_memo=None):
port_q_dict = {}
extra_dict = {}
extra_dict['contrail:fq_name'] = port_obj.get_fq_name()
if not port_obj.display_name:
# for ports created directly via vnc_api
port_q_dict['name'] = port_obj.get_fq_name()[-1]
else:
port_q_dict['name'] = port_obj.display_name
port_q_dict['id'] = port_obj.uuid
net_refs = port_obj.get_virtual_network_refs()
if net_refs:
net_id = net_refs[0]['uuid']
else:
# TODO hack to force network_id on default port
# as neutron needs it
net_id = self._vnc_lib.obj_to_id(VirtualNetwork())
if port_req_memo is None:
# create a memo only for this port's conversion in this method
port_req_memo = {}
if 'networks' not in port_req_memo:
port_req_memo['networks'] = {}
if 'subnets' not in port_req_memo:
port_req_memo['subnets'] = {}
try:
net_obj = port_req_memo['networks'][net_id]
except KeyError:
net_obj = self._virtual_network_read(net_id=net_id)
port_req_memo['networks'][net_id] = net_obj
subnets_info = self._virtual_network_to_subnets(net_obj)
port_req_memo['subnets'][net_id] = subnets_info
if port_obj.parent_type != "project":
proj_id = net_obj.parent_uuid.replace('-', '')
else:
proj_id = port_obj.parent_uuid.replace('-', '')
port_q_dict['tenant_id'] = proj_id
port_q_dict['network_id'] = net_id
# TODO RHS below may need fixing
port_q_dict['mac_address'] = ''
mac_refs = port_obj.get_virtual_machine_interface_mac_addresses()
if mac_refs:
port_q_dict['mac_address'] = mac_refs.mac_address[0]
dhcp_options_list = port_obj.get_virtual_machine_interface_dhcp_option_list()
if dhcp_options_list and dhcp_options_list.dhcp_option:
dhcp_options = []
for dhcp_option in dhcp_options_list.dhcp_option:
pair = {"opt_value": dhcp_option.dhcp_option_value,
"opt_name": dhcp_option.dhcp_option_name}
dhcp_options.append(pair)
port_q_dict['extra_dhcp_opts'] = dhcp_options
allowed_address_pairs = port_obj.get_virtual_machine_interface_allowed_address_pairs()
if allowed_address_pairs and allowed_address_pairs.allowed_address_pair:
address_pairs = []
for aap in allowed_address_pairs.allowed_address_pair:
pair = {"ip_address": '%s/%s' % (aap.ip.get_ip_prefix(),
aap.ip.get_ip_prefix_len()),
"mac_address": aap.mac}
address_pairs.append(pair)
port_q_dict['allowed_address_pairs'] = address_pairs
port_q_dict['fixed_ips'] = []
ip_back_refs = getattr(port_obj, 'instance_ip_back_refs', None)
if ip_back_refs:
for ip_back_ref in ip_back_refs:
iip_uuid = ip_back_ref['uuid']
# fetch it from request context cache/memo if there
try:
ip_obj = port_req_memo['instance-ips'][iip_uuid]
except KeyError:
try:
ip_obj = self._instance_ip_read(
instance_ip_id=ip_back_ref['uuid'])
except NoIdError:
continue
ip_addr = ip_obj.get_instance_ip_address()
ip_q_dict = {}
ip_q_dict['ip_address'] = ip_addr
ip_q_dict['subnet_id'] = self._ip_address_to_subnet_id(ip_addr,
net_obj)
port_q_dict['fixed_ips'].append(ip_q_dict)
port_q_dict['security_groups'] = []
sg_refs = port_obj.get_security_group_refs()
for sg_ref in sg_refs or []:
port_q_dict['security_groups'].append(sg_ref['uuid'])
port_q_dict['admin_state_up'] = port_obj.get_id_perms().enable
# port can be router interface or vm interface
# for perf read logical_router_back_ref only when we have to
port_parent_name = port_obj.parent_name
router_refs = getattr(port_obj, 'logical_router_back_refs', None)
if router_refs is not None:
port_q_dict['device_id'] = router_refs[0]['uuid']
elif port_obj.parent_type == 'virtual-machine':
port_q_dict['device_id'] = port_obj.parent_name
elif port_obj.get_virtual_machine_refs() is not None:
port_q_dict['device_id'] = \
port_obj.get_virtual_machine_refs()[0]['to'][-1]
else:
port_q_dict['device_id'] = ''
port_q_dict['device_owner'] = \
port_obj.get_virtual_machine_interface_device_owner() or ''
if port_q_dict['device_id']:
port_q_dict['status'] = constants.PORT_STATUS_ACTIVE
else:
port_q_dict['status'] = constants.PORT_STATUS_DOWN
if self._contrail_extensions_enabled:
port_q_dict.update(extra_dict)
return port_q_dict
#end _port_vnc_to_neutron
def _port_get_host_prefixes(self, host_routes, subnet_cidr):
"""This function returns the host prefixes
Eg. If host_routes have the below routes
---------------------------
|destination | next hop |
---------------------------
| 10.0.0.0/24 | 8.0.0.2 |
| 12.0.0.0/24 | 10.0.0.4 |
| 14.0.0.0/24 | 12.0.0.23 |
| 16.0.0.0/24 | 8.0.0.4 |
| 15.0.0.0/24 | 16.0.0.2 |
| 20.0.0.0/24 | 8.0.0.12 |
---------------------------
subnet_cidr is 8.0.0.0/24
This function returns the dictionary
'8.0.0.2' : ['10.0.0.0/24', '12.0.0.0/24', '14.0.0.0/24']
'8.0.0.4' : ['16.0.0.0/24', '15.0.0.0/24']
'8.0.0.12': ['20.0.0.0/24']
"""
temp_host_routes = list(host_routes)
cidr_ip_set = IPSet([subnet_cidr])
host_route_dict = {}
for route in temp_host_routes[:]:
next_hop = route.get_next_hop()
if IPAddress(next_hop) in cidr_ip_set:
if next_hop in host_route_dict:
host_route_dict[next_hop].append(route.get_prefix())
else:
host_route_dict[next_hop] = [route.get_prefix()]
temp_host_routes.remove(route)
# look for indirect routes
if temp_host_routes:
for ipaddr in host_route_dict:
self._port_update_prefixes(host_route_dict[ipaddr],
temp_host_routes)
return host_route_dict
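# Repeatedly fold unmatched routes whose next hop falls inside an
# already matched prefix, so chains of indirect routes are resolved.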
def _port_update_prefixes(self, matched_route_list, unmatched_host_routes):
process_host_routes = True
while process_host_routes:
process_host_routes = False
for route in unmatched_host_routes:
ip_addr = IPAddress(route.get_next_hop())
if ip_addr in IPSet(matched_route_list):
matched_route_list.append(route.get_prefix())
unmatched_host_routes.remove(route)
process_host_routes = True
def _port_check_and_add_iface_route_table(self, fixed_ips, net_obj,
port_obj):
ipam_refs = net_obj.get_network_ipam_refs()
if not ipam_refs:
return
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
host_routes = subnet.get_host_routes()
if host_routes is None:
continue
subnet_key = self._subnet_vnc_get_key(subnet, net_obj.uuid)
sn_id = self._subnet_vnc_read_mapping(key=subnet_key)
subnet_cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
for ip_addr in [fixed_ip['ip_address'] for fixed_ip in \
fixed_ips if fixed_ip['subnet_id'] == sn_id]:
host_prefixes = self._port_get_host_prefixes(host_routes.route,
subnet_cidr)
if ip_addr in host_prefixes:
self._port_add_iface_route_table(host_prefixes[ip_addr],
port_obj, sn_id)
def _port_add_iface_route_table(self, route_prefix_list, port_obj,
subnet_id):
project_obj = self._project_read(proj_id=port_obj.parent_uuid)
intf_rt_name = '%s_%s_%s' % (_IFACE_ROUTE_TABLE_NAME_PREFIX,
subnet_id, port_obj.uuid)
intf_rt_fq_name = list(project_obj.get_fq_name())
intf_rt_fq_name.append(intf_rt_name)
try:
intf_route_table_obj = self._vnc_lib.interface_route_table_read(
fq_name=intf_rt_fq_name)
except vnc_exc.NoIdError:
route_table = RouteTableType(intf_rt_name)
route_table.set_route([])
intf_route_table = InterfaceRouteTable(
interface_route_table_routes=route_table,
parent_obj=project_obj,
name=intf_rt_name)
intf_route_table_id = self._vnc_lib.interface_route_table_create(
intf_route_table)
intf_route_table_obj = self._vnc_lib.interface_route_table_read(
id=intf_route_table_id)
rt_routes = intf_route_table_obj.get_interface_route_table_routes()
routes = rt_routes.get_route()
# delete any old routes
routes = []
for prefix in route_prefix_list:
routes.append(RouteType(prefix=prefix))
rt_routes.set_route(routes)
intf_route_table_obj.set_interface_route_table_routes(rt_routes)
self._vnc_lib.interface_route_table_update(intf_route_table_obj)
port_obj.add_interface_route_table(intf_route_table_obj)
self._vnc_lib.virtual_machine_interface_update(port_obj)
def _port_update_iface_route_table(self, net_obj, subnet_cidr, subnet_id,
new_host_routes, old_host_routes=None):
old_host_prefixes = {}
if old_host_routes:
old_host_prefixes = self._port_get_host_prefixes(old_host_routes.route,
subnet_cidr)
new_host_prefixes = self._port_get_host_prefixes(new_host_routes,
subnet_cidr)
for ipaddr, prefixes in old_host_prefixes.items():
if ipaddr in new_host_prefixes:
need_update = False
if len(prefixes) == len(new_host_prefixes[ipaddr]):
for prefix in prefixes:
if prefix not in new_host_prefixes[ipaddr]:
need_update = True
break
else:
need_update = True
if need_update:
old_host_prefixes.pop(ipaddr)
else:
# old and new prefix lists match for this next hop,
# so nothing needs to be updated
old_host_prefixes.pop(ipaddr)
new_host_prefixes.pop(ipaddr)
if not new_host_prefixes and not old_host_prefixes:
# nothing to be done as old_host_routes and
# new_host_routes match exactly
return
# get the list of all the ip objs for this network
ipobjs = self._instance_ip_list(back_ref_id=[net_obj.uuid])
for ipobj in ipobjs:
ipaddr = ipobj.get_instance_ip_address()
if ipaddr in old_host_prefixes:
self._port_remove_iface_route_table(ipobj, subnet_id)
continue
if ipaddr in new_host_prefixes:
port_back_refs = ipobj.get_virtual_machine_interface_refs()
for port_ref in port_back_refs:
port_obj = self._virtual_machine_interface_read(
port_id=port_ref['uuid'])
self._port_add_iface_route_table(new_host_prefixes[ipaddr],
port_obj, subnet_id)
def _port_remove_iface_route_table(self, ipobj, subnet_id):
port_refs = ipobj.get_virtual_machine_interface_refs()
for port_ref in port_refs or []:
port_obj = self._virtual_machine_interface_read(port_id=port_ref['uuid'])
intf_rt_name = '%s_%s_%s' % (_IFACE_ROUTE_TABLE_NAME_PREFIX,
subnet_id, port_obj.uuid)
for rt_ref in port_obj.get_interface_route_table_refs() or []:
if rt_ref['to'][2] != intf_rt_name:
continue
try:
intf_route_table_obj = self._vnc_lib.interface_route_table_read(
id=rt_ref['uuid'])
port_obj.del_interface_route_table(intf_route_table_obj)
self._vnc_lib.virtual_machine_interface_update(port_obj)
self._vnc_lib.interface_route_table_delete(id=rt_ref['uuid'])
except vnc_exc.NoIdError:
pass
# public methods
# network api handlers
def network_create(self, network_q):
net_obj = self._network_neutron_to_vnc(network_q, CREATE)
try:
net_uuid = self._resource_create('virtual_network', net_obj)
except RefsExistError:
self._raise_contrail_exception('BadRequest',
resource='network', msg='Network already exists')
if net_obj.router_external:
fip_pool_obj = FloatingIpPool('floating-ip-pool', net_obj)
self._floating_ip_pool_create(fip_pool_obj)
ret_network_q = self._network_vnc_to_neutron(net_obj, net_repr='SHOW')
return ret_network_q
#end network_create
def network_read(self, net_uuid, fields=None):
# see if we can return fast...
#if fields and (len(fields) == 1) and fields[0] == 'tenant_id':
# tenant_id = self._get_obj_tenant_id('network', net_uuid)
# return {'id': net_uuid, 'tenant_id': tenant_id}
try:
net_obj = self._network_read(net_uuid)
except NoIdError:
self._raise_contrail_exception('NetworkNotFound', net_id=net_uuid)
return self._network_vnc_to_neutron(net_obj, net_repr='SHOW')
#end network_read
def network_update(self, net_id, network_q):
net_obj = self._virtual_network_read(net_id=net_id)
router_external = net_obj.get_router_external()
shared = net_obj.get_is_shared()
network_q['id'] = net_id
net_obj = self._network_neutron_to_vnc(network_q, UPDATE)
if net_obj.router_external and not router_external:
fip_pools = net_obj.get_floating_ip_pools()
fip_pool_obj = FloatingIpPool('floating-ip-pool', net_obj)
self._floating_ip_pool_create(fip_pool_obj)
if router_external and not net_obj.router_external:
fip_pools = net_obj.get_floating_ip_pools()
if fip_pools:
for fip_pool in fip_pools:
try:
pool_id = fip_pool['uuid']
self._floating_ip_pool_delete(fip_pool_id=pool_id)
except RefsExistError:
self._raise_contrail_exception('NetworkInUse',
net_id=net_id)
if shared and not net_obj.is_shared:
for vmi in net_obj.get_virtual_machine_interface_back_refs() or []:
vmi_obj = self._virtual_machine_interface_read(port_id=vmi['uuid'])
if (vmi_obj.parent_type == 'project' and
vmi_obj.parent_uuid != net_obj.parent_uuid):
self._raise_contrail_exception(
'InvalidSharedSetting',
network=net_obj.display_name)
self._virtual_network_update(net_obj)
ret_network_q = self._network_vnc_to_neutron(net_obj, net_repr='SHOW')
return ret_network_q
#end network_update
def network_delete(self, net_id):
self._virtual_network_delete(net_id=net_id)
#end network_delete
# TODO request based on filter contents
def network_list(self, context=None, filters=None):
ret_dict = {}
def _collect_without_prune(net_ids):
for net_id in net_ids:
try:
net_obj = self._network_read(net_id)
net_info = self._network_vnc_to_neutron(net_obj,
net_repr='LIST')
ret_dict[net_id] = net_info
except NoIdError:
pass
#end _collect_without_prune
# collect phase
all_net_objs = [] # all n/ws in all projects
if context and not context['is_admin']:
if filters and 'id' in filters:
_collect_without_prune(filters['id'])
elif filters and 'name' in filters:
net_objs = self._network_list_project(context['tenant'])
all_net_objs.extend(net_objs)
all_net_objs.extend(self._network_list_shared())
all_net_objs.extend(self._network_list_router_external())
elif (filters and 'shared' in filters and filters['shared'][0] and
'router:external' not in filters):
all_net_objs.extend(self._network_list_shared())
elif (filters and 'router:external' in filters and
'shared' not in filters):
all_net_objs.extend(self._network_list_router_external())
elif (filters and 'router:external' in filters and
'shared' in filters):
all_net_objs.extend(self._network_list_shared_and_ext())
else:
project_uuid = str(uuid.UUID(context['tenant']))
if not filters:
all_net_objs.extend(self._network_list_router_external())
all_net_objs.extend(self._network_list_shared())
all_net_objs.extend(self._network_list_project(project_uuid))
# admin role from here on
elif filters and 'tenant_id' in filters:
# project-id is present
if 'id' in filters:
# required networks are also specified,
# just read and populate ret_dict
# prune is skipped because all_net_objs is empty
_collect_without_prune(filters['id'])
else:
# read all networks in project, and prune below
proj_ids = self._validate_project_ids(context, filters['tenant_id'])
for p_id in proj_ids:
all_net_objs.extend(self._network_list_project(p_id))
if 'router:external' in filters:
all_net_objs.extend(self._network_list_router_external())
elif filters and 'id' in filters:
# required networks are specified, just read and populate ret_dict
# prune is skipped because all_net_objs is empty
_collect_without_prune(filters['id'])
elif filters and 'name' in filters:
net_objs = self._network_list_project(None)
all_net_objs.extend(net_objs)
elif filters and 'shared' in filters:
if filters['shared'][0] == True:
nets = self._network_list_shared()
for net in nets:
net_info = self._network_vnc_to_neutron(net,
net_repr='LIST')
ret_dict[net.uuid] = net_info
elif filters and 'router:external' in filters:
nets = self._network_list_router_external()
if filters['router:external'][0] == True:
for net in nets:
net_info = self._network_vnc_to_neutron(net, net_repr='LIST')
ret_dict[net.uuid] = net_info
else:
# read all networks in all projects
all_net_objs.extend(self._virtual_network_list(detail=True))
# prune phase
for net_obj in all_net_objs:
if net_obj.uuid in ret_dict:
continue
net_fq_name = unicode(net_obj.get_fq_name())
if not self._filters_is_present(filters, 'contrail:fq_name',
net_fq_name):
continue
if not self._filters_is_present(
filters, 'name', net_obj.get_display_name() or net_obj.name):
continue
if net_obj.is_shared is None:
is_shared = False
else:
is_shared = net_obj.is_shared
if not self._filters_is_present(filters, 'shared',
is_shared):
continue
try:
net_info = self._network_vnc_to_neutron(net_obj,
net_repr='LIST')
except NoIdError:
continue
ret_dict[net_obj.uuid] = net_info
ret_list = []
for net in ret_dict.values():
ret_list.append(net)
return ret_list
#end network_list
def network_count(self, filters=None):
nets_info = self.network_list(filters=filters)
return len(nets_info)
#end network_count
# subnet api handlers
def subnet_create(self, subnet_q):
net_id = subnet_q['network_id']
net_obj = self._virtual_network_read(net_id=net_id)
ipam_fq_name = subnet_q.get('contrail:ipam_fq_name')
if ipam_fq_name:
domain_name, project_name, ipam_name = ipam_fq_name
domain_obj = Domain(domain_name)
project_obj = Project(project_name, domain_obj)
netipam_obj = NetworkIpam(ipam_name, project_obj)
else: # link with project's default ipam or global default ipam
try:
ipam_fq_name = net_obj.get_fq_name()[:-1]
ipam_fq_name.append('default-network-ipam')
netipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
except NoIdError:
netipam_obj = NetworkIpam()
ipam_fq_name = netipam_obj.get_fq_name()
subnet_vnc = self._subnet_neutron_to_vnc(subnet_q)
subnet_key = self._subnet_vnc_get_key(subnet_vnc, net_id)
# Locate list of subnets to which this subnet has to be appended
net_ipam_ref = None
ipam_refs = net_obj.get_network_ipam_refs()
if ipam_refs:
for ipam_ref in ipam_refs:
if ipam_ref['to'] == ipam_fq_name:
net_ipam_ref = ipam_ref
break
if not net_ipam_ref:
# First link from net to this ipam
vnsn_data = VnSubnetsType([subnet_vnc])
net_obj.add_network_ipam(netipam_obj, vnsn_data)
else: # virtual-network already linked to this ipam
for subnet in net_ipam_ref['attr'].get_ipam_subnets():
if subnet_key == self._subnet_vnc_get_key(subnet, net_id):
existing_sn_id = self._subnet_vnc_read_mapping(key=subnet_key)
# duplicate !!
msg = _("Cidr %s overlaps with another subnet of subnet %s"
) % (subnet_q['cidr'], existing_sn_id)
self._raise_contrail_exception('BadRequest',
resource='subnet', msg=msg)
vnsn_data = net_ipam_ref['attr']
vnsn_data.ipam_subnets.append(subnet_vnc)
# TODO: Add 'ref_update' API that will set this field
net_obj._pending_field_updates.add('network_ipam_refs')
self._virtual_network_update(net_obj)
# allocate an id to the subnet and store mapping with
# api-server
subnet_id = subnet_vnc.subnet_uuid
self._subnet_vnc_create_mapping(subnet_id, subnet_key)
# Read in subnet from server to get updated values for gw etc.
subnet_vnc = self._subnet_read(net_obj.uuid, subnet_key)
subnet_info = self._subnet_vnc_to_neutron(subnet_vnc, net_obj,
ipam_fq_name)
return subnet_info
#end subnet_create
def subnet_read(self, subnet_id):
subnet_key = self._subnet_vnc_read_mapping(id=subnet_id)
net_id = subnet_key.split()[0]
try:
net_obj = self._network_read(net_id)
except NoIdError:
self._raise_contrail_exception('SubnetNotFound',
subnet_id=subnet_id)
ipam_refs = net_obj.get_network_ipam_refs()
if ipam_refs:
for ipam_ref in ipam_refs:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
if self._subnet_vnc_get_key(subnet_vnc, net_id) == \
subnet_key:
ret_subnet_q = self._subnet_vnc_to_neutron(
subnet_vnc, net_obj, ipam_ref['to'])
return ret_subnet_q
return {}
#end subnet_read
def subnet_update(self, subnet_id, subnet_q):
if 'gateway_ip' in subnet_q:
if subnet_q['gateway_ip'] != None:
self._raise_contrail_exception(
'BadRequest', resource='subnet',
msg="update of gateway is not supported")
if 'allocation_pools' in subnet_q:
if subnet_q['allocation_pools'] != None:
self._raise_contrail_exception(
'BadRequest', resource='subnet',
msg="update of allocation_pools is not allowed")
subnet_key = self._subnet_vnc_read_mapping(id=subnet_id)
net_id = subnet_key.split()[0]
net_obj = self._network_read(net_id)
ipam_refs = net_obj.get_network_ipam_refs()
subnet_found = False
if ipam_refs:
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnets:
if self._subnet_vnc_get_key(subnet_vnc,
net_id) == subnet_key:
subnet_found = True
break
if subnet_found:
if 'name' in subnet_q:
if subnet_q['name'] != None:
subnet_vnc.set_subnet_name(subnet_q['name'])
if 'gateway_ip' in subnet_q:
if subnet_q['gateway_ip'] != None:
subnet_vnc.set_default_gateway(subnet_q['gateway_ip'])
if 'enable_dhcp' in subnet_q:
if subnet_q['enable_dhcp'] != None:
subnet_vnc.set_enable_dhcp(subnet_q['enable_dhcp'])
if 'dns_nameservers' in subnet_q:
if subnet_q['dns_nameservers'] != None:
dhcp_options = []
dns_servers = " ".join(subnet_q['dns_nameservers'])
if dns_servers:
dhcp_options.append(DhcpOptionType(dhcp_option_name='6',
dhcp_option_value=dns_servers))
if dhcp_options:
subnet_vnc.set_dhcp_option_list(DhcpOptionsListType(dhcp_options))
else:
subnet_vnc.set_dhcp_option_list(None)
if 'host_routes' in subnet_q:
if subnet_q['host_routes'] != None:
host_routes = []
for host_route in subnet_q['host_routes']:
host_routes.append(RouteType(prefix=host_route['destination'],
next_hop=host_route['nexthop']))
if self._apply_subnet_host_routes:
old_host_routes = subnet_vnc.get_host_routes()
subnet_cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
self._port_update_iface_route_table(net_obj,
subnet_cidr,
subnet_id,
host_routes,
old_host_routes)
if host_routes:
subnet_vnc.set_host_routes(RouteTableType(host_routes))
else:
subnet_vnc.set_host_routes(None)
net_obj._pending_field_updates.add('network_ipam_refs')
self._virtual_network_update(net_obj)
ret_subnet_q = self._subnet_vnc_to_neutron(
subnet_vnc, net_obj, ipam_ref['to'])
return ret_subnet_q
return {}
# end subnet_update
def subnet_delete(self, subnet_id):
subnet_key = self._subnet_vnc_read_mapping(id=subnet_id)
net_id = subnet_key.split()[0]
net_obj = self._network_read(net_id)
ipam_refs = net_obj.get_network_ipam_refs()
if ipam_refs:
for ipam_ref in ipam_refs:
orig_subnets = ipam_ref['attr'].get_ipam_subnets()
new_subnets = [subnet_vnc for subnet_vnc in orig_subnets
if self._subnet_vnc_get_key(subnet_vnc,
net_id) != subnet_key]
if len(orig_subnets) != len(new_subnets):
# matched subnet to be deleted
ipam_ref['attr'].set_ipam_subnets(new_subnets)
net_obj._pending_field_updates.add('network_ipam_refs')
try:
self._virtual_network_update(net_obj)
except RefsExistError:
self._raise_contrail_exception('SubnetInUse',
subnet_id=subnet_id)
self._subnet_vnc_delete_mapping(subnet_id, subnet_key)
return
#end subnet_delete
def subnets_list(self, context, filters=None):
ret_subnets = []
all_net_objs = []
if filters and 'id' in filters:
# required subnets are specified,
# just read in corresponding net_ids
net_ids = []
for subnet_id in filters['id']:
subnet_key = self._subnet_vnc_read_mapping(id=subnet_id)
net_id = subnet_key.split()[0]
net_ids.append(net_id)
all_net_objs.extend(self._virtual_network_list(obj_uuids=net_ids,
detail=True))
else:
if not context['is_admin']:
proj_id = context['tenant']
else:
proj_id = None
net_objs = self._network_list_project(proj_id)
all_net_objs.extend(net_objs)
net_objs = self._network_list_shared()
all_net_objs.extend(net_objs)
ret_dict = {}
for net_obj in all_net_objs:
if net_obj.uuid in ret_dict:
continue
ret_dict[net_obj.uuid] = 1
ipam_refs = net_obj.get_network_ipam_refs()
if ipam_refs:
for ipam_ref in ipam_refs:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
sn_info = self._subnet_vnc_to_neutron(subnet_vnc,
net_obj,
ipam_ref['to'])
sn_id = sn_info['id']
sn_proj_id = sn_info['tenant_id']
sn_net_id = sn_info['network_id']
sn_name = sn_info['name']
if (filters and 'shared' in filters and
filters['shared'][0] == True):
if not net_obj.is_shared:
continue
elif filters:
if not self._filters_is_present(filters, 'id',
sn_id):
continue
if not self._filters_is_present(filters,
'tenant_id',
sn_proj_id):
continue
if not self._filters_is_present(filters,
'network_id',
sn_net_id):
continue
if not self._filters_is_present(filters,
'name',
sn_name):
continue
ret_subnets.append(sn_info)
return ret_subnets
#end subnets_list
def subnets_count(self, context, filters=None):
subnets_info = self.subnets_list(context, filters)
return len(subnets_info)
#end subnets_count
# ipam api handlers
def ipam_create(self, ipam_q):
# TODO remove below once api-server can read and create projects
# from keystone on startup
#self._ensure_project_exists(ipam_q['tenant_id'])
ipam_obj = self._ipam_neutron_to_vnc(ipam_q, CREATE)
try:
ipam_uuid = self._vnc_lib.network_ipam_create(ipam_obj)
except RefsExistError as e:
self._raise_contrail_exception('BadRequest',
resource='ipam', msg=str(e))
return self._ipam_vnc_to_neutron(ipam_obj)
#end ipam_create
def ipam_read(self, ipam_id):
try:
ipam_obj = self._vnc_lib.network_ipam_read(id=ipam_id)
except NoIdError:
# TODO add ipam specific exception
self._raise_contrail_exception('NetworkNotFound',
net_id=ipam_id)
return self._ipam_vnc_to_neutron(ipam_obj)
#end ipam_read
def ipam_update(self, ipam_id, ipam_q):
ipam_q['id'] = ipam_id
ipam_obj = self._ipam_neutron_to_vnc(ipam_q, UPDATE)
self._vnc_lib.network_ipam_update(ipam_obj)
return self._ipam_vnc_to_neutron(ipam_obj)
#end ipam_update
def ipam_delete(self, ipam_id):
self._vnc_lib.network_ipam_delete(id=ipam_id)
#end ipam_delete
# TODO request based on filter contents
def ipam_list(self, context=None, filters=None):
ret_list = []
# collect phase
all_ipams = [] # all ipams in all projects
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
project_ipams = self._ipam_list_project(p_id)
all_ipams.append(project_ipams)
else: # no filters
dom_projects = self._project_list_domain(None)
for project in dom_projects:
proj_id = project['uuid']
project_ipams = self._ipam_list_project(proj_id)
all_ipams.append(project_ipams)
# prune phase
for project_ipams in all_ipams:
for proj_ipam in project_ipams:
# TODO implement same for name specified in filter
proj_ipam_id = proj_ipam['uuid']
if not self._filters_is_present(filters, 'id', proj_ipam_id):
continue
ipam_info = self.ipam_read(proj_ipam['uuid'])
ret_list.append(ipam_info)
return ret_list
#end ipam_list
def ipam_count(self, filters=None):
ipam_info = self.ipam_list(filters=filters)
return len(ipam_info)
#end ipam_count
# policy api handlers
def policy_create(self, policy_q):
# TODO remove below once api-server can read and create projects
# from keystone on startup
#self._ensure_project_exists(policy_q['tenant_id'])
policy_obj = self._policy_neutron_to_vnc(policy_q, CREATE)
try:
policy_uuid = self._vnc_lib.network_policy_create(policy_obj)
except RefsExistError as e:
self._raise_contrail_exception('BadRequest',
resource='policy', msg=str(e))
return self._policy_vnc_to_neutron(policy_obj)
#end policy_create
def policy_read(self, policy_id):
try:
policy_obj = self._vnc_lib.network_policy_read(id=policy_id)
except NoIdError:
raise policy.PolicyNotFound(id=policy_id)
return self._policy_vnc_to_neutron(policy_obj)
#end policy_read
def policy_update(self, policy_id, policy):
policy_q = policy
policy_q['id'] = policy_id
policy_obj = self._policy_neutron_to_vnc(policy_q, UPDATE)
self._vnc_lib.network_policy_update(policy_obj)
return self._policy_vnc_to_neutron(policy_obj)
#end policy_update
def policy_delete(self, policy_id):
self._vnc_lib.network_policy_delete(id=policy_id)
#end policy_delete
# TODO request based on filter contents
def policy_list(self, context=None, filters=None):
ret_list = []
# collect phase
all_policys = [] # all policys in all projects
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
project_policys = self._policy_list_project(p_id)
all_policys.append(project_policys)
else: # no filters
dom_projects = self._project_list_domain(None)
for project in dom_projects:
proj_id = project['uuid']
project_policys = self._policy_list_project(proj_id)
all_policys.append(project_policys)
# prune phase
for project_policys in all_policys:
for proj_policy in project_policys:
# TODO implement same for name specified in filter
proj_policy_id = proj_policy['uuid']
if not self._filters_is_present(filters, 'id', proj_policy_id):
continue
policy_info = self.policy_read(proj_policy['uuid'])
ret_list.append(policy_info)
return ret_list
#end policy_list
def policy_count(self, filters=None):
policy_info = self.policy_list(filters=filters)
return len(policy_info)
#end policy_count
def _router_add_gateway(self, router_q, rtr_obj):
ext_gateway = router_q.get('external_gateway_info', None)
old_ext_gateway = rtr_obj.get_virtual_network_refs()
if ext_gateway or old_ext_gateway:
network_id = ext_gateway.get('network_id') if ext_gateway else None
if network_id:
if old_ext_gateway and network_id == old_ext_gateway[0]['uuid']:
return
try:
net_obj = self._virtual_network_read(net_id=network_id)
if not net_obj.get_router_external():
self._raise_contrail_exception(
'BadRequest', resource='router',
msg="Network %s is not a valid external network" % network_id)
except NoIdError:
self._raise_contrail_exception('NetworkNotFound',
net_id=network_id)
self._router_set_external_gateway(rtr_obj, net_obj)
else:
self._router_clear_external_gateway(rtr_obj)
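# Setting an external gateway creates (or reuses) a per-router netns
# SNAT service instance ('si_<router-uuid>') from the SNAT service
# template, plus a route table ('rt_<router-uuid>') holding a 0.0.0.0/0
# route towards that instance, and attaches the table to every private
# network connected to the router.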
def _router_set_external_gateway(self, router_obj, ext_net_obj):
project_obj = self._project_read(proj_id=router_obj.parent_uuid)
# Get netns SNAT service template
try:
st_obj = self._vnc_lib.service_template_read(
fq_name=SNAT_SERVICE_TEMPLATE_FQ_NAME)
except NoIdError:
self._raise_contrail_exception('BadRequest', resource='router',
msg="Unable to set or clear the default gateway")
# Get the service instance if it exists
si_name = 'si_' + router_obj.uuid
si_fq_name = project_obj.get_fq_name() + [si_name]
try:
si_obj = self._vnc_lib.service_instance_read(fq_name=si_fq_name)
si_uuid = si_obj.uuid
except NoIdError:
si_obj = None
# Get route table for default route if it exists
rt_name = 'rt_' + router_obj.uuid
rt_fq_name = project_obj.get_fq_name() + [rt_name]
try:
rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
rt_uuid = rt_obj.uuid
except NoIdError:
rt_obj = None
# Set the service instance
si_created = False
if not si_obj:
si_obj = ServiceInstance(si_name, parent_obj=project_obj)
si_created = True
#TODO(ethuleau): For the fail-over SNAT set scale out to 2
si_prop_obj = ServiceInstanceType(
scale_out=ServiceScaleOutType(max_instances=1,
auto_scale=True),
auto_policy=True)
# set right interface in order of [right, left] to match template
left_if = ServiceInstanceInterfaceType()
right_if = ServiceInstanceInterfaceType(
virtual_network=ext_net_obj.get_fq_name_str())
si_prop_obj.set_interface_list([right_if, left_if])
si_prop_obj.set_ha_mode('active-standby')
si_obj.set_service_instance_properties(si_prop_obj)
si_obj.set_service_template(st_obj)
if si_created:
si_uuid = self._vnc_lib.service_instance_create(si_obj)
else:
self._vnc_lib.service_instance_update(si_obj)
# Set the route table
route_obj = RouteType(prefix="0.0.0.0/0",
next_hop=si_obj.get_fq_name_str())
rt_created = False
if not rt_obj:
rt_obj = RouteTable(name=rt_name, parent_obj=project_obj)
rt_created = True
rt_obj.set_routes(RouteTableType.factory([route_obj]))
if rt_created:
rt_uuid = self._vnc_lib.route_table_create(rt_obj)
else:
self._vnc_lib.route_table_update(rt_obj)
# Associate route table to all private networks connected onto
# that router
for intf in router_obj.get_virtual_machine_interface_refs() or []:
port_id = intf['uuid']
net_id = self.port_read(port_id)['network_id']
try:
net_obj = self._vnc_lib.virtual_network_read(id=net_id)
except NoIdError:
self._raise_contrail_exception(
'NetworkNotFound', net_id=net_id)
net_obj.set_route_table(rt_obj)
self._vnc_lib.virtual_network_update(net_obj)
# Add logical gateway virtual network
router_obj.set_virtual_network(ext_net_obj)
self._vnc_lib.logical_router_update(router_obj)
def _router_clear_external_gateway(self, router_obj):
project_obj = self._project_read(proj_id=router_obj.parent_uuid)
# Get the service instance if it exists
si_name = 'si_' + router_obj.uuid
si_fq_name = project_obj.get_fq_name() + [si_name]
try:
si_obj = self._vnc_lib.service_instance_read(fq_name=si_fq_name)
si_uuid = si_obj.uuid
except NoIdError:
si_obj = None
# Get route table for default route if it exists
rt_name = 'rt_' + router_obj.uuid
rt_fq_name = project_obj.get_fq_name() + [rt_name]
try:
rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
rt_uuid = rt_obj.uuid
except NoIdError:
rt_obj = None
# Delete route table
if rt_obj:
# Disassociate route table to all private networks connected
# onto that router
for net_ref in rt_obj.get_virtual_network_back_refs() or []:
try:
net_obj = self._vnc_lib.virtual_network_read(
id=net_ref['uuid'])
except NoIdError:
continue
net_obj.del_route_table(rt_obj)
self._vnc_lib.virtual_network_update(net_obj)
self._vnc_lib.route_table_delete(id=rt_obj.uuid)
# Delete service instance
if si_obj:
self._vnc_lib.service_instance_delete(id=si_uuid)
# Clear logical gateway virtual network
router_obj.set_virtual_network_list([])
self._vnc_lib.logical_router_update(router_obj)
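# Attach the router's SNAT route table (rt_<router-uuid>) to a newly
# connected private network so its default route goes through the SNAT
# service instance; a no-op when no gateway (and hence no table) is set.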
def _set_snat_routing_table(self, router_obj, network_id):
project_obj = self._project_read(proj_id=router_obj.parent_uuid)
rt_name = 'rt_' + router_obj.uuid
rt_fq_name = project_obj.get_fq_name() + [rt_name]
try:
rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
rt_uuid = rt_obj.uuid
except NoIdError:
# No route table set with that router ID, the gateway is not set
return
try:
net_obj = self._vnc_lib.virtual_network_read(id=network_id)
except NoIdError:
raise exceptions.NetworkNotFound(net_id=network_id)
net_obj.set_route_table(rt_obj)
self._vnc_lib.virtual_network_update(net_obj)
def _clear_snat_routing_table(self, router_obj, network_id):
project_obj = self._project_read(proj_id=router_obj.parent_uuid)
rt_name = 'rt_' + router_obj.uuid
rt_fq_name = project_obj.get_fq_name() + [rt_name]
try:
rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
rt_uuid = rt_obj.uuid
except NoIdError:
# No route table set with that router ID, the gateway is not set
return
try:
net_obj = self._vnc_lib.virtual_network_read(id=network_id)
except NoIdError:
raise exceptions.NetworkNotFound(net_id=network_id)
net_obj.del_route_table(rt_obj)
self._vnc_lib.virtual_network_update(net_obj)
# router api handlers
def router_create(self, router_q):
#self._ensure_project_exists(router_q['tenant_id'])
rtr_obj = self._router_neutron_to_vnc(router_q, CREATE)
rtr_uuid = self._resource_create('logical_router', rtr_obj)
self._router_add_gateway(router_q, rtr_obj)
ret_router_q = self._router_vnc_to_neutron(rtr_obj, rtr_repr='SHOW')
return ret_router_q
#end router_create
def router_read(self, rtr_uuid, fields=None):
# see if we can return fast...
if fields and (len(fields) == 1) and fields[0] == 'tenant_id':
tenant_id = self._get_obj_tenant_id('router', rtr_uuid)
return {'id': rtr_uuid, 'tenant_id': tenant_id}
try:
rtr_obj = self._logical_router_read(rtr_uuid)
except NoIdError:
self._raise_contrail_exception('RouterNotFound',
router_id=rtr_uuid)
return self._router_vnc_to_neutron(rtr_obj, rtr_repr='SHOW')
#end router_read
def router_update(self, rtr_id, router_q):
router_q['id'] = rtr_id
rtr_obj = self._router_neutron_to_vnc(router_q, UPDATE)
self._logical_router_update(rtr_obj)
self._router_add_gateway(router_q, rtr_obj)
ret_router_q = self._router_vnc_to_neutron(rtr_obj, rtr_repr='SHOW')
return ret_router_q
#end router_update
def router_delete(self, rtr_id):
try:
rtr_obj = self._logical_router_read(rtr_id)
if rtr_obj.get_virtual_machine_interface_refs():
self._raise_contrail_exception('RouterInUse',
router_id=rtr_id)
except NoIdError:
self._raise_contrail_exception('RouterNotFound',
router_id=rtr_id)
self._router_clear_external_gateway(rtr_obj)
self._logical_router_delete(rtr_id=rtr_id)
#end router_delete
# TODO request based on filter contents
def router_list(self, context=None, filters=None):
ret_list = []
if filters and 'shared' in filters:
if filters['shared'][0] == True:
# no support for shared routers
return ret_list
# collect phase
all_rtrs = [] # all routers in all projects
if filters and 'tenant_id' in filters:
# project-id is present
if 'id' in filters:
# required routers are also specified,
# just read and populate ret_list
# prune is skipped because all_rtrs is empty
for rtr_id in filters['id']:
try:
rtr_obj = self._logical_router_read(rtr_id)
rtr_info = self._router_vnc_to_neutron(rtr_obj,
rtr_repr='LIST')
ret_list.append(rtr_info)
except NoIdError:
pass
else:
# read all routers in project, and prune below
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
if 'router:external' in filters:
all_rtrs.append(self._fip_pool_ref_routers(p_id))
else:
project_rtrs = self._router_list_project(p_id)
all_rtrs.append(project_rtrs)
elif filters and 'id' in filters:
# required routers are specified, just read and populate ret_list
# prune is skipped because all_rtrs is empty
for rtr_id in filters['id']:
try:
rtr_obj = self._logical_router_read(rtr_id)
rtr_info = self._router_vnc_to_neutron(rtr_obj,
rtr_repr='LIST')
ret_list.append(rtr_info)
except NoIdError:
pass
else:
# read all routers in all projects
project_rtrs = self._router_list_project()
all_rtrs.append(project_rtrs)
# prune phase
for project_rtrs in all_rtrs:
for proj_rtr in project_rtrs:
proj_rtr_id = proj_rtr['uuid']
if not self._filters_is_present(filters, 'id', proj_rtr_id):
continue
proj_rtr_fq_name = unicode(proj_rtr['fq_name'])
if not self._filters_is_present(filters, 'contrail:fq_name',
proj_rtr_fq_name):
continue
try:
rtr_obj = self._logical_router_read(proj_rtr['uuid'])
if not self._filters_is_present(
filters, 'name',
rtr_obj.get_display_name() or rtr_obj.name):
continue
rtr_info = self._router_vnc_to_neutron(rtr_obj,
rtr_repr='LIST')
except NoIdError:
continue
ret_list.append(rtr_info)
return ret_list
#end router_list
def router_count(self, filters=None):
rtrs_info = self.router_list(filters=filters)
return len(rtrs_info)
#end router_count
def _check_for_dup_router_subnet(self, router_id,
network_id, subnet_id, subnet_cidr):
try:
rports = self.port_list(filters={'device_id': [router_id]})
# It's possible these ports are on the same network, but
# different subnets.
new_ipnet = netaddr.IPNetwork(subnet_cidr)
for p in rports:
for ip in p['fixed_ips']:
if ip['subnet_id'] == subnet_id:
msg = (_("Router %s already has a port "
"on subnet %s") % (router_id, subnet_id))
self._raise_contrail_exception(
'BadRequest', resource='router', msg=msg)
sub_id = ip['subnet_id']
subnet = self.subnet_read(sub_id)
cidr = subnet['cidr']
ipnet = netaddr.IPNetwork(cidr)
match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr])
match2 = netaddr.all_matching_cidrs(ipnet, [subnet_cidr])
if match1 or match2:
data = {'subnet_cidr': subnet_cidr,
'subnet_id': subnet_id,
'cidr': cidr,
'sub_id': sub_id}
msg = (_("Cidr %(subnet_cidr)s of subnet "
"%(subnet_id)s overlaps with cidr %(cidr)s "
"of subnet %(sub_id)s") % data)
self._raise_contrail_exception(
'BadRequest', resource='router', msg=msg)
except NoIdError:
pass
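# Interfaces can be added either by an existing port (which must carry
# exactly one fixed IP) or by a subnet (in which case a new router
# port is created on the subnet's gateway IP).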
def add_router_interface(self, context, router_id, port_id=None, subnet_id=None):
router_obj = self._logical_router_read(router_id)
if port_id:
port = self.port_read(port_id)
if (port['device_owner'] == constants.DEVICE_OWNER_ROUTER_INTF and
port['device_id']):
self._raise_contrail_exception('PortInUse',
net_id=port['network_id'],
port_id=port['id'],
device_id=port['device_id'])
fixed_ips = [ip for ip in port['fixed_ips']]
if len(fixed_ips) != 1:
self._raise_contrail_exception(
'BadRequest', resource='router',
msg='Router port must have exactly one fixed IP')
subnet_id = fixed_ips[0]['subnet_id']
subnet = self.subnet_read(subnet_id)
self._check_for_dup_router_subnet(router_id,
port['network_id'],
subnet['id'],
subnet['cidr'])
elif subnet_id:
subnet = self.subnet_read(subnet_id)
if not subnet['gateway_ip']:
self._raise_contrail_exception(
'BadRequest', resource='router',
msg='Subnet for router interface must have a gateway IP')
self._check_for_dup_router_subnet(router_id,
subnet['network_id'],
subnet_id,
subnet['cidr'])
fixed_ip = {'ip_address': subnet['gateway_ip'],
'subnet_id': subnet['id']}
port = self.port_create(context, {'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'fixed_ips': [fixed_ip],
'admin_state_up': True,
'device_id': router_id,
'device_owner': constants.DEVICE_OWNER_ROUTER_INTF,
'name': ''})
port_id = port['id']
else:
self._raise_contrail_exception(
'BadRequest', resource='router',
msg='Either port or subnet must be specified')
self._set_snat_routing_table(router_obj, subnet['network_id'])
vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=port_id)
vmi_obj.set_virtual_machine_interface_device_owner(
constants.DEVICE_OWNER_ROUTER_INTF)
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
router_obj.add_virtual_machine_interface(vmi_obj)
self._logical_router_update(router_obj)
info = {'id': router_id,
'tenant_id': subnet['tenant_id'],
'port_id': port_id,
'subnet_id': subnet_id}
return info
# end add_router_interface
def remove_router_interface(self, router_id, port_id=None, subnet_id=None):
router_obj = self._logical_router_read(router_id)
subnet = None
if port_id:
port_db = self.port_read(port_id)
if (port_db['device_owner'] != constants.DEVICE_OWNER_ROUTER_INTF
or port_db['device_id'] != router_id):
self._raise_contrail_exception('RouterInterfaceNotFound',
router_id=router_id,
port_id=port_id)
port_subnet_id = port_db['fixed_ips'][0]['subnet_id']
if subnet_id and (port_subnet_id != subnet_id):
self._raise_contrail_exception('SubnetMismatchForPort',
port_id=port_id,
subnet_id=subnet_id)
subnet_id = port_subnet_id
subnet = self.subnet_read(subnet_id)
network_id = subnet['network_id']
elif subnet_id:
subnet = self.subnet_read(subnet_id)
network_id = subnet['network_id']
for intf in router_obj.get_virtual_machine_interface_refs() or []:
port_id = intf['uuid']
port_db = self.port_read(port_id)
if subnet_id == port_db['fixed_ips'][0]['subnet_id']:
break
else:
msg = _('Subnet %s not connected to router %s') % (subnet_id,
router_id)
self._raise_contrail_exception('BadRequest',
resource='router', msg=msg)
self._clear_snat_routing_table(router_obj, subnet['network_id'])
port_obj = self._virtual_machine_interface_read(port_id)
router_obj.del_virtual_machine_interface(port_obj)
self._vnc_lib.logical_router_update(router_obj)
self.port_delete(port_id)
info = {'id': router_id,
'tenant_id': subnet['tenant_id'],
'port_id': port_id,
'subnet_id': subnet_id}
return info
# end remove_router_interface
# floatingip api handlers
def floatingip_create(self, fip_q):
try:
fip_obj = self._floatingip_neutron_to_vnc(fip_q, CREATE)
        except Exception as e:
#logging.exception(e)
msg = _('Internal error when trying to create floating ip. '
'Please be sure the network %s is an external '
'network.') % (fip_q['floating_network_id'])
self._raise_contrail_exception('BadRequest',
resource='floatingip', msg=msg)
try:
fip_uuid = self._vnc_lib.floating_ip_create(fip_obj)
except Exception as e:
self._raise_contrail_exception('IpAddressGenerationFailure',
net_id=fip_q['floating_network_id'])
fip_obj = self._vnc_lib.floating_ip_read(id=fip_uuid)
return self._floatingip_vnc_to_neutron(fip_obj)
#end floatingip_create
def floatingip_read(self, fip_uuid):
try:
fip_obj = self._vnc_lib.floating_ip_read(id=fip_uuid)
except NoIdError:
self._raise_contrail_exception('FloatingIPNotFound',
floatingip_id=fip_uuid)
return self._floatingip_vnc_to_neutron(fip_obj)
#end floatingip_read
def floatingip_update(self, fip_id, fip_q):
fip_q['id'] = fip_id
fip_obj = self._floatingip_neutron_to_vnc(fip_q, UPDATE)
self._vnc_lib.floating_ip_update(fip_obj)
return self._floatingip_vnc_to_neutron(fip_obj)
#end floatingip_update
def floatingip_delete(self, fip_id):
self._vnc_lib.floating_ip_delete(id=fip_id)
#end floatingip_delete
def floatingip_list(self, context, filters=None):
# Read in floating ips with either
# - port(s) as anchor
# - project(s) as anchor
# - none as anchor (floating-ip collection)
ret_list = []
proj_ids = None
port_ids = None
if filters:
if 'tenant_id' in filters:
proj_ids = self._validate_project_ids(context,
filters['tenant_id'])
elif 'port_id' in filters:
port_ids = filters['port_id']
else: # no filters
if not context['is_admin']:
proj_ids = [str(uuid.UUID(context['tenant']))]
if port_ids:
fip_objs = self._floatingip_list(back_ref_id=port_ids)
elif proj_ids:
fip_objs = self._floatingip_list(back_ref_id=proj_ids)
else:
fip_objs = self._floatingip_list()
for fip_obj in fip_objs:
            if filters and 'floating_ip_address' in filters:
if (fip_obj.get_floating_ip_address() not in
filters['floating_ip_address']):
continue
ret_list.append(self._floatingip_vnc_to_neutron(fip_obj))
return ret_list
#end floatingip_list
def floatingip_count(self, context, filters=None):
floatingip_info = self.floatingip_list(context, filters)
return len(floatingip_info)
#end floatingip_count
def _ip_addr_in_net_id(self, ip_addr, net_id):
"""Checks if ip address is present in net-id."""
net_ip_list = [ipobj.get_instance_ip_address() for ipobj in
self._instance_ip_list(back_ref_id=[net_id])]
return ip_addr in net_ip_list
def _create_instance_ip(self, net_obj, port_obj, ip_addr=None,
subnet_uuid=None, ip_family="v4"):
ip_name = str(uuid.uuid4())
ip_obj = InstanceIp(name=ip_name)
ip_obj.uuid = ip_name
if subnet_uuid:
ip_obj.set_subnet_uuid(subnet_uuid)
ip_obj.set_virtual_machine_interface(port_obj)
ip_obj.set_virtual_network(net_obj)
ip_obj.set_instance_ip_family(ip_family)
if ip_addr:
ip_obj.set_instance_ip_address(ip_addr)
ip_id = self._instance_ip_create(ip_obj)
return ip_id
# end _create_instance_ip
def _port_create_instance_ip(self, net_obj, port_obj, port_q, ip_family="v4"):
created_iip_ids = []
fixed_ips = port_q.get('fixed_ips')
if fixed_ips is None:
return
for fixed_ip in fixed_ips:
try:
ip_addr = fixed_ip.get('ip_address')
subnet_id = fixed_ip.get('subnet_id')
ip_id = self._create_instance_ip(net_obj, port_obj, ip_addr, subnet_id, ip_family)
created_iip_ids.append(ip_id)
except vnc_exc.HttpError as e:
# Resources are not available
for iip_id in created_iip_ids:
self._instance_ip_delete(instance_ip_id=iip_id)
raise e
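        # Delete any instance IPs still attached to the port that were not (re)created
        # above, so only the requested fixed_ips remain.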
for iip in getattr(port_obj, 'instance_ip_back_refs', []):
if iip['uuid'] not in created_iip_ids:
iip_obj = self._instance_ip_delete(instance_ip_id=iip['uuid'])
# end _port_create_instance_ip
# port api handlers
def port_create(self, context, port_q):
net_id = port_q['network_id']
net_obj = self._network_read(net_id)
        tenant_id = self._get_tenant_id_for_create(context, port_q)
proj_id = str(uuid.UUID(tenant_id))
# initialize port object
port_obj = self._port_neutron_to_vnc(port_q, net_obj, CREATE)
# determine creation of v4 and v6 ip object
ip_obj_v4_create = False
ip_obj_v6_create = False
ipam_refs = net_obj.get_network_ipam_refs()
for ipam_ref in ipam_refs:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
cidr = '%s/%s' %(subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
if (IPNetwork(cidr).version == 4):
ip_obj_v4_create = True
if (IPNetwork(cidr).version == 6):
ip_obj_v6_create = True
# create the object
port_id = self._resource_create('virtual_machine_interface', port_obj)
try:
if 'fixed_ips' in port_q:
self._port_create_instance_ip(net_obj, port_obj, port_q)
elif net_obj.get_network_ipam_refs():
if (ip_obj_v4_create is True):
self._port_create_instance_ip(net_obj, port_obj,
{'fixed_ips':[{'ip_address': None}]}, ip_family="v4")
if (ip_obj_v6_create is True):
self._port_create_instance_ip(net_obj, port_obj,
{'fixed_ips':[{'ip_address': None}]}, ip_family="v6")
except vnc_exc.HttpError:
# failure in creating the instance ip. Roll back
self._virtual_machine_interface_delete(port_id=port_id)
self._raise_contrail_exception('IpAddressGenerationFailure',
net_id=net_obj.uuid)
# TODO below reads back default parent name, fix it
port_obj = self._virtual_machine_interface_read(port_id=port_id)
ret_port_q = self._port_vnc_to_neutron(port_obj)
# create interface route table for the port if
# subnet has a host route for this port ip.
if self._apply_subnet_host_routes:
self._port_check_and_add_iface_route_table(ret_port_q['fixed_ips'],
net_obj, port_obj)
# update cache on successful creation
tenant_id = proj_id.replace('-', '')
ncurports = self.port_count({'tenant_id': tenant_id})
return ret_port_q
#end port_create
# TODO add obj param and let caller use below only as a converter
def port_read(self, port_id):
try:
port_obj = self._virtual_machine_interface_read(port_id=port_id)
except NoIdError:
self._raise_contrail_exception('PortNotFound', port_id=port_id)
ret_port_q = self._port_vnc_to_neutron(port_obj)
return ret_port_q
#end port_read
def port_update(self, port_id, port_q):
# if ip address passed then use it
req_ip_addrs = []
req_ip_subnets = []
port_q['id'] = port_id
port_obj = self._port_neutron_to_vnc(port_q, None, UPDATE)
net_id = port_obj.get_virtual_network_refs()[0]['uuid']
net_obj = self._network_read(net_id)
self._virtual_machine_interface_update(port_obj)
try:
self._port_create_instance_ip(net_obj, port_obj, port_q)
except vnc_exc.HttpError:
self._raise_contrail_exception('IpAddressGenerationFailure',
net_id=net_obj.uuid)
port_obj = self._virtual_machine_interface_read(port_id=port_id)
ret_port_q = self._port_vnc_to_neutron(port_obj)
return ret_port_q
#end port_update
def port_delete(self, port_id):
port_obj = self._port_neutron_to_vnc({'id': port_id}, None, DELETE)
if port_obj.parent_type == 'virtual-machine':
instance_id = port_obj.parent_uuid
else:
vm_refs = port_obj.get_virtual_machine_refs()
if vm_refs:
instance_id = vm_refs[0]['uuid']
else:
instance_id = None
if port_obj.get_logical_router_back_refs():
self._raise_contrail_exception('L3PortInUse', port_id=port_id,
device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
# release instance IP address
iip_back_refs = getattr(port_obj, 'instance_ip_back_refs', None)
if iip_back_refs:
for iip_back_ref in iip_back_refs:
# if name contains IP address then this is shared ip
iip_obj = self._vnc_lib.instance_ip_read(
id=iip_back_ref['uuid'])
# in case of shared ip only delete the link to the VMI
if len(iip_obj.name.split(' ')) > 1:
iip_obj.del_virtual_machine_interface(port_obj)
self._instance_ip_update(iip_obj)
else:
self._instance_ip_delete(
instance_ip_id=iip_back_ref['uuid'])
# disassociate any floating IP used by instance
fip_back_refs = getattr(port_obj, 'floating_ip_back_refs', None)
if fip_back_refs:
for fip_back_ref in fip_back_refs:
self.floatingip_update(fip_back_ref['uuid'], {'port_id': None})
tenant_id = self._get_obj_tenant_id('port', port_id)
self._virtual_machine_interface_delete(port_id=port_id)
        # delete any interface route table associated with the port
for rt_ref in port_obj.get_interface_route_table_refs() or []:
try:
self._vnc_lib.interface_route_table_delete(id=rt_ref['uuid'])
except vnc_exc.NoIdError:
pass
# delete instance if this was the last port
try:
if instance_id:
self._vnc_lib.virtual_machine_delete(id=instance_id)
except RefsExistError:
pass
#end port_delete
def port_list(self, context=None, filters=None):
project_obj = None
ret_q_ports = []
all_project_ids = []
# TODO used to find dhcp server field. support later...
        filters = filters or {}
        if (filters.get('device_owner') == 'network:dhcp' or
                'network:dhcp' in filters.get('device_owner', [])):
            return ret_q_ports
        if 'device_id' not in filters:
# Listing from back references
if not filters:
# TODO once vmi is linked to project in schema, use project_id
# to limit scope of list
if not context['is_admin']:
project_id = str(uuid.UUID(context['tenant']))
else:
project_id = None
# read all VMI and IIP in detail one-shot
if self._list_optimization_enabled:
all_port_gevent = gevent.spawn(self._virtual_machine_interface_list,
parent_id=project_id)
else:
all_port_gevent = gevent.spawn(self._virtual_machine_interface_list)
port_iip_gevent = gevent.spawn(self._instance_ip_list)
port_net_gevent = gevent.spawn(self._virtual_network_list,
parent_id=project_id,
detail=True)
gevent.joinall([all_port_gevent, port_iip_gevent, port_net_gevent])
all_port_objs = all_port_gevent.value
port_iip_objs = port_iip_gevent.value
port_net_objs = port_net_gevent.value
ret_q_ports = self._port_list(port_net_objs, all_port_objs,
port_iip_objs)
elif 'tenant_id' in filters:
all_project_ids = self._validate_project_ids(context,
filters['tenant_id'])
elif 'name' in filters:
all_project_ids = [str(uuid.UUID(context['tenant']))]
elif 'id' in filters:
# TODO optimize
for port_id in filters['id']:
try:
port_info = self.port_read(port_id)
except NoIdError:
continue
ret_q_ports.append(port_info)
for proj_id in all_project_ids:
ret_q_ports = self._port_list_project(proj_id)
if 'network_id' in filters:
ret_q_ports = self._port_list_network(filters['network_id'])
# prune phase
ret_list = []
for port_obj in ret_q_ports:
if not self._filters_is_present(filters, 'name',
port_obj['name']):
continue
ret_list.append(port_obj)
return ret_list
# Listing from parent to children
device_ids = filters['device_id']
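        # With a device_id filter, list VMIs owned by / referring to each device and,
        # if none are found, fall back to the logical router's interface ports.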
for dev_id in device_ids:
try:
# TODO optimize
port_objs = self._virtual_machine_interface_list(
parent_id=dev_id,
back_ref_id=dev_id)
if not port_objs:
raise NoIdError(None)
for port_obj in port_objs:
port_info = self._port_vnc_to_neutron(port_obj)
ret_q_ports.append(port_info)
except NoIdError:
try:
router_obj = self._logical_router_read(rtr_id=dev_id)
intfs = router_obj.get_virtual_machine_interface_refs()
for intf in (intfs or []):
try:
port_info = self.port_read(intf['uuid'])
except NoIdError:
continue
ret_q_ports.append(port_info)
except NoIdError:
continue
return ret_q_ports
#end port_list
def port_count(self, filters=None):
        filters = filters or {}
        if (filters.get('device_owner') == 'network:dhcp' or
            'network:dhcp' in filters.get('device_owner', [])):
return 0
if 'tenant_id' in filters:
if isinstance(filters['tenant_id'], list):
project_id = str(uuid.UUID(filters['tenant_id'][0]))
else:
project_id = str(uuid.UUID(filters['tenant_id']))
nports = len(self._port_list_project(project_id))
else:
# across all projects - TODO very expensive,
# get only a count from api-server!
nports = len(self.port_list(filters=filters))
return nports
#end port_count
# security group api handlers
def security_group_create(self, sg_q):
sg_obj = self._security_group_neutron_to_vnc(sg_q, CREATE)
sg_uuid = self._resource_create('security_group', sg_obj)
#allow all egress traffic
def_rule = {}
def_rule['port_range_min'] = 0
def_rule['port_range_max'] = 65535
def_rule['direction'] = 'egress'
def_rule['remote_ip_prefix'] = '0.0.0.0/0'
def_rule['remote_group_id'] = None
def_rule['protocol'] = 'any'
rule = self._security_group_rule_neutron_to_vnc(def_rule, CREATE)
self._security_group_rule_create(sg_uuid, rule)
ret_sg_q = self._security_group_vnc_to_neutron(sg_obj)
return ret_sg_q
#end security_group_create
def security_group_update(self, sg_id, sg_q):
sg_q['id'] = sg_id
sg_obj = self._security_group_neutron_to_vnc(sg_q, UPDATE)
self._vnc_lib.security_group_update(sg_obj)
ret_sg_q = self._security_group_vnc_to_neutron(sg_obj)
return ret_sg_q
#end security_group_update
def security_group_read(self, sg_id):
try:
sg_obj = self._vnc_lib.security_group_read(id=sg_id)
except NoIdError:
self._raise_contrail_exception('SecurityGroupNotFound', id=sg_id)
return self._security_group_vnc_to_neutron(sg_obj)
#end security_group_read
def security_group_delete(self, sg_id):
try:
sg_obj = self._vnc_lib.security_group_read(id=sg_id)
if sg_obj.name == 'default':
self._raise_contrail_exception(
'SecurityGroupCannotRemoveDefault')
except NoIdError:
return
try:
self._security_group_delete(sg_id)
except RefsExistError:
self._raise_contrail_exception('SecurityGroupInUse', id=sg_id)
#end security_group_delete
def security_group_list(self, context, filters=None):
ret_list = []
# collect phase
all_sgs = [] # all sgs in all projects
if context and not context['is_admin']:
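            # Retry up to 10 times with a 3 s pause: the project's security groups
            # (e.g. the default group) may not be visible yet right after project creation.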
for i in range(10):
project_sgs = self._security_group_list_project(str(uuid.UUID(context['tenant'])))
if project_sgs:
break
gevent.sleep(3)
all_sgs.append(project_sgs)
else: # admin context
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
project_sgs = self._security_group_list_project(p_id)
all_sgs.append(project_sgs)
else: # no filters
all_sgs.append(self._security_group_list_project(None))
# prune phase
for project_sgs in all_sgs:
for sg_obj in project_sgs:
if not self._filters_is_present(filters, 'id', sg_obj.uuid):
continue
if not self._filters_is_present(filters, 'name',
sg_obj.get_display_name() or sg_obj.name):
continue
sg_info = self._security_group_vnc_to_neutron(sg_obj)
ret_list.append(sg_info)
return ret_list
#end security_group_list
def _convert_protocol(self, value):
if value is None:
return
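        # Accept either a numeric IP protocol (translated to its name via IP_PROTOCOL_MAP)
        # or a protocol name; anything else raises SecurityGroupRuleInvalidProtocol.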
try:
val = int(value)
#TODO(ethuleau): support all protocol numbers
            if val >= 0 and val <= 255 and val in IP_PROTOCOL_MAP:
return IP_PROTOCOL_MAP[val]
self._raise_contrail_exception(
'SecurityGroupRuleInvalidProtocol',
protocol=value, values=IP_PROTOCOL_MAP.values())
except (ValueError, TypeError):
if value.lower() in IP_PROTOCOL_MAP.values():
return value.lower()
self._raise_contrail_exception(
'SecurityGroupRuleInvalidProtocol',
protocol=value, values=IP_PROTOCOL_MAP.values())
except AttributeError:
self._raise_contrail_exception(
'SecurityGroupRuleInvalidProtocol',
protocol=value, values=IP_PROTOCOL_MAP.values())
def _validate_port_range(self, rule):
"""Check that port_range is valid."""
if (rule['port_range_min'] is None and
rule['port_range_max'] is None):
return
if not rule['protocol']:
self._raise_contrail_exception(
'SecurityGroupProtocolRequiredWithPorts')
if rule['protocol'] in [constants.PROTO_NAME_TCP, constants.PROTO_NAME_UDP]:
if (rule['port_range_min'] is not None and
rule['port_range_min'] <= rule['port_range_max']):
pass
else:
self._raise_contrail_exception('SecurityGroupInvalidPortRange')
elif rule['protocol'] == constants.PROTO_NAME_ICMP:
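            # For ICMP, port_range_min/port_range_max carry the ICMP type and code,
            # each of which must not exceed 255.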
for attr, field in [('port_range_min', 'type'),
('port_range_max', 'code')]:
if rule[attr] > 255:
self._raise_contrail_exception(
'SecurityGroupInvalidIcmpValue', field=field,
attr=attr, value=rule[attr])
if (rule['port_range_min'] is None and
rule['port_range_max']):
self._raise_contrail_exception('SecurityGroupMissingIcmpType',
value=rule['port_range_max'])
def security_group_rule_create(self, sgr_q):
sgr_q['protocol'] = self._convert_protocol(sgr_q['protocol'])
self._validate_port_range(sgr_q)
sg_id = sgr_q['security_group_id']
sg_rule = self._security_group_rule_neutron_to_vnc(sgr_q, CREATE)
self._security_group_rule_create(sg_id, sg_rule)
ret_sg_rule_q = self._security_group_rule_vnc_to_neutron(sg_id,
sg_rule)
return ret_sg_rule_q
#end security_group_rule_create
def security_group_rule_read(self, sgr_id):
sg_obj, sg_rule = self._security_group_rule_find(sgr_id)
if sg_obj and sg_rule:
return self._security_group_rule_vnc_to_neutron(sg_obj.uuid,
sg_rule, sg_obj)
self._raise_contrail_exception('SecurityGroupRuleNotFound', id=sgr_id)
#end security_group_rule_read
def security_group_rule_delete(self, sgr_id):
sg_obj, sg_rule = self._security_group_rule_find(sgr_id)
if sg_obj and sg_rule:
return self._security_group_rule_delete(sg_obj, sg_rule)
self._raise_contrail_exception('SecurityGroupRuleNotFound', id=sgr_id)
#end security_group_rule_delete
def security_group_rules_read(self, sg_id, sg_obj=None):
try:
if not sg_obj:
sg_obj = self._vnc_lib.security_group_read(id=sg_id)
sgr_entries = sg_obj.get_security_group_entries()
sg_rules = []
            if sgr_entries is None:
return
for sg_rule in sgr_entries.get_policy_rule():
sg_info = self._security_group_rule_vnc_to_neutron(sg_obj.uuid,
sg_rule,
sg_obj)
sg_rules.append(sg_info)
except NoIdError:
self._raise_contrail_exception('SecurityGroupNotFound', id=sg_id)
return sg_rules
#end security_group_rules_read
def security_group_rule_list(self, context=None, filters=None):
ret_list = []
# collect phase
all_sgs = []
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
project_sgs = self._security_group_list_project(p_id)
all_sgs.append(project_sgs)
else: # no filters
all_sgs.append(self._security_group_list_project(None))
# prune phase
for project_sgs in all_sgs:
for sg_obj in project_sgs:
# TODO implement same for name specified in filter
if not self._filters_is_present(filters, 'id', sg_obj.uuid):
continue
sgr_info = self.security_group_rules_read(sg_obj.uuid, sg_obj)
if sgr_info:
ret_list.extend(sgr_info)
return ret_list
#end security_group_rule_list
#route table api handlers
def route_table_create(self, rt_q):
rt_obj = self._route_table_neutron_to_vnc(rt_q, CREATE)
try:
rt_uuid = self._route_table_create(rt_obj)
except RefsExistError as e:
self._raise_contrail_exception('BadRequest',
resource='route_table', msg=str(e))
ret_rt_q = self._route_table_vnc_to_neutron(rt_obj)
return ret_rt_q
    #end route_table_create
def route_table_read(self, rt_id):
try:
rt_obj = self._vnc_lib.route_table_read(id=rt_id)
except NoIdError:
# TODO add route table specific exception
self._raise_contrail_exception('NetworkNotFound', net_id=rt_id)
return self._route_table_vnc_to_neutron(rt_obj)
#end route_table_read
def route_table_update(self, rt_id, rt_q):
rt_q['id'] = rt_id
rt_obj = self._route_table_neutron_to_vnc(rt_q, UPDATE)
self._vnc_lib.route_table_update(rt_obj)
return self._route_table_vnc_to_neutron(rt_obj)
    #end route_table_update
def route_table_delete(self, rt_id):
self._route_table_delete(rt_id)
#end route_table_delete
def route_table_list(self, context, filters=None):
ret_list = []
# collect phase
all_rts = [] # all rts in all projects
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
project_rts = self._route_table_list_project(p_id)
all_rts.append(project_rts)
elif filters and 'name' in filters:
p_id = str(uuid.UUID(context['tenant']))
project_rts = self._route_table_list_project(p_id)
all_rts.append(project_rts)
else: # no filters
dom_projects = self._project_list_domain(None)
for project in dom_projects:
proj_id = project['uuid']
project_rts = self._route_table_list_project(proj_id)
all_rts.append(project_rts)
# prune phase
for project_rts in all_rts:
for proj_rt in project_rts:
# TODO implement same for name specified in filter
proj_rt_id = proj_rt['uuid']
if not self._filters_is_present(filters, 'id', proj_rt_id):
continue
rt_info = self.route_table_read(proj_rt_id)
if not self._filters_is_present(filters, 'name',
rt_info['name']):
continue
ret_list.append(rt_info)
return ret_list
#end route_table_list
#service instance api handlers
def svc_instance_create(self, si_q):
si_obj = self._svc_instance_neutron_to_vnc(si_q, CREATE)
si_uuid = self._svc_instance_create(si_obj)
ret_si_q = self._svc_instance_vnc_to_neutron(si_obj)
return ret_si_q
#end svc_instance_create
def svc_instance_read(self, si_id):
try:
si_obj = self._vnc_lib.service_instance_read(id=si_id)
except NoIdError:
# TODO add svc instance specific exception
self._raise_contrail_exception('NetworkNotFound', net_id=si_id)
return self._svc_instance_vnc_to_neutron(si_obj)
#end svc_instance_read
def svc_instance_delete(self, si_id):
self._svc_instance_delete(si_id)
#end svc_instance_delete
def svc_instance_list(self, context, filters=None):
ret_list = []
# collect phase
all_sis = [] # all sis in all projects
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
project_sis = self._svc_instance_list_project(p_id)
all_sis.append(project_sis)
elif filters and 'name' in filters:
p_id = str(uuid.UUID(context['tenant']))
project_sis = self._svc_instance_list_project(p_id)
all_sis.append(project_sis)
else: # no filters
dom_projects = self._project_list_domain(None)
for project in dom_projects:
proj_id = project['uuid']
project_sis = self._svc_instance_list_project(proj_id)
all_sis.append(project_sis)
# prune phase
for project_sis in all_sis:
for proj_si in project_sis:
# TODO implement same for name specified in filter
proj_si_id = proj_si['uuid']
if not self._filters_is_present(filters, 'id', proj_si_id):
continue
si_info = self.svc_instance_read(proj_si_id)
if not self._filters_is_present(filters, 'name',
si_info['name']):
continue
ret_list.append(si_info)
return ret_list
#end svc_instance_list
#end class DBInterface
| Juniper/contrail-dev-controller | src/config/vnc_openstack/vnc_openstack/neutron_plugin_db.py | Python | apache-2.0 | 163,825 |
# -*- coding: utf-8 -*-
import time
from multiprocessing.pool import Pool
import colored
import numpy as np
from dnutils import out, stop, trace, getlogger, ProgressBar, StatusMsg, bf, loggers, newlogger, logs, edict, ifnone, \
ifnot, allnone, allnot, first, sleep, __version__ as version, waitabout
import unittest
from dnutils.logs import expose, inspect, exposure
from dnutils.stats import Gaussian, stopwatch, print_stopwatches, get_stopwatch
from dnutils.tools import LinearScale
loggers({
'default': newlogger(logs.console),
'results': newlogger(logs.console, logs.FileHandler('dnutils-test.log'))
})
def wait():
time.sleep(1)
bfctnames = {
'out': colored.stylize('out()', colored.attr('bold')),
'stop': colored.stylize('stop()', colored.attr('bold')),
'trace': colored.stylize('trace()', colored.attr('bold'))
}
class EDictTest(unittest.TestCase):
def test_xpath(self):
d = edict({'a': [{'b': {'c': 'hello'}}, {'b': {'c': 'world'}}]}, recursive=1)
msg = 'xpath query with indexing'
self.assertEqual(d.xpath('a/[0]/b/c'), 'hello', msg)
self.assertEqual(d.xpath('a/[1]/b/c'), 'world', msg)
self.assertTrue(type(d.xpath('a')) is list, msg)
self.assertTrue(type(d.xpath('a/[0]')) is edict)
d = edict()
d.set_xpath('a/b/c', 'hello, world!', force=True)
assert d.xpath('a/b/d') is None
assert d.xpath('a/b/c') == 'hello, world!'
def test_default(self):
d = edict(default=list)
d['a'].append('first item')
self.assertEqual(d['a'][0], 'first item')
self.assertTrue(d.xpath('a/[1]') is None)
def test_getset(self):
d = edict()
d['a'] = 1
d['b'] = 2
with self.assertRaises(KeyError):
d['c']
self.assertIsNone(d.get('c'))
self.assertEqual(d.get('c', 3), 3)
self.assertDictEqual(d, {'a': 1, 'b': 2})
def test_projection(self):
d = edict({'one': 1, 'two': 2})
d_ = edict(d)
self.assertDictEqual(d.project('one'), {'one': 1})
self.assertDictEqual(d, d_)
class ConditionalTest(unittest.TestCase):
def test_ifnone(self):
self.assertEqual(ifnone(None, 'hello'), 'hello')
self.assertEqual(ifnone('hello', None), 'hello')
self.assertEqual(ifnone(None, 1, transform=str), 1)
self.assertEqual(ifnone(1, 1, transform=str), '1')
self.assertEqual(ifnone(0, 1, transform=str), '0')
def test_ifnot(self):
self.assertEqual(ifnot(None, 'hello'), 'hello')
self.assertEqual(ifnot('hello', None), 'hello')
self.assertEqual(ifnot('', None), None)
self.assertEqual(ifnot(None, 1, transform=str), 1)
self.assertEqual(ifnot(1, 1, transform=str), '1')
self.assertEqual(ifnot(0, 1, transform=str), 1)
def test_allnone(self):
self.assertTrue(allnone([None, None, None]))
self.assertFalse(allnone([0, 0, 0]))
self.assertFalse(allnone([None, None, 1]))
self.assertFalse(allnone([None, None, 0]))
def test_allnot(self):
self.assertTrue(allnot([None, None, None]))
self.assertTrue(allnot([0, 0, 0]))
self.assertFalse(allnot([None, None, 1]))
self.assertTrue(allnot([None, None, 0]))
class GaussianTest(unittest.TestCase):
def test_multivariate(self):
mean = [5., 4.]
cov = [[1., -0.3], [-0.3, 1.]]
data = np.random.multivariate_normal(np.array(mean), np.array(cov), size=50000)
gauss = Gaussian()
for d in data:
gauss.update(d)
for e1, e2 in zip(gauss.mean, mean):
self.assertAlmostEqual(e1, e2, 1, 'means differ too much:\n%s\n!=\n%s' % (mean, gauss.mean))
for e1, e2 in zip(np.nditer(np.array(gauss.cov)), np.nditer(np.array(cov))):
self.assertAlmostEqual(round(float(e1), 1), e2, 1, 'covariances differ too much: %s != %s' % (cov, gauss.cov))
def test_univariate(self):
mu, sigma = 0.5, 0.1
data = np.random.normal(mu, sigma, 1000)
g = Gaussian(data=data)
self.assertAlmostEqual(mu, float(g.mean), 1)
self.assertAlmostEqual(sigma, np.sqrt(float(g.cov)), 1)
class StopWatchTest(unittest.TestCase):
def test_watch(self):
mean = .2
std = .05
times = np.random.normal(mean, std, 100)
for t in times:
with stopwatch('/test'):
sleep(t)
print_stopwatches()
w = get_stopwatch('/test')
self.assertAlmostEqual(w.avg, mean, 1, 'means differ too much:\n%s\n!=\n%s' % (w.avg, mean))
self.assertAlmostEqual(w.std, std, 1, 'stddevs differ too much:\n%s\n!=\n%s' % (w.std, std))
class IteratorTest(unittest.TestCase):
def test_first(self):
self.assertEqual(first([0, 1, 2]), 0)
self.assertEqual(first(None), None)
self.assertEqual(first([]), None)
def gen():
for i in range(3):
yield i
self.assertEqual(first(gen()), 0)
self.assertEqual(first(gen(), str, 'no elements'), '0')
self.assertEqual(first([], str, 'no elements'), 'no elements')
class ScaleTest(unittest.TestCase):
def test_linearscale(self):
scale = LinearScale([0, 100], [0, 1])
self.assertEqual(scale(50), .5)
with self.assertRaises(ValueError):
scale(-50)
scale(150)
scale.strict = False
self.assertEqual(scale(-50), -.5)
self.assertEqual(scale(150), 1.5)
def exposure_proc(*_):
for _ in range(10):
waitabout(1)
# use the exposure as a file lock
with exposure('/vars/myexposure'):
n = inspect(expose('/vars/myexposure'))
expose('/vars/myexposure', n + 1)
assert n + 1 == inspect(expose('/vars/myexposure'))
class ExposureTest(unittest.TestCase):
def test_expose_inspect(self):
expose('/vars/myexposure', 'a', 'b', 'c')
self.assertEqual(inspect('/vars/myexposure'), ['a', 'b', 'c'])
expose('/vars/myexposure2', 2)
self.assertEqual(inspect('/vars/myexposure2'), 2)
expose('/vars/myexposure', 0)
pool = Pool(4)
pool.map(exposure_proc, [[] for _ in range(5)])
pool.close()
pool.join()
if __name__ == '__main__':
print('Welcome to dnutils version %s.' % version)
logger = getlogger('results', logs.DEBUG)
logger.info('Initialized. Running all tests...')
wait()
logger.info('Testing log levels...')
logger.debug('this is the debug level')
logger.info('this is the info level')
logger.warning('this is the warning level')
logger.error('this is the error level')
logger.critical('this is the critical level')
logger.critical('wait a second...')
wait()
logger.debug('This debug message spreads over\nmultiple lines and should be\naligned with appropriate indentation.')
wait()
logger.level = logs.ERROR
logger.info('If you see this message, something went wrong with the log levels.')
logger.level = logs.DEBUG
logger.info('Testing the debug functions.')
wait()
out('the %s function always prints the code location where it is called so it can be found again later swiftly.' %
bfctnames['out'])
wait()
out('it', 'also', 'accepts', 'multiple', 'arguments', 'which', 'are', 'being', 'concatenated')
stop('the %s function is equivalent to %s except for it stops until you hit return:' % (bfctnames['stop'],
bfctnames['out']))
trace('the %s function gives you a stack trace of the current position' % bfctnames['trace'])
logger.info('testing the', bf('ProgressBar'), 'and', bf('StatusMsg'), '...')
bar = ProgressBar(steps=10, color='deep_sky_blue_4c')
for i in range(11):
bar.update(value=i/10., label='step %d' % (i+1))
time.sleep(.5)
bar.finish()
logger.info('testing the', bf(StatusMsg), '(you should see 5 "OK" and 5 "ERROR" messages)')
wait()
for i in range(20):
bar = StatusMsg('this is a Linux-style status bar (%.2d)...' % (i+1))
bar.status = StatusMsg.OK
wait()
bar.finish()
| danielnyga/dnutils | tests/testp3.py | Python | mit | 8,270 |
"""
file systems on-disk formats (ext2, fat32, ntfs, ...)
and related disk formats (mbr, ...)
"""
| larsks/pydonet | lib/pydonet/construct/formats/filesystem/__init__.py | Python | gpl-2.0 | 103 |
import os
from flask import Flask, request, redirect, url_for, render_template, flash, send_from_directory, Response
from werkzeug.utils import secure_filename
from midi2rdf import midi2rdf
from subprocess import Popen
UPLOAD_FOLDER = '/tmp/'
VIRTUOSO_LOAD = '/scratch/amp/midi/virtuoso-load/'
ALLOWED_EXTENSIONS = set(['mid', 'midi'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = "asdklfjlakjsdhflkjh"
@app.route('/')
def hello_world():
# dump = midi2rdf('/Users/Albert/src/midi2rdf/examples/ghostbusters.mid')
return render_template('index.html')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
        # if the user does not select a file, the browser
        # submits an empty part without a filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
local_filename = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(local_filename)
dump = midi2rdf(local_filename, 'turtle')
# If user accepts, we load the dump in the triplestore
if request.form.get("cloud"):
with open(VIRTUOSO_LOAD + filename + '.ttl', 'w') as rdffile:
rdffile.write(dump)
Popen('/usr/local/virtuoso-opensource/bin/isql 1112 < /home/amp/src/midi2rdf-current/src/virtuoso-load.sql', shell=True)
return Response(dump, mimetype="application/n-quads", headers={"Content-disposition": "attachment; filename={}".format(filename + '.ttl')})
# return redirect(url_for('uploaded_file', filename=filename))
return redirect('/')
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8092, debug=True)
| albertmeronyo/midi-rdf | src/service.py | Python | mit | 2,205 |
# ping.py
# Test Fox's connectivity
from discord.ext import commands
class Ping:
def __init__(self, bot):
self.bot = bot
@commands.command()
async def ping(self):
await self.bot.say("Pong!")
def setup(bot):
bot.add_cog(Ping(bot))
| plusreed/foxpy | plugins/core/ping.py | Python | mit | 267 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pclases
def descontar_material_adicional_balas(pdp, articulo, restar = True):
"""
Descuenta el material adicional correspondiente al artículo según
la formulación que indique la línea de fabricación.
Si "restar" es True, descuenta. Si es False, añade la cantidad (para
cuando se elimine un rollo del parte, por ejemplo).
Si es necesario, se dejará material con existencias en negativo, aunque
se avisará al usuario de la incidencia.
"""
linea = pclases.LineaDeProduccion.select(pclases.LineaDeProduccion.q.nombre.contains('de fibra'))
if linea.count() == 0:
print "WARNING: La línea de fibra no está correctamente dada de alta."
else:
linea = linea[0]
formulacion = linea.formulacion
for ca in [ca_con_p for ca_con_p in formulacion.consumosAdicionales if ca_con_p.productoCompra != None]:
if ca.nombre == "antiuvi" and not articulo.productoVenta.camposEspecificosBala.antiuv:
break
if ca.nombre == "negro" and articulo.productoVenta.camposEspecificosBala.color.upper() != "NEGRO":
break
if ca.nombre == "titanio" and articulo.productoVenta.camposEspecificosBala.color.upper() != "TITANIO":
break
# print " >>> Descuento de %s" % ca.productoCompra.descripcion
if restar:
cantidad = ca.cantidad * -1
else:
cantidad = ca.cantidad
            # WTF: special cases (AAAARRRGHHHH)
if "%" in ca.unidad:
peso = articulo.bala.pesobala
cantidad = (cantidad * peso) / 100
#antes = ca.productoCompra.existencias
# ca.productoCompra.existencias += cantidad
consumo = pclases.Consumo(parteDeProduccion = pdp, # @UnusedVariable
productoCompra = ca.productoCompra,
actualizado = True,
antes = -2,
# despues = ca.productoCompra.existencias,
despues = -2,
cantidad = -cantidad)
else:
#antes = ca.productoCompra.existencias
# ca.productoCompra.existencias += cantidad
consumo = pclases.Consumo(parteDeProduccion = pdp, # @UnusedVariable
productoCompra = ca.productoCompra,
actualizado = True,
antes = -2,
# despues = ca.productoCompra.existencias,
despues = -2,
cantidad = -cantidad)
    # NOTE: I do not check that the units in the product catalogue match the ones in the formulation
def descontar_material_adicional_rollos(pdp, articulo, restar = True):
"""
Descuenta el material adicional correspondiente al artículo según
la formulación que indique la línea de fabricación.
Si "restar" es True, descuenta. Si es False, añade la cantidad (para
cuando se elimine un rollo del parte, por ejemplo).
Si es necesario, se dejará material con existencias en negativo, aunque
se avisará al usuario de la incidencia.
"""
linea = pclases.LineaDeProduccion.select(pclases.LineaDeProduccion.q.nombre.contains('de geotextiles'))
if linea.count() == 0:
print "WARNING: La línea de geotextiles no está correctamente dada de alta."
else:
linea = linea[0]
formulacion = linea.formulacion
        # Deduct the wrapping plastic.
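        # Plastic consumption is defined per 5.5 m of width, so the quantity is
        # prorated by the roll's width when the unit mentions 5.5.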
plastico = [ca.productoCompra for ca in formulacion.consumosAdicionales if "plastico" in ca.nombre][0]
if plastico != None:
try:
ca = [ca_con_p for ca_con_p in formulacion.consumosAdicionales
if "plastico" in ca_con_p.nombre][0]
if restar:
cantidad = ca.cantidad * -1
else:
cantidad = ca.cantidad
if "5.5" in ca.unidad:
ancho = articulo.productoVenta.camposEspecificosRollo.ancho
cantidad *= ancho/5.5
antes = plastico.existencias
plastico.existencias += cantidad
consumo_plastico = pclases.Consumo(parteDeProduccion = pdp, # @UnusedVariable
productoCompra = plastico,
actualizado = True,
antes = antes,
despues = plastico.existencias,
cantidad = -cantidad)
except IndexError:
print "WARNING: No se encontró formulación para el plástico de envolver"
for ca in [ca_con_p for ca_con_p in formulacion.consumosAdicionales
if ca_con_p.productoCompra != None and not "plastico" in ca_con_p.nombre]:
# print " >>> Descuento de %s" % ca.productoCompra.descripcion
if restar:
cantidad = ca.cantidad * -1
else:
cantidad = ca.cantidad
            # WTF: special cases (AAAARRRGHHHH)
if "%" in ca.unidad:
peso = articulo.rollo.peso
cantidad = (cantidad * peso) / 100
antes = ca.productoCompra.existencias
# ca.productoCompra.existencias += cantidad
consumo = pclases.Consumo(parteDeProduccion = pdp, # @UnusedVariable
productoCompra = ca.productoCompra,
actualizado = True,
antes = antes,
despues = ca.productoCompra.existencias,
cantidad = -cantidad)
elif "u" and "5.5" in ca.unidad:
ancho = articulo.productoVenta.camposEspecificosRollo.ancho
                anchosestandar = (1.83, 2.75, 5.5) # NOTE: WTF: very, very hardcoded.
for a in anchosestandar:
if ancho not in anchosestandar and ancho < a:
ancho = a
cantidad *= ancho/5.5
antes = ca.productoCompra.existencias
# ca.productoCompra.existencias += cantidad
consumo = pclases.Consumo(parteDeProduccion = pdp, # @UnusedVariable
productoCompra = ca.productoCompra,
actualizado = True,
antes = antes,
despues = ca.productoCompra.existencias,
cantidad = -cantidad)
                # Now for the trickier part. If whole units are deducted for every 5.5
                # metres of width, we must make sure that every 2 or 3 deductions the DB ends up with whole numbers.
                # With a width of 2.75 there is no problem, but with 1.83 the units must end up as x.33, x.66 or x.00
if round(abs(1.0 - ca.productoCompra.existencias % 1.0), 1) == 0:
ca.productoCompra.existencias = round(ca.productoCompra.existencias, 0)
elif "5.5" in ca.unidad:
ancho = articulo.productoVenta.camposEspecificosRollo.ancho
cantidad *= ancho/5.5
antes = ca.productoCompra.existencias
# ca.productoCompra.existencias += cantidad
consumo = pclases.Consumo(parteDeProduccion = pdp, # @UnusedVariable
productoCompra = ca.productoCompra,
actualizado = True,
antes = antes,
despues = ca.productoCompra.existencias,
cantidad = -cantidad)
else:
antes = ca.productoCompra.existencias
# ca.productoCompra.existencias += cantidad
consumo = pclases.Consumo(parteDeProduccion = pdp, # @UnusedVariable
productoCompra = ca.productoCompra,
actualizado = True,
antes = antes,
despues = ca.productoCompra.existencias,
cantidad = -cantidad)
    # NOTE: I do not check that the units in the product catalogue match the ones in the formulation
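# Script body: walk every production report in date order and re-run the
# additional-material deduction for each of its articles (bales or rolls).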
for pdp in pclases.ParteDeProduccion.select(orderBy = "fecha"):
print "Descontando M.A. de parte %d (%s)..." % (pdp.id, pdp.fecha.strftime("%d/%m/%Y"))
for a in pdp.articulos:
if pdp.es_de_balas():
print " Descontando M.A. de balas..."
descontar_material_adicional_balas(pdp, a)
else:
print " Descontando M.A. de rollos..."
descontar_material_adicional_rollos(pdp, a)
| pacoqueen/ginn | ginn/framework/tmp_consumo.py | Python | gpl-2.0 | 9,513 |
from setuptools import setup, find_packages
import os
import sys
import subprocess
from distutils.errors import DistutilsPlatformError, DistutilsInternalError
from setuptools.command.install import install
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('django-socketio-events')
npm_global_dependencies = {
"node-gyp": "*"
}
# local dependencies defined in django-socketio-events/package.json
class custom_install(install):
def run_npm_global(self):
""" h/t https://github.com/elbaschid/virtual-node/blob/master/setup.py """
for name, version in npm_global_dependencies.items():
# packages are installed globally to make sure that they are
# installed in the virtualenv rather than the current directory.
# it is also necessary for packages containing scripts, e.g. less
dep_name = '%s@%s' % (name, version)
self.run_cmd(['npm', 'install', '-g', dep_name])
def run_npm_local(self):
self.run_cmd(['npm', 'install'], 'django-socketio-events')
def run_cmd(self, cmd, cwd=None, extra_env=None):
""" h/t https://github.com/elbaschid/virtual-node/blob/master/setup.py """
all_output = []
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20] + "..." + part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
logger.debug(" ** Running command %s" % cmd_desc)
# output
stdout = subprocess.PIPE
# env
if extra_env:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
else:
env = None
# execute
try:
proc = subprocess.Popen(
[' '.join(cmd)], stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env, shell=True)
except Exception:
e = sys.exc_info()[1]
logger.error("Error %s while executing command %s" % (e, cmd_desc))
raise
stdout = proc.stdout
while stdout:
line = stdout.readline()
if not line:
break
line = line.rstrip()
all_output.append(line)
logger.info(line)
proc.wait()
# error handler
if proc.returncode:
for s in all_output:
logger.critical(s)
raise OSError("Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
return proc.returncode, all_output
def run(self):
if not os.environ.get('VIRTUAL_ENV', None):
raise DistutilsPlatformError('You must install django-socketio-events into a virtualenv. Aborting.')
# print "* * * 1) Do egg install"
# hack: i have to do this twice to ensure node is available
# for npm install
# install.do_egg_install(self)
        # 1) Install npm dependencies into virtualenv/virtual-node
print "* * * \t 2) installing npm dependencies"
self.run_npm_global()
self.run_npm_local()
print "* * * 3) Re-do egg install with npm dependencies intact"
install.do_egg_install(self)
setup(
cmdclass={'install': custom_install},
name='django-socketio-events',
version='0.1.2',
author='Patrick Paul',
author_email='[email protected]',
packages=find_packages() + ['nodjango.node_modules'],
include_package_data=True,
scripts=[],
url='https://github.com/pztrick/django-socketio-events/',
license='MIT-LICENSE.txt',
description='Django package for socket.io via nodejs',
long_description=open('README.md').read(),
install_requires=[
"Django >= 1.7.0",
"virtual-node == 0.1.0",
"socketIO-client == 0.6.1"
],
)
| pztrick/django-socketio-events | setup.py | Python | mit | 4,008 |
#!/usr/bin/python
# -*- coding: utf-8 -*
# Copyright: (c) 2017, Julien Stroheker <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_acs
version_added: "2.4"
short_description: Manage an Azure Container Service(ACS) instance
description:
- Create, update and delete an Azure Container Service(ACS) instance.
options:
resource_group:
description:
- Name of a resource group where the Container Services exists or will be created.
required: true
name:
description:
- Name of the Azure Container Services(ACS) instance.
required: true
state:
description:
- Assert the state of the ACS. Use C(present) to create or update an ACS and C(absent) to delete it.
default: present
choices:
- absent
- present
location:
description:
- Valid azure location. Defaults to location of the resource group.
orchestration_platform:
description:
- Specifies the Container Orchestration Platform to use. Currently can be either C(DCOS), C(Kubernetes) or C(Swarm).
- The I(service_principal) must be defined if set to C(Kubernetes).
choices:
- 'DCOS'
- 'Kubernetes'
- 'Swarm'
required: true
master_profile:
description:
- Master profile suboptions.
required: true
suboptions:
count:
description:
- Number of masters (VMs) in the container service cluster. Allowed values are C(1), C(3), and C(5).
required: true
choices:
- 1
- 3
- 5
vm_size:
description:
                - The VM Size of each of the master VMs (e.g. C(Standard_F1) / C(Standard_D2v2)).
required: true
version_added: 2.5
dns_prefix:
description:
- The DNS Prefix to use for the Container Service master nodes.
required: true
linux_profile:
description:
- The Linux profile suboptions.
required: true
suboptions:
admin_username:
description:
- The Admin Username for the Cluster.
required: true
ssh_key:
description:
- The Public SSH Key used to access the cluster.
required: true
agent_pool_profiles:
description:
- The agent pool profile suboptions.
required: true
suboptions:
name:
description:
- Unique name of the agent pool profile in the context of the subscription and resource group.
required: true
count:
description:
- Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive).
required: true
dns_prefix:
description:
- The DNS Prefix given to Agents in this Agent Pool.
required: true
vm_size:
description:
- The VM Size of each of the Agent Pool VM's (e.g. C(Standard_F1) / C(Standard_D2v2)).
required: true
service_principal:
description:
- The service principal suboptions.
- Required when I(orchestration_platform=Kubernetes).
suboptions:
client_id:
description:
- The ID for the Service Principal.
client_secret:
description:
- The secret password associated with the service principal.
diagnostics_profile:
description:
- Should VM Diagnostics be enabled for the Container Service VM's.
required: true
type: bool
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Julien Stroheker (@julienstroheker)"
'''
EXAMPLES = '''
- name: Create an azure container services instance running Kubernetes
azure_rm_acs:
name: acctestcontservice1
location: eastus
resource_group: myResourceGroup
orchestration_platform: Kubernetes
master_profile:
- count: 3
dns_prefix: acsk8smasterdns
vm_size: Standard_D2_v2
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
service_principal:
- client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948"
client_secret: "mySPNp@ssw0rd!"
agent_pool_profiles:
- name: default
count: 5
dns_prefix: acsk8sagent
vm_size: Standard_D2_v2
diagnostics_profile: false
tags:
Environment: Production
- name: Create an azure container services instance running DCOS
azure_rm_acs:
name: acctestcontservice2
location: eastus
resource_group: myResourceGroup
orchestration_platform: DCOS
master_profile:
- count: 3
dns_prefix: acsdcosmasterdns
vm_size: Standard_D2_v2
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
agent_pool_profiles:
- name: default
count: 5
dns_prefix: acscdcosagent
vm_size: Standard_D2_v2
diagnostics_profile: false
tags:
Environment: Production
- name: Create an azure container services instance running Swarm
azure_rm_acs:
name: acctestcontservice3
location: eastus
resource_group: myResourceGroup
orchestration_platform: Swarm
master_profile:
- count: 3
dns_prefix: acsswarmmasterdns
vm_size: Standard_D2_v2
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
agent_pool_profiles:
- name: default
count: 5
dns_prefix: acsswarmagent
vm_size: Standard_D2_v2
diagnostics_profile: false
tags:
Environment: Production
# Deletes the specified container service in the specified subscription and resource group.
# The operation does not delete other resources created as part of creating a container service,
# including storage accounts, VMs, and availability sets. All the other resources created with the container
# service are part of the same resource group and can be deleted individually.
- name: Remove an azure container services instance
azure_rm_acs:
name: acctestcontservice3
location: eastus
resource_group: myResourceGroup
state: absent
orchestration_platform: Swarm
master_profile:
- count: 1
vm_size: Standard_A0
dns_prefix: acstestingmasterdns5
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
agent_pool_profiles:
- name: default
count: 4
dns_prefix: acctestagent15
vm_size: Standard_A0
diagnostics_profile: false
tags:
Ansible: azure_rm_acs
'''
RETURN = '''
state:
description: Current state of the Azure Container Service(ACS).
returned: always
type: dict
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.containerservice.models import (
ContainerService, ContainerServiceOrchestratorProfile, ContainerServiceCustomProfile,
ContainerServiceServicePrincipalProfile, ContainerServiceMasterProfile,
ContainerServiceAgentPoolProfile, ContainerServiceWindowsProfile,
ContainerServiceLinuxProfile, ContainerServiceSshConfiguration,
ContainerServiceDiagnosticsProfile, ContainerServiceSshPublicKey,
ContainerServiceVMDiagnostics
)
except ImportError:
# This is handled in azure_rm_common
pass
def create_agent_pool_profile_instance(agentpoolprofile):
'''
Helper method to serialize a dict to a ContainerServiceAgentPoolProfile
:param: agentpoolprofile: dict with the parameters to setup the ContainerServiceAgentPoolProfile
:return: ContainerServiceAgentPoolProfile
'''
return ContainerServiceAgentPoolProfile(
name=agentpoolprofile['name'],
count=agentpoolprofile['count'],
dns_prefix=agentpoolprofile['dns_prefix'],
vm_size=agentpoolprofile['vm_size']
)
def create_orch_platform_instance(orchestrator):
'''
Helper method to serialize a dict to a ContainerServiceOrchestratorProfile
:param: orchestrator: dict with the parameters to setup the ContainerServiceOrchestratorProfile
:return: ContainerServiceOrchestratorProfile
'''
return ContainerServiceOrchestratorProfile(
orchestrator_type=orchestrator,
)
def create_service_principal_profile_instance(spnprofile):
'''
Helper method to serialize a dict to a ContainerServiceServicePrincipalProfile
:param: spnprofile: dict with the parameters to setup the ContainerServiceServicePrincipalProfile
:return: ContainerServiceServicePrincipalProfile
'''
return ContainerServiceServicePrincipalProfile(
client_id=spnprofile[0]['client_id'],
secret=spnprofile[0]['client_secret']
)
def create_linux_profile_instance(linuxprofile):
'''
Helper method to serialize a dict to a ContainerServiceLinuxProfile
:param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile
:return: ContainerServiceLinuxProfile
'''
return ContainerServiceLinuxProfile(
admin_username=linuxprofile[0]['admin_username'],
ssh=create_ssh_configuration_instance(linuxprofile[0]['ssh_key'])
)
def create_ssh_configuration_instance(sshconf):
'''
Helper method to serialize a dict to a ContainerServiceSshConfiguration
:param: sshconf: dict with the parameters to setup the ContainerServiceSshConfiguration
:return: ContainerServiceSshConfiguration
'''
listssh = []
key = ContainerServiceSshPublicKey(key_data=str(sshconf))
listssh.append(key)
return ContainerServiceSshConfiguration(
public_keys=listssh
)
def create_master_profile_instance(masterprofile):
'''
Helper method to serialize a dict to a ContainerServiceMasterProfile
Note: first_consecutive_static_ip is specifically set to None, for Azure server doesn't accept
request body with this property. This should be an inconsistency bug before Azure client SDK
and Azure server.
:param: masterprofile: dict with the parameters to setup the ContainerServiceMasterProfile
:return: ContainerServiceMasterProfile
'''
return ContainerServiceMasterProfile(
count=masterprofile[0]['count'],
dns_prefix=masterprofile[0]['dns_prefix'],
vm_size=masterprofile[0]['vm_size'],
first_consecutive_static_ip=None
)
def create_diagnostics_profile_instance(diagprofile):
'''
Helper method to serialize a dict to a ContainerServiceDiagnosticsProfile
:param: diagprofile: dict with the parameters to setup the ContainerServiceDiagnosticsProfile
:return: ContainerServiceDiagnosticsProfile
'''
return ContainerServiceDiagnosticsProfile(
vm_diagnostics=create_vm_diagnostics_instance(diagprofile)
)
def create_vm_diagnostics_instance(vmdiag):
'''
Helper method to serialize a dict to a ContainerServiceVMDiagnostics
:param: vmdiag: dict with the parameters to setup the ContainerServiceVMDiagnostics
:return: ContainerServiceVMDiagnostics
'''
return ContainerServiceVMDiagnostics(
enabled=vmdiag
)
def create_acs_dict(acs):
'''
Helper method to deserialize a ContainerService to a dict
:param: acs: ContainerService or AzureOperationPoller with the Azure callback object
:return: dict with the state on Azure
'''
service_principal_profile_dict = None
if acs.orchestrator_profile.orchestrator_type == 'Kubernetes':
service_principal_profile_dict = create_service_principal_profile_dict(acs.service_principal_profile)
return dict(
id=acs.id,
name=acs.name,
location=acs.location,
tags=acs.tags,
orchestrator_profile=create_orchestrator_profile_dict(acs.orchestrator_profile),
master_profile=create_master_profile_dict(acs.master_profile),
linux_profile=create_linux_profile_dict(acs.linux_profile),
service_principal_profile=service_principal_profile_dict,
diagnostics_profile=create_diagnotstics_profile_dict(acs.diagnostics_profile),
provisioning_state=acs.provisioning_state,
agent_pool_profiles=create_agent_pool_profiles_dict(acs.agent_pool_profiles),
type=acs.type
)
def create_linux_profile_dict(linuxprofile):
'''
Helper method to deserialize a ContainerServiceLinuxProfile to a dict
:param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
ssh_key=linuxprofile.ssh.public_keys[0].key_data,
admin_username=linuxprofile.admin_username
)
def create_master_profile_dict(masterprofile):
'''
Helper method to deserialize a ContainerServiceMasterProfile to a dict
:param: masterprofile: ContainerServiceMasterProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
count=masterprofile.count,
fqdn=masterprofile.fqdn,
vm_size=masterprofile.vm_size,
dns_prefix=masterprofile.dns_prefix
)
def create_service_principal_profile_dict(serviceprincipalprofile):
'''
Helper method to deserialize a ContainerServiceServicePrincipalProfile to a dict
Note: For security reason, the service principal secret is skipped on purpose.
:param: serviceprincipalprofile: ContainerServiceServicePrincipalProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
client_id=serviceprincipalprofile.client_id
)
def create_diagnotstics_profile_dict(diagnosticsprofile):
'''
Helper method to deserialize a ContainerServiceVMDiagnostics to a dict
:param: diagnosticsprofile: ContainerServiceVMDiagnostics with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
vm_diagnostics=diagnosticsprofile.vm_diagnostics.enabled
)
def create_orchestrator_profile_dict(orchestratorprofile):
'''
Helper method to deserialize a ContainerServiceOrchestratorProfile to a dict
:param: orchestratorprofile: ContainerServiceOrchestratorProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
orchestrator_type=str(orchestratorprofile.orchestrator_type)
)
def create_agent_pool_profiles_dict(agentpoolprofiles):
'''
Helper method to deserialize a ContainerServiceAgentPoolProfile to a dict
:param: agentpoolprofiles: ContainerServiceAgentPoolProfile with the Azure callback object
:return: dict with the state on Azure
'''
return [dict(
count=profile.count,
vm_size=profile.vm_size,
name=profile.name,
dns_prefix=profile.dns_prefix,
fqdn=profile.fqdn
) for profile in agentpoolprofiles]
class AzureRMContainerService(AzureRMModuleBase):
"""Configuration class for an Azure RM container service resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
location=dict(
type='str'
),
orchestration_platform=dict(
type='str',
required=True,
choices=['DCOS', 'Kubernetes', 'Swarm']
),
master_profile=dict(
type='list',
required=True
),
linux_profile=dict(
type='list',
required=True
),
agent_pool_profiles=dict(
type='list',
required=True
),
service_principal=dict(
type='list'
),
diagnostics_profile=dict(
type='bool',
required=True
)
)
self.resource_group = None
self.name = None
self.location = None
self.tags = None
self.state = None
self.orchestration_platform = None
self.master_profile = None
self.linux_profile = None
self.agent_pool_profiles = None
self.service_principal = None
self.diagnostics_profile = None
self.results = dict(changed=False, state=dict())
super(AzureRMContainerService, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
resource_group = None
response = None
results = dict()
to_be_updated = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
# Check if the ACS instance already present in the RG
if self.state == 'present':
if self.orchestration_platform == 'Kubernetes':
if not self.service_principal:
self.fail('service_principal should be specified when using Kubernetes')
if not self.service_principal[0].get('client_id'):
self.fail('service_principal.client_id should be specified when using Kubernetes')
if not self.service_principal[0].get('client_secret'):
self.fail('service_principal.client_secret should be specified when using Kubernetes')
mastercount = self.master_profile[0].get('count')
if mastercount != 1 and mastercount != 3 and mastercount != 5:
                self.fail('Invalid master count: {0}. It should be 1, 3 or 5.'.format(mastercount))
            # For now there cannot be more than one agent pool; remove this check once that changes
agentpoolcount = len(self.agent_pool_profiles)
if agentpoolcount > 1:
                self.fail('You cannot specify more than one agent_pool_profile')
response = self.get_acs()
self.results['state'] = response
if not response:
to_be_updated = True
else:
self.log('Results : {0}'.format(response))
update_tags, response['tags'] = self.update_tags(response['tags'])
if response['provisioning_state'] == "Succeeded":
if update_tags:
to_be_updated = True
def is_property_changed(profile, property, ignore_case=False):
base = response[profile].get(property)
new = getattr(self, profile)[0].get(property)
if ignore_case:
return base.lower() != new.lower()
else:
return base != new
# Cannot Update the master count for now // Uncomment this block in the future to support it
if is_property_changed('master_profile', 'count'):
# self.log(("Master Profile Count Diff, Was {0} / Now {1}"
# .format(response['master_profile'].count,
# self.master_profile[0].get('count'))))
# to_be_updated = True
self.module.warn("master_profile.count cannot be updated")
# Cannot Update the master vm_size for now. Could be a client SDK bug
# Uncomment this block in the future to support it
if is_property_changed('master_profile', 'vm_size', True):
# self.log(("Master Profile VM Size Diff, Was {0} / Now {1}"
# .format(response['master_profile'].get('vm_size'),
# self.master_profile[0].get('vm_size'))))
# to_be_updated = True
self.module.warn("master_profile.vm_size cannot be updated")
# Cannot Update the SSH Key for now // Uncomment this block in the future to support it
if is_property_changed('linux_profile', 'ssh_key'):
# self.log(("Linux Profile Diff SSH, Was {0} / Now {1}"
# .format(response['linux_profile'].ssh.public_keys[0].key_data,
# self.linux_profile[0].get('ssh_key'))))
# to_be_updated = True
self.module.warn("linux_profile.ssh_key cannot be updated")
# self.log("linux_profile response : {0}".format(response['linux_profile'].get('admin_username')))
# self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username')))
# Cannot Update the Username for now // Uncomment this block in the future to support it
if is_property_changed('linux_profile', 'admin_username'):
# self.log(("Linux Profile Diff User, Was {0} / Now {1}"
# .format(response['linux_profile'].admin_username,
# self.linux_profile[0].get('admin_username'))))
# to_be_updated = True
self.module.warn("linux_profile.admin_username cannot be updated")
                    # Cannot have more than one agent pool profile for now // Uncomment this block in the future to support it
# if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles):
# self.log("Agent Pool count is diff, need to updated")
# to_be_updated = True
for profile_result in response['agent_pool_profiles']:
matched = False
for profile_self in self.agent_pool_profiles:
if profile_result['name'] == profile_self['name']:
matched = True
if profile_result['count'] != profile_self['count'] or profile_result['vm_size'] != \
profile_self['vm_size']:
self.log(("Agent Profile Diff - Count was {0} / Now {1} - Vm_size was {2} / Now {3}"
.format(profile_result['count'], profile_self['count'],
profile_result['vm_size'], profile_self['vm_size'])))
to_be_updated = True
if not matched:
self.log("Agent Pool not found")
to_be_updated = True
if to_be_updated:
self.log("Need to Create / Update the ACS instance")
if self.check_mode:
return self.results
self.results['state'] = self.create_update_acs()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.state == 'absent':
if self.check_mode:
return self.results
self.delete_acs()
self.log("ACS instance deleted")
return self.results
def create_update_acs(self):
'''
Creates or updates a container service with the specified configuration of orchestrator, masters, and agents.
:return: deserialized ACS instance state dictionary
'''
self.log("Creating / Updating the ACS instance {0}".format(self.name))
service_principal_profile = None
agentpools = []
if self.agent_pool_profiles:
for profile in self.agent_pool_profiles:
self.log("Trying to push the following Profile {0}".format(profile))
agentpools.append(create_agent_pool_profile_instance(profile))
if self.orchestration_platform == 'Kubernetes':
service_principal_profile = create_service_principal_profile_instance(self.service_principal)
parameters = ContainerService(
location=self.location,
tags=self.tags,
orchestrator_profile=create_orch_platform_instance(self.orchestration_platform),
service_principal_profile=service_principal_profile,
linux_profile=create_linux_profile_instance(self.linux_profile),
master_profile=create_master_profile_instance(self.master_profile),
agent_pool_profiles=agentpools,
diagnostics_profile=create_diagnostics_profile_instance(self.diagnostics_profile)
)
# self.log("orchestrator_profile : {0}".format(parameters.orchestrator_profile))
# self.log("service_principal_profile : {0}".format(parameters.service_principal_profile))
# self.log("linux_profile : {0}".format(parameters.linux_profile))
# self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0]))
# self.log("ssh : {0}".format(parameters.linux_profile.ssh))
# self.log("master_profile : {0}".format(parameters.master_profile))
# self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles))
# self.log("vm_diagnostics : {0}".format(parameters.diagnostics_profile.vm_diagnostics))
try:
poller = self.containerservice_client.container_services.create_or_update(self.resource_group, self.name,
parameters)
response = self.get_poller_result(poller)
except CloudError as exc:
self.log('Error attempting to create the ACS instance.')
self.fail("Error creating the ACS instance: {0}".format(str(exc)))
return create_acs_dict(response)
def delete_acs(self):
'''
Deletes the specified container service in the specified subscription and resource group.
The operation does not delete other resources created as part of creating a container service,
including storage accounts, VMs, and availability sets.
All the other resources created with the container service are part of the same resource group and can be deleted individually.
:return: True
'''
self.log("Deleting the ACS instance {0}".format(self.name))
try:
poller = self.containerservice_client.container_services.delete(self.resource_group, self.name)
self.get_poller_result(poller)
except CloudError as e:
self.log('Error attempting to delete the ACS instance.')
self.fail("Error deleting the ACS instance: {0}".format(str(e)))
return True
def get_acs(self):
'''
Gets the properties of the specified container service.
:return: deserialized ACS instance state dictionary
'''
self.log("Checking if the ACS instance {0} is present".format(self.name))
found = False
try:
response = self.containerservice_client.container_services.get(self.resource_group, self.name)
found = True
self.log("Response : {0}".format(response))
self.log("ACS instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the ACS instance.')
if found is True:
return create_acs_dict(response)
else:
return False
def main():
"""Main execution"""
AzureRMContainerService()
if __name__ == '__main__':
main()
| pilou-/ansible | lib/ansible/modules/cloud/azure/azure_rm_acs.py | Python | gpl-3.0 | 29,457 |
"""
WSGI config for the qessera project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "qessera.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| neldom/qessera | qessera/wsgi.py | Python | mit | 493 |
import logging
import os
import time
import numpy as np
import theano.tensor as T
from theano import config
import theano
from blocks.algorithms import (GradientDescent, Adam, Momentum,
CompositeRule, StepClipping)
from blocks.extensions import FinishAfter, Printing, ProgressBar
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
from blocks.extensions.monitoring import (TrainingDataMonitoring,
DataStreamMonitoring)
from blocks.bricks import Rectifier, Softmax, MLP
from blocks.main_loop import MainLoop
from blocks.model import Model
from utils import SaveLog, SaveParams, Glorot, visualize_attention, LRDecay
from blocks.initialization import Constant
from blocks.graph import ComputationGraph, apply_noise
from LSTM_attention_model import LSTMAttention
from blocks.monitoring import aggregation
from blocks.filter import VariableFilter
from blocks.roles import WEIGHT
from visualize import analyze
floatX = theano.config.floatX
logger = logging.getLogger('main')
def setup_model(configs):
tensor5 = theano.tensor.TensorType(config.floatX, (False,) * 5)
# shape: T x B x C x X x Y
input_ = tensor5('features')
tensor3 = theano.tensor.TensorType(config.floatX, (False,) * 3)
locs = tensor3('locs')
# shape: B x Classes
target = T.ivector('targets')
model = LSTMAttention(
configs,
weights_init=Glorot(),
biases_init=Constant(0))
model.initialize()
    (h, c, location, scale, alpha, patch, down_sampled_input,
conved_part_1, conved_part_2, pre_lstm) = model.apply(input_, locs)
model.location = location
model.scale = scale
    model.alpha = alpha
model.patch = patch
classifier = MLP(
[Rectifier(), Softmax()],
configs['classifier_dims'],
weights_init=Glorot(),
biases_init=Constant(0))
classifier.initialize()
probabilities = classifier.apply(h[-1])
cost = CategoricalCrossEntropy().apply(target, probabilities)
cost.name = 'CE'
error_rate = MisclassificationRate().apply(target, probabilities)
error_rate.name = 'ER'
model.cost = cost
model.error_rate = error_rate
model.probabilities = probabilities
if configs['load_pretrained']:
blocks_model = Model(model.cost)
all_params = blocks_model.parameters
with open('VGG_CNN_params.npz') as f:
loaded = np.load(f)
all_conv_params = loaded.keys()
for param in all_params:
if param.name in loaded.keys():
assert param.get_value().shape == loaded[param.name].shape
param.set_value(loaded[param.name])
all_conv_params.pop(all_conv_params.index(param.name))
print "the following parameters did not match: " + str(all_conv_params)
if configs['test_model']:
print "TESTING THE MODEL: CHECK THE INPUT SIZE!"
cg = ComputationGraph(model.cost)
f = theano.function(cg.inputs, [model.cost],
on_unused_input='ignore',
allow_input_downcast=True)
data = configs['get_streams'](configs[
'batch_size'])[0].get_epoch_iterator().next()
f(data[1], data[0], data[2])
print "Test passed! ;)"
model.monitorings = [cost, error_rate]
return model
def train(model, configs):
get_streams = configs['get_streams']
save_path = configs['save_path']
num_epochs = configs['num_epochs']
batch_size = configs['batch_size']
lrs = configs['lrs']
until_which_epoch = configs['until_which_epoch']
grad_clipping = configs['grad_clipping']
monitorings = model.monitorings
# Training
if configs['weight_noise'] > 0:
cg = ComputationGraph(model.cost)
weights = VariableFilter(roles=[WEIGHT])(cg.variables)
cg = apply_noise(cg, weights, configs['weight_noise'])
model.cost = cg.outputs[0].copy(name='CE')
if configs['l2_reg'] > 0:
cg = ComputationGraph(model.cost)
weights = VariableFilter(roles=[WEIGHT])(cg.variables)
new_cost = model.cost + configs['l2_reg'] * sum([
(weight ** 2).sum() for weight in weights])
model.cost = new_cost.copy(name='CE')
blocks_model = Model(model.cost)
all_params = blocks_model.parameters
print "Number of found parameters:" + str(len(all_params))
print all_params
default_lr = np.float32(configs['lrs'][0])
lr_var = theano.shared(default_lr, name="learning_rate")
clipping = StepClipping(threshold=np.cast[floatX](grad_clipping))
# sgd_momentum = Momentum(
# learning_rate=0.0001,
# momentum=0.95)
# step_rule = CompositeRule([clipping, sgd_momentum])
adam = Adam(learning_rate=lr_var)
step_rule = CompositeRule([clipping, adam])
training_algorithm = GradientDescent(
cost=model.cost, parameters=all_params,
step_rule=step_rule)
monitored_variables = [
lr_var,
aggregation.mean(training_algorithm.total_gradient_norm)] + monitorings
for param in all_params:
name = param.tag.annotations[0].name + "." + param.name
to_monitor = training_algorithm.gradients[param].norm(2)
to_monitor.name = name + "_grad_norm"
monitored_variables.append(to_monitor)
to_monitor = param.norm(2)
to_monitor.name = name + "_norm"
monitored_variables.append(to_monitor)
train_data_stream, valid_data_stream = get_streams(batch_size)
train_monitoring = TrainingDataMonitoring(
variables=monitored_variables,
prefix="train",
after_epoch=True)
valid_monitoring = DataStreamMonitoring(
variables=monitored_variables,
data_stream=valid_data_stream,
prefix="valid",
after_epoch=True)
main_loop = MainLoop(
algorithm=training_algorithm,
data_stream=train_data_stream,
model=blocks_model,
extensions=[
train_monitoring,
valid_monitoring,
FinishAfter(after_n_epochs=num_epochs),
SaveParams('valid_CE',
blocks_model, save_path,
after_epoch=True),
SaveLog(after_epoch=True),
ProgressBar(),
LRDecay(lr_var, lrs, until_which_epoch,
after_epoch=True),
Printing(after_epoch=True)])
main_loop.run()
def evaluate(model, load_path, configs):
with open(load_path + 'trained_params_best.npz') as f:
loaded = np.load(f)
blocks_model = Model(model.cost)
params_dicts = blocks_model.get_parameter_dict()
params_names = params_dicts.keys()
for param_name in params_names:
param = params_dicts[param_name]
# '/f_6_.W' --> 'f_6_.W'
slash_index = param_name.find('/')
param_name = param_name[slash_index + 1:]
assert param.get_value().shape == loaded[param_name].shape
param.set_value(loaded[param_name])
inps = ComputationGraph(model.error_rate).inputs
eval_function = theano.function(
inps, [model.error_rate, model.probabilities])
_, vds = configs['get_streams'](100)
data = vds.get_epoch_iterator().next()
print "Valid_ER: " + str(
eval_function(data[0], data[2], data[1])[0])
return eval_function
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
configs = {}
# from datasets import get_cmv_v2_len10_streams
# from datasets import get_cmv_v1_streams
from datasets import get_bmnist_streams
configs['get_streams'] = get_bmnist_streams
configs['save_path'] = 'results/Test_'
configs['num_epochs'] = 600
configs['batch_size'] = 100
configs['lrs'] = [1e-4, 1e-5, 1e-6]
configs['until_which_epoch'] = [150, 400, configs['num_epochs']]
configs['grad_clipping'] = 2
configs['weight_noise'] = 0.0
configs['conv_layers'] = [ # 1 x 28 x 28
['conv_1', (20, 1, 5, 5), (2, 2), None], # 20 x 16 x 16
['conv_2', (50, 20, 5, 5), (2, 2), None], # 50 x 10 x 10
['conv_3', (80, 50, 3, 3), (2, 2), None]] # 80 x 6 x 6
configs['num_layers_first_half_of_conv'] = 0
configs['fc_layers'] = [['fc', (2880, 128), 'relu']]
configs['lstm_dim'] = 128
configs['attention_mlp_hidden_dims'] = [128]
configs['cropper_input_shape'] = (100, 100)
configs['patch_shape'] = (28, 28)
configs['num_channels'] = 1
configs['classifier_dims'] = [configs['lstm_dim'], 64, 10]
configs['load_pretrained'] = False
configs['test_model'] = True
configs['l2_reg'] = 0.001
timestr = time.strftime("%Y_%m_%d_at_%H_%M")
save_path = configs['save_path'] + timestr
configs['save_path'] = save_path
log_path = os.path.join(save_path, 'log.txt')
os.makedirs(save_path)
fh = logging.FileHandler(filename=log_path)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
for item in configs:
logger.info(item + ': %s' % str(configs[item]))
model = setup_model(configs)
eval_ = False
if eval_:
eval_function = evaluate(model, 'results/BMNIST_Learn_2016_02_25_at_23_50/', configs)
analyze('results/BMNIST_Learn_2016_02_25_at_23_50/')
visualize_attention(model, configs, eval_function)
else:
# evaluate(model, 'results/CMV_Hard_len10_2016_02_22_at_21_00/')
train(model, configs)
| negar-rostamzadeh/rna | cooking.py | Python | mit | 9,787 |
import random
def generate(data):
ask = ['equivalent resistance $R_T$', 'current from the power supply $I_T$']
which = random.choice([0,1])
data['params']['ask'] = ask[which]
label = ["$R_T$", "$I_T$"]
data['params']['lab'] = label[which]
unit = ["$\\Omega$", "A"]
data['params']['unit'] = unit[which]
Vt = random.randint(100,200)
data['params']['Vt'] = Vt
R1 = random.choice(list(range(20,180,10)))
data['params']['R1'] = R1
R2 = random.choice(list(range(20,180,20)))
data['params']['R2'] = R2
R3 = random.choice(list(range(20,100,5)))
data['params']['R3'] = R3
figname = ["circ1.png", "circ2.png"]
whichfig = random.choice([0,1])
data['params']['figname'] = figname[whichfig]
if whichfig: # this is the series
Rt = R1 + R2 + R3
else: # this is the parallel
Rtinv = 1/R1 + 1/R2 + 1/R3
Rt = 1/Rtinv
It = Vt/Rt
ans = [Rt, It]
data['correct_answers']['ans'] = ans[which]
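# Illustrative check of the formulas used above (these numbers are made up,
# not taken from a generated question): for the parallel circuit with
# R1 = 20, R2 = 40 and R3 = 40 ohms, 1/Rt = 1/20 + 1/40 + 1/40 = 0.1, so
# Rt = 10 ohms; with Vt = 120 V the supply current is It = Vt/Rt = 12 A.
# The same resistors in series give Rt = 20 + 40 + 40 = 100 ohms and It = 1.2 A.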
| PrairieLearn/PrairieLearn | exampleCourse/questions/workshop/Lesson1_example3_v3/server.py | Python | agpl-3.0 | 1,005 |
from datetime import datetime, timedelta
import re
from warnings import warn
from .base_handler import BaseHandler
import requests
API_TARGET = "https://api.bitbucket.org/1.0/repositories"
descendants_re = re.compile(r"Forks/Queues \((?P<descendants>\d+)\)", re.IGNORECASE)
class BitbucketHandler(BaseHandler):
title = 'Bitbucket'
url_regex = 'https://bitbucket.org/'
url = 'https://bitbucket.org'
repo_regex = r'https://bitbucket.org/[\w\-\_]+/([\w\-\_]+)/{0,1}'
slug_regex = r'https://bitbucket.org/[\w\-\_]+/([\w\-\_]+)/{0,1}'
def _get_bitbucket_commits(self, package):
repo_name = package.repo_name()
if repo_name.endswith("/"):
repo_name = repo_name[0:-1]
target = "%s/%s/changesets/?limit=50" % (API_TARGET, repo_name)
data = self.get_json(target)
if data is None:
return [] # todo: log this?
return data.get("changesets", [])
def fetch_commits(self, package):
from package.models import Commit # Import placed here to avoid circular dependencies
for commit in self._get_bitbucket_commits(package):
timestamp = commit["timestamp"].split("+")
if len(timestamp) > 1:
timestamp = timestamp[0]
else:
timestamp = commit["timestamp"]
commit, created = Commit.objects.get_or_create(package=package, commit_date=timestamp)
# ugly way to get 52 weeks of commits
# TODO - make this better
now = datetime.now()
commits = package.commit_set.filter(
commit_date__gt=now - timedelta(weeks=52),
).values_list('commit_date', flat=True)
weeks = [0] * 52
for cdate in commits:
age_weeks = (now - cdate).days // 7
if age_weeks < 52:
weeks[age_weeks] += 1
package.commit_list = ','.join(map(str, reversed(weeks)))
package.save()
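    # Example of the bucketing above (hypothetical numbers): a commit made 10
    # days ago has age_weeks == 1 and is counted in weeks[1]; because the list
    # is reversed before joining, commit_list runs from the oldest week to the
    # current one, so that commit shows up as the second-to-last value.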
def fetch_metadata(self, package):
# prep the target name
repo_name = package.repo_name()
target = API_TARGET + "/" + repo_name
if not target.endswith("/"):
target += "/"
data = self.get_json(target)
if data is None:
# TODO - log this better
message = "%s had a JSONDecodeError during bitbucket.repo.pull" % (package.title)
warn(message)
return package
# description
package.repo_description = data.get("description", "")
# get the forks of a repo
url = "{0}forks/".format(target)
data = self.get_json(url)
package.repo_forks = len(data['forks'])
# get the followers of a repo
url = "{0}followers/".format(target)
data = self.get_json(url)
package.repo_watchers = data['count']
# Getting participants
try:
package.participants = package.repo_url.split("/")[3] # the only way known to fetch this from bitbucket!!!
except IndexError:
package.participants = ""
return package
repo_handler = BitbucketHandler()
| miketheman/opencomparison | package/repos/bitbucket.py | Python | mit | 3,115 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A program to annotate a conll file with a tensorflow neural net parser."""
import os
import os.path
import time
import tempfile
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from google.protobuf import text_format
from syntaxnet import sentence_pb2
from syntaxnet import graph_builder
from syntaxnet import structured_graph_builder
from syntaxnet.ops import gen_parser_ops
from syntaxnet import task_spec_pb2
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('task_context', '',
'Path to a task context with inputs and parameters for '
'feature extractors.')
flags.DEFINE_string('resource_dir', '',
'Optional base directory for task context resources.')
flags.DEFINE_string('model_path', '', 'Path to model parameters.')
flags.DEFINE_string('arg_prefix', None, 'Prefix for context parameters.')
flags.DEFINE_string('graph_builder', 'greedy',
'Which graph builder to use, either greedy or structured.')
flags.DEFINE_string('input', 'stdin',
'Name of the context input to read data from.')
flags.DEFINE_string('output', 'stdout',
'Name of the context input to write data to.')
flags.DEFINE_string('hidden_layer_sizes', '200,200',
'Comma separated list of hidden layer sizes.')
flags.DEFINE_integer('batch_size', 32,
'Number of sentences to process in parallel.')
flags.DEFINE_integer('beam_size', 8, 'Number of slots for beam parsing.')
flags.DEFINE_integer('max_steps', 1000, 'Max number of steps to take.')
flags.DEFINE_bool('slim_model', False,
'Whether to expect only averaged variables.')
def RewriteContext(task_context):
context = task_spec_pb2.TaskSpec()
with gfile.FastGFile(task_context) as fin:
text_format.Merge(fin.read(), context)
for resource in context.input:
for part in resource.part:
if part.file_pattern != '-':
part.file_pattern = os.path.join(FLAGS.resource_dir, part.file_pattern)
with tempfile.NamedTemporaryFile(delete=False) as fout:
fout.write(str(context))
return fout.name
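# For example (made-up paths): with --resource_dir=/data, a resource part whose
# file_pattern is "label-map" is rewritten to "/data/label-map" before the
# temporary task-context file is handed to the parser ops.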
def Eval(sess):
"""Builds and evaluates a network."""
task_context = FLAGS.task_context
if FLAGS.resource_dir:
task_context = RewriteContext(task_context)
feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
gen_parser_ops.feature_size(task_context=task_context,
arg_prefix=FLAGS.arg_prefix))
t = time.time()
hidden_layer_sizes = map(int, FLAGS.hidden_layer_sizes.split(','))
logging.info('Building training network with parameters: feature_sizes: %s '
'domain_sizes: %s', feature_sizes, domain_sizes)
if FLAGS.graph_builder == 'greedy':
parser = graph_builder.GreedyParser(num_actions,
feature_sizes,
domain_sizes,
embedding_dims,
hidden_layer_sizes,
gate_gradients=True,
arg_prefix=FLAGS.arg_prefix)
else:
parser = structured_graph_builder.StructuredGraphBuilder(
num_actions,
feature_sizes,
domain_sizes,
embedding_dims,
hidden_layer_sizes,
gate_gradients=True,
arg_prefix=FLAGS.arg_prefix,
beam_size=FLAGS.beam_size,
max_steps=FLAGS.max_steps)
parser.AddEvaluation(task_context,
FLAGS.batch_size,
corpus_name=FLAGS.input,
evaluation_max_steps=FLAGS.max_steps)
parser.AddSaver(FLAGS.slim_model)
sess.run(parser.inits.values())
parser.saver.restore(sess, FLAGS.model_path)
sink_documents = tf.placeholder(tf.string)
sink = gen_parser_ops.document_sink(sink_documents,
task_context=task_context,
corpus_name=FLAGS.output)
t = time.time()
num_epochs = None
num_tokens = 0
num_correct = 0
num_documents = 0
while True:
tf_eval_epochs, tf_eval_metrics, tf_documents = sess.run([
parser.evaluation['epochs'],
parser.evaluation['eval_metrics'],
parser.evaluation['documents'],
])
if len(tf_documents):
logging.info('Processed %d documents', len(tf_documents))
num_documents += len(tf_documents)
sess.run(sink, feed_dict={sink_documents: tf_documents})
num_tokens += tf_eval_metrics[0]
num_correct += tf_eval_metrics[1]
if num_epochs is None:
num_epochs = tf_eval_epochs
elif num_epochs < tf_eval_epochs:
break
logging.info('Total processed documents: %d', num_documents)
if num_tokens > 0:
eval_metric = 100.0 * num_correct / num_tokens
logging.info('num correct tokens: %d', num_correct)
logging.info('total tokens: %d', num_tokens)
logging.info('Seconds elapsed in evaluation: %.2f, '
'eval metric: %.2f%%', time.time() - t, eval_metric)
def main(unused_argv):
logging.set_verbosity(logging.INFO)
with tf.Session() as sess:
Eval(sess)
if __name__ == '__main__':
tf.app.run()
| clinc/models | syntaxnet/syntaxnet/parser_eval.py | Python | apache-2.0 | 6,000 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import copy
from mitmflib.dnslib import RR,QTYPE,RCODE
from mitmflib.dnslib.server import DNSServer,DNSHandler,BaseResolver,DNSLogger
class ZoneResolver(BaseResolver):
"""
Simple fixed zone file resolver.
"""
def __init__(self,zone,glob=False):
"""
Initialise resolver from zone file.
Stores RRs as a list of (label,type,rr) tuples
If 'glob' is True use glob match against zone file
"""
self.zone = [(rr.rname,QTYPE[rr.rtype],rr) for rr in RR.fromZone(zone)]
self.glob = glob
self.eq = 'matchGlob' if glob else '__eq__'
def resolve(self,request,handler):
"""
Respond to DNS request - parameters are request packet & handler.
Method is expected to return DNS response
"""
reply = request.reply()
qname = request.q.qname
qtype = QTYPE[request.q.qtype]
for name,rtype,rr in self.zone:
# Check if label & type match
if getattr(qname,self.eq)(name) and (qtype == rtype or
qtype == 'ANY' or
rtype == 'CNAME'):
# If we have a glob match fix reply label
if self.glob:
a = copy.copy(rr)
a.rname = qname
reply.add_answer(a)
else:
reply.add_answer(rr)
# Check for A/AAAA records associated with reply and
# add in additional section
if rtype in ['CNAME','NS','MX','PTR']:
for a_name,a_rtype,a_rr in self.zone:
if a_name == rr.rdata.label and a_rtype in ['A','AAAA']:
reply.add_ar(a_rr)
if not reply.rr:
reply.header.rcode = RCODE.NXDOMAIN
return reply
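# Illustrative zone text accepted by the resolver above (BIND-style records;
# the names and addresses are made up):
#
#     example.com. 60 IN A 192.0.2.10
#     www.example.com. 60 IN CNAME example.com.
#
# When started with --glob, zone labels may also contain wildcards such as
# "*.example.com." and are matched with matchGlob instead of exact equality.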
if __name__ == '__main__':
import argparse,sys,time
p = argparse.ArgumentParser(description="Zone DNS Resolver")
p.add_argument("--zone","-z",required=True,
metavar="<zone-file>",
help="Zone file ('-' for stdin)")
p.add_argument("--port","-p",type=int,default=53,
metavar="<port>",
help="Server port (default:53)")
p.add_argument("--address","-a",default="",
metavar="<address>",
help="Listen address (default:all)")
p.add_argument("--glob",action='store_true',default=False,
help="Glob match against zone file (default: false)")
p.add_argument("--udplen","-u",type=int,default=0,
metavar="<udplen>",
help="Max UDP packet length (default:0)")
p.add_argument("--tcp",action='store_true',default=False,
help="TCP server (default: UDP only)")
p.add_argument("--log",default="request,reply,truncated,error",
help="Log hooks to enable (default: +request,+reply,+truncated,+error,-recv,-send,-data)")
p.add_argument("--log-prefix",action='store_true',default=False,
help="Log prefix (timestamp/handler/resolver) (default: False)")
args = p.parse_args()
if args.zone == '-':
args.zone = sys.stdin
else:
args.zone = open(args.zone)
resolver = ZoneResolver(args.zone,args.glob)
logger = DNSLogger(args.log,args.log_prefix)
print("Starting Zone Resolver (%s:%d) [%s]" % (
args.address or "*",
args.port,
"UDP/TCP" if args.tcp else "UDP"))
for rr in resolver.zone:
print(" | ",rr[2].toZone(),sep="")
print()
if args.udplen:
DNSHandler.udplen = args.udplen
udp_server = DNSServer(resolver,
port=args.port,
address=args.address,
logger=logger)
udp_server.start_thread()
if args.tcp:
tcp_server = DNSServer(resolver,
port=args.port,
address=args.address,
tcp=True,
logger=logger)
tcp_server.start_thread()
while udp_server.isAlive():
time.sleep(1)
| CiuffysHub/MITMf | mitmflib-0.18.4/mitmflib/dnslib/zoneresolver.py | Python | gpl-3.0 | 4,433 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Create parts containing a Plainbox test collection known as a provider.
Plainbox is a toolkit consisting of python3 library, development tools,
documentation and examples. It is targeted at developers working on testing or
certification applications and authors creating tests for such applications.
More information: http://plainbox.readthedocs.org/en/latest/
To find out more about authoring a plainbox provider, see the following
documentation: http://plainbox.readthedocs.org/en/latest/author/providers.html
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
"""
import os
import snapcraft
from snapcraft.internal import mangling
class PlainboxProviderPlugin(snapcraft.BasePlugin):
def __init__(self, name, options, project):
super().__init__(name, options, project)
self.build_packages.extend(['intltool'])
def build(self):
super().build()
env = os.environ.copy()
# Ensure the first provider does not attempt to validate against
# providers installed on the build host by initialising PROVIDERPATH
# to empty
env['PROVIDERPATH'] = ''
provider_stage_dir = os.path.join(self.project.stage_dir, 'providers')
if os.path.exists(provider_stage_dir):
provider_dirs = [os.path.join(provider_stage_dir, provider)
for provider in os.listdir(provider_stage_dir)]
env['PROVIDERPATH'] = ':'.join(provider_dirs)
self.run(['python3', 'manage.py', 'validate'], env=env)
self.run(['python3', 'manage.py', 'build'])
self.run(['python3', 'manage.py', 'i18n'])
self.run([
'python3', 'manage.py', 'install', '--layout=relocatable',
'--prefix=/providers/{}'.format(self.name),
'--root={}'.format(self.installdir)])
mangling.rewrite_python_shebangs(self.installdir)
def snap_fileset(self):
fileset = super().snap_fileset()
# If a python package is added as a stage-packages it will include
# sitecustomize.py which is irrelevant and will cause unnecessary
# conflicts so instead we just ignore these entries.
fileset.append('-usr/lib/python*/sitecustomize.py')
fileset.append('-etc/python*/sitecustomize.py')
return fileset
| elopio/snapcraft | snapcraft/plugins/plainbox_provider.py | Python | gpl-3.0 | 3,098 |
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from azure_common import BaseTest, arm_template
class DataLakeTest(BaseTest):
def setUp(self):
super(DataLakeTest, self).setUp()
def test_data_lake_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-data-lake',
'resource': 'azure.datalake'
}, validate=True)
self.assertTrue(p)
@arm_template('datalake.json')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-datalake',
'resource': 'azure.datalake',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestdatalake'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
| FireballDWF/cloud-custodian | tools/c7n_azure/tests/test_datalake.py | Python | apache-2.0 | 1,567 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from cpt.packager import ConanMultiPackager
import os
if __name__ == "__main__":
username = os.getenv("GITHUB_ACTOR")
tag_version = os.getenv("GITHUB_REF")
tag_package = os.getenv("GITHUB_REPOSITORY")
login_username = os.getenv("CONAN_LOGIN_USERNAME")
package_version = tag_version.replace("refs/tags/v", "")
package_name = tag_package.replace("skypjack/", "")
reference = "{}/{}".format(package_name, package_version)
channel = os.getenv("CONAN_CHANNEL", "stable")
upload = os.getenv("CONAN_UPLOAD")
stable_branch_pattern = os.getenv("CONAN_STABLE_BRANCH_PATTERN", r"v\d+\.\d+\.\d+.*")
test_folder = os.getenv("CPT_TEST_FOLDER", os.path.join("conan", "test_package"))
upload_only_when_stable = os.getenv("CONAN_UPLOAD_ONLY_WHEN_STABLE", True)
disable_shared = os.getenv("CONAN_DISABLE_SHARED_BUILD", "False")
builder = ConanMultiPackager(username=username,
reference=reference,
channel=channel,
login_username=login_username,
upload=upload,
stable_branch_pattern=stable_branch_pattern,
upload_only_when_stable=upload_only_when_stable,
test_folder=test_folder)
builder.add()
filtered_builds = []
for settings, options, env_vars, build_requires, reference in builder.items:
if disable_shared == "False" or not options["{}:shared".format(package_name)]:
filtered_builds.append([settings, options, env_vars, build_requires])
builder.builds = filtered_builds
builder.run()
| skypjack/entt | conan/build.py | Python | mit | 1,747 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from datetime import date
class Settings(models.Model):
""" Singleton object containing site wide settings configurable by the trainer. """
class Meta:
# Django admin adds an 's' to the class name; prevent SettingSS
verbose_name = 'Setting'
email_validation_regex = \
models.CharField(
max_length=200,
blank=True,
default=".*@(student.)?kit.edu",
help_text="Regular expression used to check the email domain of registering users."
)
mat_number_validation_regex = \
models.CharField(
max_length=200,
blank=True,
default="\d{5,7}",
help_text="Regular expression used to check the student number."
)
new_users_via_sso = \
models.BooleanField(
default=True,
help_text="If enabled, users previously unknown to the Praktomat can register via single sign on (eg. Shibboleth)."
)
deny_registration_from = \
models.DateTimeField(
default=date(2222, 1, 1),
help_text="After this date, registration won't be possible."
)
acount_activation_days = \
models.IntegerField(
default=10,
help_text="Days until the user has time to activate his account with the link sent in the registration email."
)
account_manual_validation = \
models.BooleanField(
default=False,
help_text="If enabled, registrations via the website must be manually validated by a trainer."
)
requires_disclaimer_acceptance = \
models.BooleanField(
default=False,
help_text="If enabled, users have to accept the disclaimer before using the site."
)
accept_all_solutions = \
models.BooleanField(
default=False,
help_text="If enabled, solutions can become the final soution even if not all required checkers are passed."
)
anonymous_attestation = \
models.BooleanField(
default=False,
help_text="If enabled, the tutor can't see the name of the user who submitted the solution."
)
final_grades_published = \
models.BooleanField(
default=False,
help_text="If enabled, all users can see their final grades."
)
SUM = 'SUM'
AVERAGE = 'AVG'
ARITHMETIC_CHOICES = (
(SUM, 'Sum'),
(AVERAGE, 'Average'),
)
final_grades_arithmetic_option = \
models.CharField(
max_length=3,
choices=ARITHMETIC_CHOICES,
default=SUM,
)
WITH_PLAGIARISM = 'WP'
NO_PLAGIARISM = 'NP'
PLAGIARISM_CHOICES = (
(NO_PLAGIARISM, 'Without'),
(WITH_PLAGIARISM, 'Including'),
)
final_grades_plagiarism_option = \
models.CharField(
max_length=2,
choices=PLAGIARISM_CHOICES,
default=NO_PLAGIARISM,
)
invisible_attestor = \
models.BooleanField(
default=False,
help_text="If enabled, a user will not learn which tutor wrote attestations to his solutions. In particular, tutors will not be named in attestation emails."
)
attestation_reply_to = \
models.EmailField(
blank=True,
help_text="Additional Reply-To: address to be set for attestation emails."
)
attestation_allow_run_checkers = \
models.BooleanField(
default=False,
help_text="If enabled, tutors can re-run all checkers for solutions they attest. Can be used to re-run checks that failed due to problems unrelated to the solution (e.g.: time-outs because of high server load), but needs to be used with care, since it may change the results from what the student saw when he submitted his solution."
)
jplag_setting = \
models.CharField(
max_length=200,
default='Java',
help_text="Default settings for jPlag"
)
@python_2_unicode_compatible
class Chunk(models.Model):
""" A Chunk is a piece of content associated with a unique key that can be inserted into any template with the use of a special template tag """
settings = models.ForeignKey(
Settings,
default=1,
help_text="Makes it easy to display chunks as inlines in Settings.",
on_delete=models.CASCADE
)
key = \
models.CharField(
help_text="A unique name for this chunk of content",
blank=False,
max_length=255,
unique=True,
editable=False
)
content = models.TextField(blank=True)
def __str__(self):
return "%s" % (self.key,)
| KITPraktomatTeam/Praktomat | src/configuration/models.py | Python | gpl-2.0 | 5,274 |
def fbnq(n):
    """Print the Fibonacci numbers below n, separated by spaces."""
    a, b = 0, 1
    while b < n:
        print(a, end=' ')
        a, b = b, a + b
    print(a)
    print("it's the end!")

fbnq(1000)
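# Expected output of fbnq(1000):
# 0 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987
# it's the end!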
| jiaorenyu/learning | python/may.py | Python | mit | 126 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from greenlet import getcurrent as get_ident
except ImportError: # pragma: no cover
try:
from thread import get_ident
except ImportError: # pragma: no cover
from dummy_thread import get_ident
def try_import(module):
from importlib import import_module
try:
return import_module(module)
except ImportError:
pass
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`StackLocal` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalProxy(object):
"""Code stolen from werkzeug.local.LocalProxy."""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __nonzero__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object())
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object())
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x.__coerce__(x, o)
__enter__ = lambda x: x.__enter__()
__exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
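# Minimal usage sketch (illustrative only) of the werkzeug-style API above:
# each thread/greenlet sees its own attributes on a Local, and a LocalProxy
# transparently forwards to whatever is currently bound.
#
#     ctx = Local()
#     ctx.user = "alice"           # only visible in the current context
#     current_user = ctx('user')   # LocalProxy resolving to ctx.user
#     str(current_user)            # -> 'alice'
#     release_local(ctx)           # drop this context's storage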
| youngking/lazyconn | lazyconn/local.py | Python | bsd-3-clause | 9,195 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 27 15:41:46 2014
@author: leo
"""
import numpy as np
from numpy import exp, pi
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, plot

td = np.arange(0,50,.05)
Xt = list(map(lambda x: x*exp(-x) if x <= 1 and x >= 0 else 0, td))
#Xjw = fftshift(fft(Xt))
Xjw = list(map(lambda jw: (-exp(-1.0-jw)/(1.0+jw))
               - (-exp(-1.0-jw)/np.power(1.0+jw,2.0))
               + 1.0/np.power(1+jw,2),
               np.arange(-10*pi,10*pi,.2)*1j))
fsize = (8,4)
picfolder = '/home/leo/Documents/Sinais e Sistemas Lineares/exercicios/'
save = True
figure(figsize=fsize)
plot(td[0:100],Xt[0:100])
if save:
plt.savefig(picfolder+'4-23da.eps')
plt.close()
figure(figsize=fsize)
plot(Xjw)
if save:
plt.savefig(picfolder+'4-23db.eps')
plt.close() | kewitz/mestrado | Analise de Sinais/exercicio423.py | Python | mit | 664 |
#coding=utf-8
import web
web.config.debug = False
web.config.session_parameters['timeout'] = 60 * 10
web.config.session_parameters['ignore_change_ip'] = False
#web.config.session_parameters['secret_key'] = '!AvadD03FDS34%%Sdfas035$$asd'
db = web.db.database(
dbn = 'mysql',
user = 'twifi',
pw = 'twifi123$',
db = 'twifi_dev',
port = 3306
)
urls = (
'/', 'index',
'/blog', 'redirect',
'/add', 'add',
'/del', 'delete',
'/blog/(\d+)', 'show_scrap_all',
'/tag/(.*)', 'search_by_tag',
'/archive/(.*)', 'search_by_archive',
'/scrap/(.*)', 'show_scrap',
'/add_scrap', 'add_scrap',
'/del_scrap', 'del_scrap',
'/add_tag', 'add_tag',
'/login', 'login',
'/logout', 'logout',
'/admin', 'admin',
) | koakumaping/simple-blog | settings.py | Python | gpl-2.0 | 767 |
#!/usr/bin/python
from basic_discovery import BasicDiscoverer
import re
class Discoverer(BasicDiscoverer):
def discovery(self, *args):
response = self.client.describe_target_groups()
data = []
# Load balancer data cache
LoadBalancersDescrByArn = {}
for TargetGroup in response["TargetGroups"]:
# A target group may be related to one or more load balancers - create one entry per lb-target group
# combination
for LoadBalancerArn in TargetGroup["LoadBalancerArns"]:
if LoadBalancerArn not in LoadBalancersDescrByArn:
LoadBalancersDescrByArn[LoadBalancerArn] = self.client.describe_load_balancers(
LoadBalancerArns=[LoadBalancerArn]
)['LoadBalancers'][0]
target_health = self.client.describe_target_health(
TargetGroupArn=TargetGroup["TargetGroupArn"]
)
# Get the short ARNs that are effectivelly used when querying for metrics
Arn = re.search(":(targetgroup/.*)", TargetGroup["TargetGroupArn"]).group(1)
LoadBalancerShortArn = re.search(":loadbalancer/(.*)", LoadBalancerArn).group(1)
# Final discovery entry
ldd = {
"{#TARGET_GROUP_NAME}": TargetGroup["TargetGroupName"],
"{#TARGET_GROUP_ARN}": Arn,
"{#TARGET_GROUP_PORT}": TargetGroup["Port"],
"{#TARGET_GROUP_VPC_ID}": TargetGroup["VpcId"],
"{#TARGET_GROUP_LOAD_BALANCER_NAME}": LoadBalancersDescrByArn[LoadBalancerArn]['LoadBalancerName'],
"{#TARGET_GROUP_LOAD_BALANCER_DNS_NAME}": LoadBalancersDescrByArn[LoadBalancerArn]['DNSName'],
"{#TARGET_GROUP_LOAD_BALANCER_ARN}": LoadBalancerShortArn,
"{#TARGET_COUNT}": len(target_health['TargetHealthDescriptions']),
}
# Next load balancer in target group
data.append(ldd)
return data
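# Illustrative shape of a single discovery entry returned above (all values
# are made up):
#
#     {"{#TARGET_GROUP_NAME}": "web-tg",
#      "{#TARGET_GROUP_ARN}": "targetgroup/web-tg/0123456789abcdef",
#      "{#TARGET_GROUP_PORT}": 443,
#      "{#TARGET_GROUP_VPC_ID}": "vpc-0abc",
#      "{#TARGET_GROUP_LOAD_BALANCER_NAME}": "web-alb",
#      "{#TARGET_GROUP_LOAD_BALANCER_DNS_NAME}": "web-alb-123.eu-west-1.elb.amazonaws.com",
#      "{#TARGET_GROUP_LOAD_BALANCER_ARN}": "app/web-alb/0123456789abcdef",
#      "{#TARGET_COUNT}": 2}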
| wawastein/zabbix-cloudwatch | zabbix-scripts/scripts/discovery/elbv2.py | Python | gpl-3.0 | 2,103 |
import os
import mock
from zope.interface import implementer
from twisted.trial import unittest
from twisted.internet import endpoints, defer, reactor
from twisted.internet.endpoints import clientFromString
from twisted.internet.defer import inlineCallbacks
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.application import service
import txtorcon
from foolscap.api import Tub
from foolscap.info import ConnectionInfo
from foolscap.connection import get_endpoint
from foolscap.connections import tcp, tor, i2p
from foolscap.tokens import NoLocationHintsError
from foolscap.ipb import InvalidHintError
from foolscap.test.common import (certData_low, certData_high, Target,
ShouldFailMixin)
from foolscap import ipb, util
def discard_status(status):
pass
@implementer(IStreamClientEndpoint)
class FakeHostnameEndpoint:
def __init__(self, reactor, host, port):
self.reactor = reactor
self.host = host
self.port = port
class Convert(unittest.TestCase):
def checkTCPEndpoint(self, hint, expected_host, expected_port):
with mock.patch("foolscap.connections.tcp.HostnameEndpoint",
side_effect=FakeHostnameEndpoint):
d = get_endpoint(hint, {"tcp": tcp.default()}, ConnectionInfo())
(ep, host) = self.successResultOf(d)
self.assertTrue(isinstance(ep, FakeHostnameEndpoint), ep)
self.failUnlessIdentical(ep.reactor, reactor)
self.assertEqual(ep.host, expected_host)
self.assertEqual(ep.port, expected_port)
def checkBadTCPEndpoint(self, hint):
d = get_endpoint(hint, {"tcp": tcp.default()}, ConnectionInfo())
self.failureResultOf(d, ipb.InvalidHintError)
def checkUnknownEndpoint(self, hint):
d = get_endpoint(hint, {"tcp": tcp.default()}, ConnectionInfo())
self.failureResultOf(d, ipb.InvalidHintError)
def testConvertLegacyHint(self):
self.assertEqual(tcp.convert_legacy_hint("127.0.0.1:9900"),
"tcp:127.0.0.1:9900")
self.assertEqual(tcp.convert_legacy_hint("tcp:127.0.0.1:9900"),
"tcp:127.0.0.1:9900")
self.assertEqual(tcp.convert_legacy_hint("other:127.0.0.1:9900"),
"other:127.0.0.1:9900")
# this is unfortunate
self.assertEqual(tcp.convert_legacy_hint("unix:1"), "tcp:unix:1")
# so new hints should do one of these:
self.assertEqual(tcp.convert_legacy_hint("tor:host:1234"),
"tor:host:1234") # multiple colons
self.assertEqual(tcp.convert_legacy_hint("unix:fd=1"),
"unix:fd=1") # equals signs, key=value -style
def testTCP(self):
self.checkTCPEndpoint("tcp:127.0.0.1:9900", "127.0.0.1", 9900)
self.checkTCPEndpoint("tcp:hostname:9900", "hostname", 9900)
self.checkBadTCPEndpoint("tcp:hostname:NOTAPORT")
def testLegacyTCP(self):
self.checkTCPEndpoint("127.0.0.1:9900", "127.0.0.1", 9900)
self.checkTCPEndpoint("hostname:9900", "hostname", 9900)
self.checkBadTCPEndpoint("hostname:NOTAPORT")
def testTCP6(self):
self.checkTCPEndpoint("tcp:[2001:0DB8:f00e:eb00::1]:9900",
"2001:0DB8:f00e:eb00::1", 9900)
self.checkBadTCPEndpoint("tcp:[2001:0DB8:f00e:eb00::1]:NOTAPORT")
self.checkBadTCPEndpoint("tcp:2001:0DB8:f00e:eb00::1]:9900")
self.checkBadTCPEndpoint("tcp:[2001:0DB8:f00e:eb00::1:9900")
self.checkBadTCPEndpoint("tcp:2001:0DB8:f00e:eb00::1:9900")
# IPv4-mapped addresses
self.checkTCPEndpoint("tcp:[::FFFF:1.2.3.4]:99", "::FFFF:1.2.3.4", 99)
self.checkBadTCPEndpoint("tcp:[::FFFF:1.2.3]:99")
self.checkBadTCPEndpoint("tcp:[::FFFF:1.2.3.4567]:99")
# local-scoped address with good/bad zone-ids (like "123" or "en0")
self.checkTCPEndpoint("tcp:[FE8::1%123]:9900", "FE8::1%123", 9900)
self.checkTCPEndpoint("tcp:[FE8::1%en1.2]:9900", "FE8::1%en1.2", 9900)
self.checkBadTCPEndpoint("tcp:[FE8::1%%]:9900")
self.checkBadTCPEndpoint("tcp:[FE8::1%$]:9900")
self.checkBadTCPEndpoint("tcp:[FE8::1%]:9900")
self.checkBadTCPEndpoint("tcp:[FE8::1%en0%nomultiple]:9900")
# not both IPv4-mapped and zone-id
self.checkBadTCPEndpoint("tcp:[::FFFF:1.2.3.4%en0]:9900")
def testNoColon(self):
self.checkBadTCPEndpoint("hostname")
def testExtensionsFromFuture(self):
self.checkUnknownEndpoint("udp:127.0.0.1:7700")
self.checkUnknownEndpoint("127.0.0.1:7700:postextension")
@implementer(ipb.IConnectionHintHandler)
class NewHandler:
def __init__(self):
self.asked = 0
self.accepted = 0
def hint_to_endpoint(self, hint, reactor, update_status):
self.asked += 1
if "bad" in hint:
raise ipb.InvalidHintError
self.accepted += 1
pieces = hint.split(":")
new_hint = "tcp:%s:%d" % (pieces[1], int(pieces[2])+0)
ep = tcp.default().hint_to_endpoint(new_hint, reactor, update_status)
if pieces[0] == "slow":
update_status("being slow")
self._d = defer.Deferred()
self._d.addCallback(lambda _: ep)
return self._d
return ep
class ErrorSuffix(unittest.TestCase):
def test_context(self):
statuses = []
with tor.add_context(statuses.append, "context"):
pass
self.assertEqual(statuses, ["context"])
statuses = []
def _try():
with tor.add_context(statuses.append, "context"):
raise ValueError("foo")
e = self.assertRaises(ValueError, _try)
self.assertEqual(statuses, ["context"])
self.assertTrue(hasattr(e, "foolscap_connection_handler_error_suffix"))
self.assertEqual(e.foolscap_connection_handler_error_suffix,
" (while context)")
class Handlers(ShouldFailMixin, unittest.TestCase):
def setUp(self):
self.s = service.MultiService()
self.s.startService()
def tearDown(self):
return self.s.stopService()
def makeTub(self, hint_type):
tubA = Tub(certData=certData_low)
tubA.setServiceParent(self.s)
tubB = Tub(certData=certData_high)
tubB.setServiceParent(self.s)
portnum = util.allocate_tcp_port()
tubA.listenOn("tcp:%d:interface=127.0.0.1" % portnum)
tubA.setLocation("%s:127.0.0.1:%d" % (hint_type, portnum))
furl = tubA.registerReference(Target())
return furl, tubB
def testNoHandlers(self):
furl, tubB = self.makeTub("type2")
tubB.removeAllConnectionHintHandlers()
d = tubB.getReference(furl)
self.failureResultOf(d, NoLocationHintsError)
def testNoSuccessfulHandlers(self):
furl, tubB = self.makeTub("type2")
d = self.shouldFail(NoLocationHintsError, "no handlers", None,
tubB.getReference, furl)
return d
def testExtraHandler(self):
furl, tubB = self.makeTub("type2")
h = NewHandler()
tubB.addConnectionHintHandler("type2", h)
d = tubB.getReference(furl)
def _got(rref):
self.assertEqual(h.asked, 1)
self.assertEqual(h.accepted, 1)
d.addCallback(_got)
return d
def testOnlyHandler(self):
furl, tubB = self.makeTub("type2")
h = NewHandler()
tubB.removeAllConnectionHintHandlers()
tubB.addConnectionHintHandler("type2", h)
d = tubB.getReference(furl)
def _got(rref):
self.assertEqual(h.asked, 1)
self.assertEqual(h.accepted, 1)
d.addCallback(_got)
return d
def testOrdering(self):
furl, tubB = self.makeTub("type2")
h1 = NewHandler()
h2 = NewHandler()
tubB.removeAllConnectionHintHandlers()
tubB.addConnectionHintHandler("type2", h1) # replaced by h2
tubB.addConnectionHintHandler("type2", h2)
d = tubB.getReference(furl)
def _got(rref):
self.assertEqual(h1.asked, 0)
self.assertEqual(h1.accepted, 0)
self.assertEqual(h2.asked, 1)
self.assertEqual(h2.accepted, 1)
d.addCallback(_got)
return d
def testUnhelpfulHandlers(self):
furl, tubB = self.makeTub("type2")
h1 = NewHandler()
h2 = NewHandler()
tubB.removeAllConnectionHintHandlers()
tubB.addConnectionHintHandler("type1", h1) # this is ignored
tubB.addConnectionHintHandler("type2", h2) # this handles it
d = tubB.getReference(furl)
def _got(rref):
self.assertEqual(h1.asked, 0)
self.assertEqual(h1.accepted, 0)
self.assertEqual(h2.asked, 1)
self.assertEqual(h2.accepted, 1)
d.addCallback(_got)
return d
def testDeferredHandler(self):
furl, tubB = self.makeTub("slow")
h = NewHandler()
tubB.removeAllConnectionHintHandlers()
tubB.addConnectionHintHandler("slow", h)
d = tubB.getReference(furl)
self.assertNoResult(d)
h._d.callback(None)
def _got(rref):
self.assertEqual(h.asked, 1)
self.assertEqual(h.accepted, 1)
d.addCallback(_got)
return d
class Empty:
pass
class Tor(unittest.TestCase):
@inlineCallbacks
def test_default_socks(self):
with mock.patch("foolscap.connections.tor.txtorcon.TorClientEndpoint"
) as tce:
tce.return_value = expected_ep = object()
h = tor.default_socks()
res = yield h.hint_to_endpoint("tcp:example.com:1234", reactor,
discard_status)
self.assertEqual(tce.mock_calls,
[mock.call("example.com", 1234,
socks_endpoint=None)])
ep, host = res
self.assertIdentical(ep, expected_ep)
self.assertEqual(host, "example.com")
@inlineCallbacks
def test_default_socks_real(self):
h = tor.default_socks()
res = yield h.hint_to_endpoint("tcp:example.com:1234", reactor,
discard_status)
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "example.com")
self.assertEqual(h.describe(), "tor")
def test_badaddr(self):
isnon = tor.is_non_public_numeric_address
self.assertTrue(isnon("10.0.0.1"))
self.assertTrue(isnon("127.0.0.1"))
self.assertTrue(isnon("192.168.78.254"))
self.assertTrue(isnon("::1"))
self.assertFalse(isnon("8.8.8.8"))
self.assertFalse(isnon("example.org"))
@inlineCallbacks
def test_default_socks_badaddr(self):
h = tor.default_socks()
d = h.hint_to_endpoint("tcp:10.0.0.1:1234", reactor, discard_status)
f = yield self.assertFailure(d, InvalidHintError)
self.assertEqual(str(f), "ignoring non-Tor-able ipaddr 10.0.0.1")
d = h.hint_to_endpoint("tcp:127.0.0.1:1234", reactor, discard_status)
f = yield self.assertFailure(d, InvalidHintError)
self.assertEqual(str(f), "ignoring non-Tor-able ipaddr 127.0.0.1")
d = h.hint_to_endpoint("tcp:not@a@hint:123", reactor, discard_status)
f = yield self.assertFailure(d, InvalidHintError)
self.assertEqual(str(f), "unrecognized TCP/Tor hint")
@inlineCallbacks
def test_socks_endpoint(self):
tor_socks_endpoint = clientFromString(reactor, "tcp:socks_host:100")
with mock.patch("foolscap.connections.tor.txtorcon.TorClientEndpoint"
) as tce:
tce.return_value = expected_ep = object()
h = tor.socks_endpoint(tor_socks_endpoint)
res = yield h.hint_to_endpoint("tcp:example.com:1234", reactor,
discard_status)
self.assertEqual(tce.mock_calls,
[mock.call("example.com", 1234,
socks_endpoint=tor_socks_endpoint)])
ep, host = res
self.assertIs(ep, expected_ep)
self.assertEqual(host, "example.com")
@inlineCallbacks
def test_socks_endpoint_real(self):
tor_socks_endpoint = clientFromString(reactor, "tcp:socks_host:100")
h = tor.socks_endpoint(tor_socks_endpoint)
res = yield h.hint_to_endpoint("tcp:example.com:1234", reactor,
discard_status)
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "example.com")
@inlineCallbacks
def test_launch(self):
tpp = Empty()
tpp.tor_protocol = None
h = tor.launch()
fake_reactor = object()
with mock.patch("txtorcon.launch_tor", return_value=tpp) as lt:
res = yield h.hint_to_endpoint("tor:foo.onion:29212", fake_reactor,
discard_status)
self.assertEqual(len(lt.mock_calls), 1)
args,kwargs = lt.mock_calls[0][1:]
self.assertIdentical(args[0], h.config)
self.assertIdentical(args[1], fake_reactor)
self.assertEqual(kwargs, {"tor_binary": None})
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
# launch_tor will allocate a local TCP port for SOCKS
self.assertTrue(h._socks_desc.startswith("tcp:127.0.0.1:"), h._socks_desc)
@inlineCallbacks
def test_launch_tor_binary(self):
tpp = Empty()
tpp.tor_protocol = None
h = tor.launch(tor_binary="/bin/tor")
fake_reactor = object()
with mock.patch("txtorcon.launch_tor", return_value=tpp) as lt:
res = yield h.hint_to_endpoint("tor:foo.onion:29212", fake_reactor,
discard_status)
self.assertEqual(len(lt.mock_calls), 1)
args,kwargs = lt.mock_calls[0][1:]
self.assertIdentical(args[0], h.config)
self.assertIdentical(args[1], fake_reactor)
self.assertEqual(kwargs, {"tor_binary": "/bin/tor"})
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
self.assertTrue(h._socks_desc.startswith("tcp:127.0.0.1:"), h._socks_desc)
@inlineCallbacks
def test_launch_data_directory(self):
datadir = self.mktemp()
tpp = Empty()
tpp.tor_protocol = None
h = tor.launch(data_directory=datadir)
fake_reactor = object()
with mock.patch("txtorcon.launch_tor", return_value=tpp) as lt:
res = yield h.hint_to_endpoint("tor:foo.onion:29212", fake_reactor,
discard_status)
self.assertEqual(len(lt.mock_calls), 1)
args,kwargs = lt.mock_calls[0][1:]
self.assertIdentical(args[0], h.config)
self.assertIdentical(args[1], fake_reactor)
self.assertEqual(kwargs, {"tor_binary": None})
self.assertEqual(h.config.DataDirectory, datadir)
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
self.assertTrue(h._socks_desc.startswith("tcp:127.0.0.1:"), h._socks_desc)
@inlineCallbacks
def test_launch_data_directory_exists(self):
datadir = self.mktemp()
os.mkdir(datadir)
tpp = Empty()
tpp.tor_protocol = None
h = tor.launch(data_directory=datadir)
fake_reactor = object()
with mock.patch("txtorcon.launch_tor", return_value=tpp) as lt:
res = yield h.hint_to_endpoint("tor:foo.onion:29212", fake_reactor,
discard_status)
self.assertEqual(len(lt.mock_calls), 1)
args,kwargs = lt.mock_calls[0][1:]
self.assertIdentical(args[0], h.config)
self.assertIdentical(args[1], fake_reactor)
self.assertEqual(kwargs, {"tor_binary": None})
self.assertEqual(h.config.DataDirectory, datadir)
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
self.assertTrue(h._socks_desc.startswith("tcp:127.0.0.1:"), h._socks_desc)
@inlineCallbacks
def test_control_endpoint(self):
control_ep = FakeHostnameEndpoint(reactor, "localhost", 9051)
h = tor.control_endpoint(control_ep)
# We don't actually care about the generated endpoint, just the state
# that the handler builds up internally. But we need to provoke a
# connection to build that state, and we need to prevent the handler
# from actually talking to a Tor daemon (which probably doesn't exist
# on this host).
config = Empty()
config.SocksPort = ["1234"]
with mock.patch("txtorcon.build_tor_connection",
return_value=None):
with mock.patch("txtorcon.TorConfig.from_protocol",
return_value=config):
res = yield h.hint_to_endpoint("tor:foo.onion:29212", reactor,
discard_status)
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
self.assertEqual(h._socks_desc, "tcp:127.0.0.1:1234")
@inlineCallbacks
def test_control_endpoint_default(self):
control_ep = FakeHostnameEndpoint(reactor, "localhost", 9051)
h = tor.control_endpoint(control_ep)
config = Empty()
config.SocksPort = [txtorcon.DEFAULT_VALUE]
with mock.patch("txtorcon.build_tor_connection",
return_value=None):
with mock.patch("txtorcon.TorConfig.from_protocol",
return_value=config):
res = yield h.hint_to_endpoint("tor:foo.onion:29212", reactor,
discard_status)
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
self.assertEqual(h._socks_desc, "tcp:127.0.0.1:9050")
@inlineCallbacks
def test_control_endpoint_non_numeric(self):
control_ep = FakeHostnameEndpoint(reactor, "localhost", 9051)
h = tor.control_endpoint(control_ep)
config = Empty()
config.SocksPort = ["unix:var/run/tor/socks WorldWritable", "1234"]
with mock.patch("txtorcon.build_tor_connection",
return_value=None):
with mock.patch("txtorcon.TorConfig.from_protocol",
return_value=config):
res = yield h.hint_to_endpoint("tor:foo.onion:29212", reactor,
discard_status)
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
self.assertEqual(h._socks_desc, "tcp:127.0.0.1:1234")
@inlineCallbacks
def test_control_endpoint_nested_list(self):
control_ep = FakeHostnameEndpoint(reactor, "localhost", 9051)
h = tor.control_endpoint(control_ep)
config = Empty()
config.SocksPort = [["unix:var/run/tor/socks WorldWritable", "1234"]]
with mock.patch("txtorcon.build_tor_connection",
return_value=None):
with mock.patch("txtorcon.TorConfig.from_protocol",
return_value=config):
res = yield h.hint_to_endpoint("tor:foo.onion:29212", reactor,
discard_status)
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
self.assertEqual(h._socks_desc, "tcp:127.0.0.1:1234")
@inlineCallbacks
def test_control_endpoint_no_port(self):
control_ep = FakeHostnameEndpoint(reactor, "localhost", 9051)
h = tor.control_endpoint(control_ep)
config = Empty()
config.SocksPort = ["unparseable"]
with mock.patch("txtorcon.build_tor_connection",
return_value=None):
with mock.patch("txtorcon.TorConfig.from_protocol",
return_value=config):
d = h.hint_to_endpoint("tor:foo.onion:29212", reactor,
discard_status)
f = yield self.assertFailure(d, ValueError)
self.assertIn("could not use config.SocksPort", str(f))
def test_control_endpoint_maker_immediate(self):
return self.do_test_control_endpoint_maker(False)
def test_control_endpoint_maker_deferred(self):
return self.do_test_control_endpoint_maker(True)
def test_control_endpoint_maker_nostatus(self):
return self.do_test_control_endpoint_maker(True, takes_status=False)
@inlineCallbacks
def do_test_control_endpoint_maker(self, use_deferred, takes_status=True):
control_ep = FakeHostnameEndpoint(reactor, "localhost", 9051)
results = []
def make(arg):
results.append(arg)
if use_deferred:
return defer.succeed(control_ep)
else:
return control_ep # immediate
def make_takes_status(arg, update_status):
return make(arg)
if takes_status:
h = tor.control_endpoint_maker(make_takes_status, takes_status=True)
else:
h = tor.control_endpoint_maker(make, takes_status=False)
self.assertEqual(results, []) # not called yet
# We don't actually care about the generated endpoint, just the state
# that the handler builds up internally. But we need to provoke a
# connection to build that state, and we need to prevent the handler
# from actually talking to a Tor daemon (which probably doesn't exist
# on this host).
config = Empty()
config.SocksPort = ["1234"]
with mock.patch("txtorcon.build_tor_connection",
return_value=None):
with mock.patch("txtorcon.TorConfig.from_protocol",
return_value=config):
res = yield h.hint_to_endpoint("tor:foo.onion:29212", reactor,
discard_status)
self.assertEqual(results, [reactor]) # called once
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
self.assertEqual(h._socks_desc, "tcp:127.0.0.1:1234")
res = yield h.hint_to_endpoint("tor:foo.onion:29213", reactor,
discard_status)
self.assertEqual(results, [reactor]) # still only called once
ep, host = res
self.assertIsInstance(ep, txtorcon.endpoints.TorClientEndpoint)
self.assertEqual(host, "foo.onion")
self.assertEqual(h._socks_desc, "tcp:127.0.0.1:1234")
class I2P(unittest.TestCase):
@inlineCallbacks
def test_default(self):
with mock.patch("foolscap.connections.i2p.SAMI2PStreamClientEndpoint") as sep:
sep.new = n = mock.Mock()
n.return_value = expected_ep = object()
h = i2p.default(reactor, misc_kwarg="foo")
res = yield h.hint_to_endpoint("i2p:fppym.b32.i2p", reactor,
discard_status)
self.assertEqual(len(n.mock_calls), 1)
args = n.mock_calls[0][1]
got_sep, got_host, got_portnum = args
self.assertIsInstance(got_sep, endpoints.TCP4ClientEndpoint)
self.failUnlessEqual(got_sep._host, "127.0.0.1") # fragile
self.failUnlessEqual(got_sep._port, 7656)
self.failUnlessEqual(got_host, "fppym.b32.i2p")
self.failUnlessEqual(got_portnum, None)
kwargs = n.mock_calls[0][2]
self.failUnlessEqual(kwargs, {"misc_kwarg": "foo"})
ep, host = res
self.assertIdentical(ep, expected_ep)
self.assertEqual(host, "fppym.b32.i2p")
self.assertEqual(h.describe(), "i2p")
@inlineCallbacks
def test_default_with_portnum(self):
# I2P addresses generally don't use port numbers, but the parser is
# supposed to handle them
with mock.patch("foolscap.connections.i2p.SAMI2PStreamClientEndpoint") as sep:
sep.new = n = mock.Mock()
n.return_value = expected_ep = object()
h = i2p.default(reactor)
res = yield h.hint_to_endpoint("i2p:fppym.b32.i2p:1234", reactor,
discard_status)
self.assertEqual(len(n.mock_calls), 1)
args = n.mock_calls[0][1]
got_sep, got_host, got_portnum = args
self.assertIsInstance(got_sep, endpoints.TCP4ClientEndpoint)
self.failUnlessEqual(got_sep._host, "127.0.0.1") # fragile
self.failUnlessEqual(got_sep._port, 7656)
self.failUnlessEqual(got_host, "fppym.b32.i2p")
self.failUnlessEqual(got_portnum, 1234)
ep, host = res
self.assertIdentical(ep, expected_ep)
self.assertEqual(host, "fppym.b32.i2p")
@inlineCallbacks
def test_default_with_portnum_kwarg(self):
# setting extra kwargs on the handler should provide a default for
# the portnum. sequential calls with/without portnums in the hints
# should get the right values.
h = i2p.default(reactor, port=1234)
with mock.patch("foolscap.connections.i2p.SAMI2PStreamClientEndpoint") as sep:
sep.new = n = mock.Mock()
yield h.hint_to_endpoint("i2p:fppym.b32.i2p", reactor,
discard_status)
got_portnum = n.mock_calls[0][1][2]
self.failUnlessEqual(got_portnum, 1234)
with mock.patch("foolscap.connections.i2p.SAMI2PStreamClientEndpoint") as sep:
sep.new = n = mock.Mock()
yield h.hint_to_endpoint("i2p:fppym.b32.i2p:3456", reactor,
discard_status)
got_portnum = n.mock_calls[0][1][2]
self.failUnlessEqual(got_portnum, 3456)
with mock.patch("foolscap.connections.i2p.SAMI2PStreamClientEndpoint") as sep:
sep.new = n = mock.Mock()
yield h.hint_to_endpoint("i2p:fppym.b32.i2p", reactor,
discard_status)
got_portnum = n.mock_calls[0][1][2]
self.failUnlessEqual(got_portnum, 1234)
def test_default_badhint(self):
h = i2p.default(reactor)
d = defer.maybeDeferred(h.hint_to_endpoint, "i2p:not@a@hint", reactor,
discard_status)
f = self.failureResultOf(d, InvalidHintError)
self.assertEqual(str(f.value), "unrecognized I2P hint")
@inlineCallbacks
def test_sam_endpoint(self):
with mock.patch("foolscap.connections.i2p.SAMI2PStreamClientEndpoint") as sep:
sep.new = n = mock.Mock()
n.return_value = expected_ep = object()
my_ep = FakeHostnameEndpoint(reactor, "localhost", 1234)
h = i2p.sam_endpoint(my_ep, misc_kwarg="foo")
res = yield h.hint_to_endpoint("i2p:fppym.b32.i2p", reactor,
discard_status)
self.assertEqual(len(n.mock_calls), 1)
args = n.mock_calls[0][1]
got_sep, got_host, got_portnum = args
self.assertIdentical(got_sep, my_ep)
self.failUnlessEqual(got_host, "fppym.b32.i2p")
self.failUnlessEqual(got_portnum, None)
kwargs = n.mock_calls[0][2]
self.failUnlessEqual(kwargs, {"misc_kwarg": "foo"})
ep, host = res
self.assertIdentical(ep, expected_ep)
self.assertEqual(host, "fppym.b32.i2p")
| warner/foolscap | src/foolscap/test/test_connection.py | Python | mit | 28,403 |
# Obtained from: http://www.djangosnippets.org/snippets/133/
# Author: http://www.djangosnippets.org/users/SmileyChris/
from django.template import loader, Context, RequestContext, TemplateSyntaxError
from django.http import HttpResponse
def render_response(template_prefix=None, always_use_requestcontext=True):
"""
Create a decorator which can be used as a shortcut to render templates to
an HttpResponse.
The decorated function must return either:
* an HttpResponse object,
* a string containing the template name (if doesn't start with '/' then
will be combined with the template_prefix) or
* a tuple comprising of:
* a string or tuple containing the template name(s),
* a dictionary to add to the Context or RequestContext and
* (optionally) a list of context processors (if given, forces use of
RequestContext).
Example usage (in a views module)::
from projectname.renderer import render_response
render_response = render_response('app_name/') # Template dir.
@render_response
        def app_view(request):
...
return 'app_view_template.htm', dict(object=object)
"""
def renderer(func):
def _dec(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if isinstance(response, HttpResponse):
return response
elif isinstance(response, basestring):
template_name = response
namespace = {}
context_processors = None
elif isinstance(response, (tuple, list)):
len_tuple = len(response)
if len_tuple == 2:
template_name, namespace = response
context_processors = None
elif len_tuple == 3:
template_name, namespace, context_processors = response
else:
raise TemplateSyntaxError, '%s.%s function did not return a parsable tuple' % (func.__module__, func.__name__)
else:
raise TemplateSyntaxError, '%s.%s function did not provide a template name or HttpResponse object' % (func.__module__, func.__name__)
if always_use_requestcontext or context_processors is not None:
context = RequestContext(request, namespace, context_processors)
else:
context = Context(namespace)
if template_prefix:
if isinstance(template_name, (list, tuple)):
template_name = map(correct_path, template_name)
else:
template_name = correct_path(template_name)
return HttpResponse(loader.render_to_string(template_name, context_instance=context))
return _dec
def correct_path(template_name):
if template_name.startswith('/'):
return template_name[1:]
return '%s%s' % (template_prefix, template_name)
return renderer
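# Illustrative sketch (not part of the original snippet; names are hypothetical):
# a decorated view may return any of the three supported forms described in the
# docstring above.
#
#   render_response = render_response('app_name/')   # template dir prefix
#
#   @render_response
#   def app_view(request):
#       if request.method == 'POST':
#           return HttpResponse('posted')              # passed through unchanged
#       if 'plain' in request.GET:
#           return 'app_view_template.htm'             # rendered with an empty context
#       return 'app_view_template.htm', {'object': 42} # template name plus context dict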
| DarwinSalaz/django-profile | userprofile/utils/decorators.py | Python | bsd-2-clause | 3,031 |
"""Factory for creating devices."""
import logging
import math
import time
from google.appengine.api import namespace_manager
from google.appengine.ext import ndb
import flask
from appengine import account, model, pushrpc, rest
from common import detector
DEVICE_TYPES = {}
def static_command(func):
"""Device command decorator - automatically dispatches methods."""
setattr(func, 'is_command', True)
setattr(func, 'is_static', True)
return func
def register(device_type):
"""Decorator to cause device types to be registered."""
def class_rebuilder(cls):
DEVICE_TYPES[device_type] = cls
return cls
return class_rebuilder
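# Illustrative sketch (not part of the original module; the type name and class
# are hypothetical): drivers register themselves under a type string, after
# which create_device() can construct them from a request body.
#
#   @register('example_switch')
#   class ExampleSwitch(Switch):
#     pass
#
#   device = create_device('device-1', {'type': 'example_switch'})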
def create_device(device_id, body, device_type=None):
"""Factory for creating new devices."""
if device_type is None:
device_type = body.pop('type', None)
if device_type is None:
flask.abort(400, '\'type\' field expected in body.')
constructor = DEVICE_TYPES.get(device_type, None)
if constructor is None:
logging.error('No device type \'%s\'', device_type)
flask.abort(400)
return constructor(id=device_id)
class Device(model.Base):
"""Base class for all device drivers."""
# This is the name the user sets
name = ndb.StringProperty(required=False)
# This is the (optional) name read from the device itself
device_name = ndb.StringProperty(required=False)
last_update = ndb.DateTimeProperty(required=False, auto_now=True)
room = ndb.StringProperty()
# What can I do with this device? ie SWITCH, DIMMABLE, COLOR_TEMP etc
capabilities = ndb.ComputedProperty(lambda self: self.get_capabilities(),
repeated=True)
# What broad category does this device belong to? LIGHTING, CLIMATE, MUSIC
categories = ndb.ComputedProperty(lambda self: self.get_categories(),
repeated=True)
# Does this device belong to an account?
account = ndb.StringProperty()
def get_capabilities(self):
return []
def get_categories(self):
return []
@classmethod
def _event_classname(cls):
return 'device'
def handle_event(self, event):
pass
@classmethod
def handle_static_event(cls, event):
pass
@classmethod
def get_by_capability(cls, capability):
return cls.query(Device.capabilities == capability)
def find_room(self):
"""Resolve the room for this device. May return null."""
if not self.room:
return None
# This is a horrible hack, but room imports devices,
# so need to be lazy here.
from appengine import room
room_obj = room.Room.get_by_id(self.room)
if not room_obj:
return None
return room_obj
def find_account(self):
"""Resolve the account for this device. May return null."""
if not self.account:
return None
# This is a horrible hack, but room imports devices,
# so need to be lazy here.
account_obj = account.Account.get_by_id(self.account)
if not account_obj:
return None
return account_obj
class DetectorMixin(object):
"""Add a failure detector to a device to interpret motion sensor data."""
detector = ndb.JsonProperty()
# These fields represent the real state of this sensor
# and when the real state last changes
occupied = ndb.BooleanProperty(default=False)
occupied_last_update = ndb.IntegerProperty()
# These fields represent the inferred state of this sensor
# after processing by the failure detector, and the time
# when this inferred state last changed.
inferred_state = ndb.BooleanProperty(default=False)
inferred_last_update = ndb.IntegerProperty(default=0)
#def to_dict(self):
# """We don't need to expose the detector in the dict repr."""
# # Mainly as its too big for pusher
# result = super(DetectorMixin, self).to_dict()
# del result['detector']
# return result
def _load_detector(self):
"""Either return the a rehydrated detector, or a fresh one."""
if self.detector is None:
return detector.AccrualFailureDetector()
else:
return detector.AccrualFailureDetector.from_dict(self.detector)
def is_occupied(self):
"""Use a failure detector to determine state of sensor"""
# Does the sensor say we're occupied?
if self.occupied:
return True
# If not, does the detector?
instance = self._load_detector()
inferred_state = instance.is_alive()
if self.inferred_state != inferred_state:
self.inferred_state = inferred_state
self.inferred_last_update = int(time.time())
self.put()
return inferred_state
def real_occupied_state_change(self, state):
"""The underly state changed; synthensize events and save the detector."""
now = int(time.time())
logging.info('real_occupied_state_change %s, state=%s', self, state)
# I'm getting dupe events; ignore until
# I figure out why.
if state == self.occupied:
return
instance = self._load_detector()
# Construct an appropriate set of fake heartbeats and
# feed them to the detector.
# As we don't get heart beats from the motion sensors,
# we just fake them. We only do this if the sensor
    # transitions from occupied -> not occupied. We
# don't need to do it for the other way as we just use
# the real state.
if self.occupied and self.occupied_last_update is not None:
assert state == False
# We know the first hit was at self.occupied_last_update.
      # We know the sensor can't have received a hit in the past
      # timeout seconds, so the last hit was now - timeout ago.
      # Otherwise, we're going to put a bunch of hits in between
# those times
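      # Illustrative numbers (not from the original code): with
      # occupied_last_update = 1000, now = 2000 and timeout = 240 we get
      # end = 1760, diff = 760, count = ceil(760 / 240) = 4 and inc = 190,
      # so heartbeats are synthesized at 1000, 1190, 1380 and 1570.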
timeout = 240
start = self.occupied_last_update
end = now - timeout
if start > end:
logging.error("This shouldn't happen; start = %s < end = %s",
start, end)
end = start + 1
diff = end - start
count = math.ceil(diff * 1.0 / timeout)
if count > 0:
inc = diff / count
for i in xrange(int(count)):
instance.heartbeat(start + int(i * inc))
# save the detector and other fields
# no need to put this object, plumbing in device.py
# will do that for us.
self.detector = instance.to_dict()
self.occupied = state
self.occupied_last_update = now
room = self.find_room()
if room:
room.update_lights()
class Switch(Device):
"""A switch."""
# Represents the actual state of the switch; changing this
# (and calling update()) will changed the switch.
state = ndb.BooleanProperty(default=False)
# Represents the state the user wants, and when they asked for
# it. Most of the time users will control rooms etc, not individual
# lights. But its possible.
# UI should set this and call update_lights on the room.
intended_state = ndb.BooleanProperty()
state_last_update = ndb.IntegerProperty(default=0)
def get_capabilities(self):
return ['SWITCH']
def get_categories(self):
return ['LIGHTING']
# pylint: disable=invalid-name
blueprint = flask.Blueprint('device', __name__)
rest.register_class(blueprint, Device, create_device)
def process_events(events):
"""Process a set of events."""
device_cache = {}
for event in events:
device_type = event['device_type']
device_id = event['device_id']
event_body = event['event']
if device_id is None:
DEVICE_TYPES[device_type].handle_static_event(event_body)
continue
if device_id in device_cache:
device = device_cache[device_id]
else:
device = Device.get_by_id(device_id)
if not device:
device = create_device(device_id, None,
device_type=device_type)
device_cache[device_id] = device
device.handle_event(event_body)
ndb.put_multi(device_cache.values())
@blueprint.route('/events', methods=['POST'])
def handle_events():
"""Handle events from devices."""
# This endpoint needs to authenticate itself.
proxy = pushrpc.authenticate()
if proxy is None:
flask.abort(401)
# If proxy hasn't been claimed, not much we can do.
if proxy.building_id is None:
logging.info('Dropping events as this proxy is not claimed')
return ('', 204)
# We need to set namespace - not done by main.py
namespace_manager.set_namespace(proxy.building_id)
events = flask.request.get_json()
logging.info('Processing %d events', len(events))
process_events(events)
return ('', 204)
| tomwilkie/awesomation | src/appengine/device.py | Python | mit | 8,484 |
from .main import encode | ikvk/pdf417as_str | pdf417as_str/__init__.py | Python | lgpl-3.0 | 24 |
import lxml.html
import re
import requests
from utils import State
from .people import AZPersonScraper
from .bills import AZBillScraper
# from .committees import AZCommitteeScraper
# from .events import AZEventScraper
class Arizona(State):
scrapers = {
"people": AZPersonScraper,
# 'committees': AZCommitteeScraper,
# 'events': AZEventScraper,
"bills": AZBillScraper,
}
legislative_sessions = [
{
"_scraped_name": "2009 - Forty-ninth Legislature - First Regular Session",
"classification": "primary",
"end_date": "2009-07-01",
"identifier": "49th-1st-regular",
"name": "49th Legislature, 1st Regular Session (2009)",
"start_date": "2009-01-12",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - First Special Session",
"classification": "special",
"end_date": "2009-01-31",
"identifier": "49th-1st-special",
"name": "49th Legislature, 1st Special Session (2009)",
"start_date": "2009-01-28",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Second Regular Session",
"classification": "primary",
"end_date": "2010-04-29",
"identifier": "49th-2nd-regular",
"name": "49th Legislature, 2nd Regular Session (2010)",
"start_date": "2010-01-11",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Second Special Session",
"classification": "special",
"end_date": "2009-05-27",
"identifier": "49th-2nd-special",
"name": "49th Legislature, 2nd Special Session (2009)",
"start_date": "2009-05-21",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Third Special Session",
"classification": "special",
"end_date": "2009-08-25",
"identifier": "49th-3rd-special",
"name": "49th Legislature, 3rd Special Session (2009)",
"start_date": "2009-07-06",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Fourth Special Session",
"classification": "special",
"end_date": "2009-11-23",
"identifier": "49th-4th-special",
"name": "49th Legislature, 4th Special Session (2009)",
"start_date": "2009-11-17",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Fifth Special Session",
"classification": "special",
"end_date": "2009-12-19",
"identifier": "49th-5th-special",
"name": "49th Legislature, 5th Special Session (2009)",
"start_date": "2009-12-17",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Sixth Special Session",
"classification": "special",
"end_date": "2010-02-11",
"identifier": "49th-6th-special",
"name": "49th Legislature, 6th Special Session (2010)",
"start_date": "2010-02-01",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Seventh Special Session",
"classification": "special",
"end_date": "2010-03-16",
"identifier": "49th-7th-special",
"name": "49th Legislature, 7th Special Session (2010)",
"start_date": "2010-03-08",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Eighth Special Session",
"classification": "special",
"end_date": "2010-04-01",
"identifier": "49th-8th-special",
"name": "49th Legislature, 8th Special Session (2010)",
"start_date": "2010-03-29",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Ninth Special Session",
"classification": "special",
"end_date": "2010-08-11",
"identifier": "49th-9th-special",
"name": "49th Legislature, 9th Special Session (2010)",
"start_date": "2010-08-09",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - First Regular Session",
"classification": "primary",
"end_date": "2011-04-20",
"identifier": "50th-1st-regular",
"name": "50th Legislature, 1st Regular Session (2011)",
"start_date": "2011-01-10",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - First Special Session",
"classification": "special",
"end_date": "2011-01-20",
"identifier": "50th-1st-special",
"name": "50th Legislature, 1st Special Session (2011)",
"start_date": "2011-01-19",
},
{
"_scraped_name": "2012 - Fiftieth Legislature - Second Regular Session",
"classification": "primary",
"identifier": "50th-2nd-regular",
"name": "50th Legislature, 2nd Regular Session (2012)",
"start_date": "2012-01-09",
"end_date": "2012-05-03",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - Second Special Session",
"classification": "special",
"end_date": "2011-02-16",
"identifier": "50th-2nd-special",
"name": "50th Legislature, 2nd Special Session (2011)",
"start_date": "2011-02-14",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - Third Special Session",
"classification": "special",
"end_date": "2011-06-13",
"identifier": "50th-3rd-special",
"name": "50th Legislature, 3rd Special Session (2011)",
"start_date": "2011-06-10",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - Fourth Special Session",
"classification": "special",
"end_date": "2011-11-01",
"identifier": "50th-4th-special",
"name": "50th Legislature, 4th Special Session (2011)",
"start_date": "2011-11-01",
},
{
"_scraped_name": "2013 - Fifty-first Legislature - First Regular Session",
"classification": "primary",
"identifier": "51st-1st-regular",
"name": "51st Legislature - 1st Regular Session (2013)",
"start_date": "2013-01-14",
"end_date": "2013-06-14",
},
{
"_scraped_name": "2013 - Fifty-first Legislature - First Special Session",
"classification": "primary",
"identifier": "51st-1st-special",
"name": "51st Legislature - 1st Special Session (2013)",
"start_date": "2013-06-11",
"end_date": "2013-06-14",
},
{
"_scraped_name": "2014 - Fifty-first Legislature - Second Regular Session",
"classification": "primary",
"identifier": "51st-2nd-regular",
"name": "51st Legislature - 2nd Regular Session",
"start_date": "2014-01-13",
"end_date": "2014-04-24",
},
{
"_scraped_name": "2014 - Fifty-first Legislature - Second Special Session",
"classification": "special",
"identifier": "51st-2nd-special",
"name": "51st Legislature - 2nd Special Session",
"start_date": "2014-05-27",
"end_date": "2014-05-29",
},
{
"_scraped_name": "2015 - Fifty-second Legislature - First Regular Session",
"classification": "primary",
"identifier": "52nd-1st-regular",
"name": "52nd Legislature - 1st Regular Session",
"start_date": "2015-01-12",
"end_date": "2015-04-02",
},
{
"_scraped_name": "2015 - Fifty-second Legislature - First Special Session",
"classification": "special",
"identifier": "52nd-1st-special",
"name": "52nd Legislature - 1st Special Session",
"start_date": "2015-10-28",
"end_date": "2015-10-30",
},
{
"_scraped_name": "2016 - Fifty-second Legislature - Second Regular Session",
"classification": "primary",
"identifier": "52nd-2nd-regular",
"name": "52nd Legislature - 2nd Regular Session",
"start_date": "2016-01-11",
"end_date": "2016-05-07",
},
{
"_scraped_name": "2017 - Fifty-third Legislature - First Regular Session",
"classification": "primary",
"end_date": "2017-05-03",
"identifier": "53rd-1st-regular",
"name": "53rd Legislature - 1st Regular Session",
"start_date": "2017-01-09",
},
{
"_scraped_name": "2018 - Fifty-third Legislature - First Special Session",
"classification": "special",
"identifier": "53rd-1st-special",
"name": "53rd Legislature - 1st Special Session",
"start_date": "2018-01-22",
"end_date": "2018-01-26",
},
{
"_scraped_name": "2018 - Fifty-third Legislature - Second Regular Session",
"classification": "primary",
"identifier": "53rd-2nd-regular",
"name": "53rd Legislature - 2nd Regular Session",
"start_date": "2018-01-08",
"end_date": "2018-05-03",
},
{
"_scraped_name": "2019 - Fifty-fourth Legislature - First Regular Session",
"classification": "primary",
"identifier": "54th-1st-regular",
"name": "54th Legislature - 1st Regular Session",
"start_date": "2019-01-14",
"end_date": "2019-03-29",
},
{
"_scraped_name": "2020 - Fifty-fourth Legislature - Second Regular Session",
"classification": "primary",
"identifier": "54th-2nd-regular",
"name": "54th Legislature - 2nd Regular Session",
"start_date": "2020-01-13",
"end_date": "2020-05-27",
},
{
"_scraped_name": "2021 - Fifty-fifth Legislature - First Regular Session",
"classification": "primary",
"identifier": "55th-1st-regular",
"name": "55th Legislature - 1st Regular Session",
"start_date": "2020-01-11",
"end_date": "2020-04-23",
},
]
ignored_scraped_sessions = [
"2008 - Forty-eighth Legislature - Second Regular Session",
"2007 - Forty-eighth Legislature - First Regular Session",
"2006 - Forty-seventh Legislature - First Special Session",
"2006 - Forty-seventh Legislature - Second Regular Session",
"2005 - Forty-seventh Legislature - First Regular Session",
"2004 - Forty-sixth Legislature - Second Regular Session",
"2003 - Forty-sixth Legislature - Second Special Session",
"2003 - Forty-sixth Legislature - First Special Session",
"2003 - Forty-sixth Legislature - First Regular Session",
"2002 - Forty-fifth Legislature - Sixth Special Session",
"2002 - Forty-fifth Legislature - Fifth Special Session",
"2002 - Forty-fifth Legislature - Fourth Special Session",
"2002 - Forty-fifth Legislature - Third Special Session",
"2002 - Forty-fifth Legislature - Second Regular Session",
"2001 - Forty-fifth Legislature - Second Special Session",
"2001 - Forty-fifth Legislature - First Special Session",
"2001 - Forty-fifth Legislature - First Regular Session",
"2000 - Forty-fourth Legislature - Seventh Special Session",
"2000 - Forty-fourth Legislature - Sixth Special Session",
"2000 - Forty-fourth Legislature - Fifth Special Session",
"2000 - Forty-fourth Legislature - Fourth Special Session",
"2000 - Forty-fourth Legislature - Second Regular Session",
"1999 - Forty-fourth Legislature - Third Special Session",
"1999 - Forty-fourth Legislature - Second Special Session",
"1999 - Forty-fourth Legislature - First Special Session",
"1999 - Forty-fourth Legislature - First Regular Session",
"1998 - Forty-third Legislature - Sixth Special Session",
"1998 - Forty-third Legislature - Fifth Special Session",
"1998 - Forty-third Legislature - Fourth Special Session",
"1998 - Forty-third Legislature - Third Special Session",
"1998 - Forty-third Legislature - Second Regular Session",
"1997 - Forty-third Legislature - Second Special Session",
"1997 - Forty-third Legislature - First Special Session",
"1997 - Forty-third Legislature - First Regular Session",
"1996 - Forty-second Legislature - Seventh Special Session",
"1996 - Forty-second Legislature - Sixth Special Session",
"1996 - Forty-second Legislature - Fifth Special Session",
"1996 - Forty-second Legislature - Second Regular Session",
"1995 - Forty-second Legislature - Fourth Special Session",
"1995 - Forty-second Legislature - Third Special Session",
"1995 - Forty-Second Legislature - Second Special Session",
"1995 - Forty-Second Legislature - First Special Session",
"1995 - Forty-second Legislature - First Regular Session",
"1994 - Forty-first Legislature - Ninth Special Session",
"1994 - Forty-first Legislature - Eighth Special Session",
"1994 - Forty-first Legislature - Second Regular Session",
"1993 - Forty-first Legislature - Seventh Special Session",
"1993 - Forty-first Legislature - Sixth Special Session",
"1993 - Forty-first Legislature - Fifth Special Session",
"1993 - Forty-first Legislature - Fourth Special Session",
"1993 - Forty-first Legislature - Third Special Session",
"1993 - Forty-first Legislature - Second Special Session",
"1993 - Forty-first Legislature - First Special Session",
"1993 - Forty-first Legislature - First Regular Session",
"1992 - Fortieth Legislature - Ninth Special Session",
"1992 - Fortieth Legislature - Eighth Special Session",
"1992 - Fortieth Legislature - Seventh Special Session",
"1992 - Fortieth Legislature - Fifth Special Session",
"1992 - Fortieth Legislature - Sixth Special Session",
"1992 - Fortieth Legislature - Second Regular Session",
"1991 - Fortieth Legislature - Fourth Special Session",
"1991 - Fortieth Legislature - Third Special Session",
"1991 - Fortieth Legislature - Second Special Session",
"1991 - Fortieth Legislature - First Special Session",
"1991 - Fortieth Legislature - First Regular Session",
"1990 - Thirty-ninth Legislature - Fifth Special Session",
"1990 - Thirty-ninth Legislature - Fourth Special Session",
"1990 - Thirty-ninth Legislature - Third Special Session",
"1990 - Thirty-ninth Legislature - Second Regular Session",
"1989 - Thirty-ninth Legislature - Second Special Session",
"1989 - Thirty-ninth Legislature - First Special Session",
"1989 - Thirty-ninth Legislature - First Regular Session",
]
def get_session_list(self):
session = requests.Session()
data = session.get("https://www.azleg.gov/")
# TODO: JSON at https://apps.azleg.gov/api/Session/
doc = lxml.html.fromstring(data.text)
sessions = doc.xpath("//select/option/text()")
sessions = [re.sub(r"\(.+$", "", x).strip() for x in sessions]
return sessions
| sunlightlabs/openstates | scrapers/az/__init__.py | Python | gpl-3.0 | 15,823 |
#!/usr/bin/env python
###
## Faraday Penetration Test IDE
## Copyright (C) 2015 Infobyte LLC (http://www.infobytesec.com/)
## See the file 'doc/LICENSE' for the license information
###
config = {
#NMAP
'CS_NMAP' : "nmap",
#OPENVAS
'CS_OPENVAS_USER' : 'admin',
'CS_OPENVAS_PASSWORD' : 'openvas',
'CS_OPENVAS_SCAN_CONFIG' : "Full and fast",
'CS_OPENVAS_ALIVE_TEST' : "ICMP, TCP-ACK Service & ARP Ping",
'CS_OPENVAS' : 'omp',
#BURP
'CS_BURP' : '/root/tools/burpsuite_pro_v1.6.26.jar',
#NIKTO
'CS_NIKTO' : "nikto",
#W3AF
'CS_W3AF' : "/root/tools/w3af/w3af_api",
'CS_W3AF_PROFILE' : "/root/tools/w3af/profiles/fast_scan.pw3af",
#ZAP
'CS_ZAP' : "/root/tools/zap/ZAP_D-2015-08-24/zap.sh",
#NESSUS
'CS_NESSUS_URL' : "https://127.0.0.1:8834",
'CS_NESSUS_USER' : "nessus",
'CS_NESSUS_PASS' : "nessus",
'CS_NESSUS_PROFILE' : "Basic Network Scan",
}
| RB4-Solutions/cscan | config.py | Python | gpl-3.0 | 1,117 |
#!/usr/bin/python
# Copyright 2017 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Management of Contrail resources
================================
:depends: - vnc_api Python module
Enforce the virtual router existence
------------------------------------
.. code-block:: yaml
virtual_router:
contrail.virtual_router_present:
name: cmp01
ip_address: 10.0.0.23
dpdk_enabled: False
Enforce the virtual router absence
----------------------------------
.. code-block:: yaml
virtual_router_cmp01:
contrail.virtual_router_absent:
name: cmp01
Enforce the link local service entry existence
----------------------------------------------
.. code-block:: yaml
    # Example with dns name, only one is permitted
lls_meta1:
contrail.linklocal_service_present:
- name: meta1
- lls_ip: 10.0.0.23
- lls_port: 80
- ipf_addresses: "meta.example.com"
- ipf_port: 80
# Example with multiple ip addresses
lls_meta2:
contrail.linklocal_service_present:
- name: meta2
- lls_ip: 10.0.0.23
- lls_port: 80
- ipf_addresses:
- 10.10.10.10
- 10.20.20.20
- 10.30.30.30
- ipf_port: 80
# Example with one ip addresses
lls_meta3:
contrail.linklocal_service_present:
- name: meta3
- lls_ip: 10.0.0.23
- lls_port: 80
- ipf_addresses:
- 10.10.10.10
- ipf_port: 80
Enforce the link local service entry absence
--------------------------------------------
.. code-block:: yaml
lls_meta1_delete:
contrail.linklocal_service_absent:
- name: cmp01
Enforce the analytics node existence
------------------------------------
.. code-block:: yaml
analytics_node01:
contrail.analytics_node_present:
name: nal01
ip_address: 10.0.0.13
Enforce the config node existence
---------------------------------
.. code-block:: yaml
config_node01:
contrail.config_node_present:
name: ntw01
ip_address: 10.0.0.23
Enforce the database node existence
-----------------------------------
.. code-block:: yaml
config_node01:
contrail.database_node_present:
name: ntw01
ip_address: 10.0.0.33
'''
def __virtual__():
'''
Load Contrail module
'''
return 'contrail'
def virtual_router_present(name, ip_address, dpdk_enabled=False, **kwargs):
'''
Ensures that the Contrail virtual router exists.
:param name: Virtual router name
:param ip_address: Virtual router IP address
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Virtual router "{0}" already exists'.format(name)}
virtual_router = __salt__['contrail.virtual_router_get'](name, **kwargs)
if 'Error' not in virtual_router:
pass
else:
__salt__['contrail.virtual_router_create'](name, ip_address, dpdk_enabled, **kwargs)
ret['comment'] = 'Virtual router {0} has been created'.format(name)
ret['changes']['VirtualRouter'] = 'Created'
return ret
def virtual_router_absent(name, **kwargs):
'''
Ensure that the Contrail virtual router doesn't exist
:param name: The name of the virtual router that should not exist
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Virtual router "{0}" is already absent'.format(name)}
virtual_router = __salt__['contrail.virtual_router_get'](name, **kwargs)
if 'Error' not in virtual_router:
__salt__['contrail.virtual_router_delete'](name, **kwargs)
ret['comment'] = 'Virtual router {0} has been deleted'.format(name)
ret['changes']['VirtualRouter'] = 'Deleted'
return ret
def linklocal_service_present(name, lls_ip, lls_port, ipf_addresses, ipf_port, **kwargs):
'''
Ensures that the Contrail link local service entry exists.
:param name: Link local service name
:param lls_ip: Link local ip address
:param lls_port: Link local service port
:param ipf_addresses: IP fabric dns name or list of IP fabric ip addresses
:param ipf_port: IP fabric port
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Link local service "{0}" already exists'.format(name)}
lls = __salt__['contrail.linklocal_service_get'](name, **kwargs)
if 'Error' in lls:
__salt__['contrail.linklocal_service_create'](name, lls_ip, lls_port, ipf_addresses, ipf_port, **kwargs)
ret['comment'] = 'Link local service "{0}" has been created'.format(name)
ret['changes']['LinkLocalService'] = 'Created'
return ret
def linklocal_service_absent(name, **kwargs):
'''
Ensure that the Contrail link local service entry doesn't exist
:param name: The name of the link local service entry
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ' "{0}" is already absent'.format(name)}
lls = __salt__['contrail.linklocal_service_get'](name, **kwargs)
if 'Error' not in lls:
__salt__['contrail.linklocal_service_delete'](name, **kwargs)
ret['comment'] = 'Link local service "{0}" has been deleted'.format(name)
ret['changes']['LinkLocalService'] = 'Deleted'
return ret
def analytics_node_present(name, ip_address, **kwargs):
'''
Ensures that the Contrail analytics node exists.
:param name: Analytics node name
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Analytics node {0} already exists'.format(name)}
analytics_node = __salt__['contrail.analytics_node_get'](name, **kwargs)
if 'Error' not in analytics_node:
pass
else:
__salt__['contrail.analytics_node_create'](name, ip_address, **kwargs)
ret['comment'] = 'Analytics node {0} has been created'.format(name)
ret['changes']['AnalyticsNode'] = 'Created'
return ret
def config_node_present(name, ip_address, **kwargs):
'''
Ensures that the Contrail config node exists.
:param name: Config node name
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Config node {0} already exists'.format(name)}
config_node = __salt__['contrail.config_node_get'](name, **kwargs)
if 'Error' not in config_node:
pass
else:
__salt__['contrail.config_node_create'](name, ip_address, **kwargs)
ret['comment'] = 'Config node {0} has been created'.format(name)
ret['changes']['ConfigNode'] = 'Created'
return ret
def bgp_router_present(name, type, ip_address, asn=64512, **kwargs):
'''
Ensures that the Contrail BGP router exists.
:param name: BGP router name
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'BGP router {0} already exists'.format(name)}
bgp_router = __salt__['contrail.bgp_router_get'](name, **kwargs)
if 'Error' not in bgp_router:
pass
else:
__salt__['contrail.bgp_router_create'](name, type, ip_address, asn, **kwargs)
ret['comment'] = 'BGP router {0} has been created'.format(name)
ret['changes']['BgpRouter'] = 'Created'
return ret
def database_node_present(name, ip_address, **kwargs):
'''
Ensures that the Contrail database node exists.
:param name: Database node name
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database node {0} already exists'.format(name)}
database_node = __salt__['contrail.database_node_get'](name, **kwargs)
if 'Error' not in database_node:
pass
else:
__salt__['contrail.database_node_create'](name, ip_address, **kwargs)
ret['comment'] = 'Database node {0} has been created'.format(name)
ret['changes']['DatabaseNode'] = 'Created'
return ret
| Martin819/salt-formula-opencontrail | _states/contrail.py | Python | apache-2.0 | 8,638 |
from random import randrange
class City:
def __init__(self, name, period, count, success):
self.name = name
self.period = period
self.count = count
self.success = success
class Champion:
def __init__(self, city, success, time):
self.city = city
self.success = success
self.time = time
antithan = City("Antithan", 6,3,65)
draimus = City("Draimus", 3,4,25)
phodela = City("Phodela", 6,5,45)
recomera = City("Recomera", 4,2,55)
sapydra = City("Sapydra", 4,2,55)
tutanethe = City("Tutanethe", 6,4,45)
cities = []
cities.append(antithan)
cities.append(draimus)
cities.append(phodela)
cities.append(recomera)
cities.append(sapydra)
cities.append(tutanethe)
champions = []
victors = {'Antithan':0,'Draimus':0,'Phodela':0,'Recomera':0,'Sapydra':0,'Tutanethe':0}
losers = {'Antithan':0,'Draimus':0,'Phodela':0,'Recomera':0,'Sapydra':0,'Tutanethe':0}
timePeriod = 12*100
f = open('championsOutput.txt', 'w')
for n in range(timePeriod):
f.write('Year {0}, Month {1}\n'.format((n // 12)+1, (n % 12)+1))
for c in cities:
if n % c.period == 0:
for i in range(0, c.count):
chance = randrange(1,101)
time = 36 + randrange(-6,7)
champions.append(Champion(c.name, chance <= c.success, time))
f.write(c.name + ' sends ' + str(c.count) + ' champions into Invidia\n')
    for c in champions[:]:  # iterate over a copy so finished champions can be removed safely
c.time -= 1
if c.time == 0:
if c.success:
victors[c.city] += 1
f.write('A champion from ' + c.city + ' succeeds\n')
else:
losers[c.city] += 1
f.write('A champion from ' + c.city + ' has failed\n')
champions.remove(c)
f.write('\n')
f.write(str(victors) + "\n")
f.write(str(losers)) | theperfectionist89/fotc | championsTest.py | Python | mit | 1,627 |
#!/usr/bin/env python
""" MultiQC Submodule to parse output from Qualimap RNASeq """
from __future__ import print_function
from collections import OrderedDict
import logging
import re
from multiqc import config
from multiqc.plots import bargraph, linegraph
# Initialise the logger
log = logging.getLogger(__name__)
def parse_reports(self):
""" Find Qualimap RNASeq reports and parse their data """
self.qualimap_rnaseq_genome_results = dict()
regexes = {
'reads_aligned': r"read(?:s| pairs) aligned\s*=\s*([\d,]+)",
'total_alignments': r"total alignments\s*=\s*([\d,]+)",
'non_unique_alignments': r"non-unique alignments\s*=\s*([\d,]+)",
'reads_aligned_genes': r"aligned to genes\s*=\s*([\d,]+)",
'ambiguous_alignments': r"ambiguous alignments\s*=\s*([\d,]+)",
'not_aligned': r"not aligned\s*=\s*([\d,]+)",
'5_3_bias': r"5'-3' bias\s*=\s*(\d+\.\d+)",
'reads_aligned_exonic': r"exonic\s*=\s*([\d,]+)",
'reads_aligned_intronic': r"intronic\s*=\s*([\d,]+)",
'reads_aligned_intergenic': r"intergenic\s*=\s*([\d,]+)",
'reads_aligned_overlapping_exon': r"overlapping exon\s*=\s*([\d,]+)",
}
for f in self.find_log_files('qualimap/rnaseq/rnaseq_results'):
d = dict()
# Get the sample name
s_name_regex = re.search(r"bam file\s*=\s*(.+)", f['f'], re.MULTILINE)
if s_name_regex:
d['bam_file'] = s_name_regex.group(1)
s_name = self.clean_s_name(d['bam_file'], f['root'])
else:
log.warn("Couldn't find an input filename in genome_results file {}/{}".format(f['root'], f['fn']))
return None
# Check for and 'fix' European style decimal places / thousand separators
comma_regex = re.search(r"exonic\s*=\s*[\d\.]+ \(\d{1,3},\d+%\)", f['f'], re.MULTILINE)
if comma_regex:
log.debug("Trying to fix European comma style syntax in Qualimap report {}/{}".format(f['root'], f['fn']))
f['f'] = f['f'].replace('.','')
f['f'] = f['f'].replace(',','.')
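            # e.g. a line like "exonic = 1.234 (56,78%)" becomes "exonic = 1234 (56.78%)"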
# Go through all numeric regexes
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
try:
d[k] = float(r_search.group(1).replace(',',''))
except UnicodeEncodeError:
# Qualimap reports infinity (\u221e) when 3' bias denominator is zero
pass
except ValueError:
d[k] = r_search.group(1)
# Add to general stats table
for k in ['5_3_bias', 'reads_aligned']:
try:
self.general_stats_data[s_name][k] = d[k]
except KeyError:
pass
# Save results
if s_name in self.qualimap_rnaseq_genome_results:
log.debug("Duplicate genome results sample name found! Overwriting: {}".format(s_name))
self.qualimap_rnaseq_genome_results[s_name] = d
self.add_data_source(f, s_name=s_name, section='rna_genome_results')
#### Coverage profile
self.qualimap_rnaseq_cov_hist = dict()
for f in self.find_log_files('qualimap/rnaseq/coverage', filehandles=True):
s_name = self.get_s_name(f)
d = dict()
for l in f['f']:
if l.startswith('#'):
continue
coverage, count = l.split(None, 1)
coverage = int(round(float(coverage)))
count = float(count)
d[coverage] = count
if len(d) == 0:
log.debug("Couldn't parse contents of coverage histogram file {}".format(f['fn']))
return None
# Save results
if s_name in self.qualimap_rnaseq_cov_hist:
log.debug("Duplicate coverage histogram sample name found! Overwriting: {}".format(s_name))
self.qualimap_rnaseq_cov_hist[s_name] = d
self.add_data_source(f, s_name=s_name, section='rna_coverage_histogram')
# Filter to strip out ignored sample names
self.qualimap_rnaseq_genome_results = self.ignore_samples(self.qualimap_rnaseq_genome_results)
self.qualimap_rnaseq_cov_hist = self.ignore_samples(self.qualimap_rnaseq_cov_hist)
#### Plots
# Genomic Origin Bar Graph
# NB: Ignore 'Overlapping Exon' in report - these make the numbers add up to > 100%
if len(self.qualimap_rnaseq_genome_results) > 0:
gorigin_cats = OrderedDict()
gorigin_cats['reads_aligned_exonic'] = {'name': 'Exonic'}
gorigin_cats['reads_aligned_intronic'] = {'name': 'Intronic'}
gorigin_cats['reads_aligned_intergenic'] = {'name': 'Intergenic'}
gorigin_pconfig = {
'id': 'qualimap_genomic_origin',
'title': 'Genomic Origin',
'cpswitch_c_active': False
}
self.add_section (
name = 'Genomic origin of reads',
anchor = 'qualimap-reads-genomic-origin',
description = 'Classification of mapped reads as originating in exonic, intronic or intergenic regions. These can be displayed as either the number or percentage of mapped reads.',
plot = bargraph.plot(self.qualimap_rnaseq_genome_results, gorigin_cats, gorigin_pconfig)
)
if len(self.qualimap_rnaseq_cov_hist) > 0:
self.add_section (
name = 'Gene Coverage Profile',
anchor = 'qualimap-genome-fraction-coverage',
description = 'Mean distribution of coverage depth across the length of all mapped transcripts.',
plot = linegraph.plot(self.qualimap_rnaseq_cov_hist, {
'id': 'qualimap_gene_coverage_profile',
'title': 'Coverage Profile Along Genes (total)',
'ylab': 'Coverage',
'xlab': 'Transcript Position (%)',
'ymin': 0,
'xmin': 0,
'xmax': 100,
'tt_label': '<b>{point.x} bp</b>: {point.y:.0f}%',
})
)
#### General Stats
self.general_stats_headers['5_3_bias'] = {
'title': "5'-3' bias",
'format': '{:,.2f}',
}
self.general_stats_headers['reads_aligned'] = {
'title': '{} Aligned'.format(config.read_count_prefix),
'description': 'Reads Aligned ({})'.format(config.read_count_desc),
'min': 0,
'scale': 'RdBu',
'shared_key': 'read_count',
'modify': lambda x: x * config.read_count_multiplier
}
# Return the number of reports we found
return len(self.qualimap_rnaseq_genome_results.keys())
| robinandeer/MultiQC | multiqc/modules/qualimap/QM_RNASeq.py | Python | gpl-3.0 | 6,595 |
#
# author: Cosmin Basca
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
from base import LubmGenerator, UniTriplesDistribution
import numpy as np
from rdftools.log import logger
import io
import sh
__author__ = 'basca'
DISTRIBUTIONS = {
# one uni to 3 sites
# '3S': np.array([1.0,
# 2.0,
# 1.0]) / 4.0,
'3S': np.array([3.0,
10.0,
3.0]) / 16.0,
# one uni to 5 sites
'5S': np.array([1.0,
3.0,
8.0,
3.0,
1.0]) / 16.0,
# one uni to 7 sites
'7S': np.array([1.0,
3.0,
12.0,
32.0,
12.0,
3.0,
1.0]) / 64.0,
}
is_valid_distribution = lambda distro: np.sum(distro) == 1.0
"""
distribution process:
1) choose a distribution (normal) for a university
2) distribute the data of the university to some machines given that uni
obs: see substitution sampling
"""
class LubmUni2Many(LubmGenerator):
def __init__(self, output_path, sites, universities=10, index=0, clean=True, pdist = None, **kwargs):
super(LubmUni2Many, self).__init__(output_path, sites, universities=universities, index=index, clean=clean,
**kwargs)
if not isinstance(pdist, np.ndarray):
raise ValueError('pdist must be a numpy ndarray')
if not is_valid_distribution(pdist):
raise ValueError('pdist is not valid, all values must sum up to 1. SUM({0}) = {1}'.format(
pdist, np.sum(pdist)))
self._pdist = pdist
self._sorted_pdist = np.sort(self._pdist)
num_extra_sites = len(self._sorted_pdist) - 1
base_sites = np.random.random_integers(0, self._num_sites - 1, self.num_universities)
print 'base sites = ', base_sites
sites = np.arange(self._num_sites)
self._uni_site_distros = [
            # get the distribution of sites for that uni, base_sites[i] = the base site for uni i
[base_sites[i]] + list(
np.random.choice(sites[sites != base_sites[i]], num_extra_sites, replace=False))
for i in xrange(self.num_universities)
]
@property
def _distributor_type(self):
return Uni2Many
def _distributor_kwargs(self, uni_id, uni_rdf):
return dict(uni_site_distro=self._uni_site_distros[uni_id], sorted_pdist=self._sorted_pdist)
class Uni2Many(UniTriplesDistribution):
def _distribute_triples(self, triples, uni_site_distro=None, sorted_pdist=None):
        if not (isinstance(uni_site_distro, list) and len(uni_site_distro) > 0):
            raise ValueError('uni_site_distro must be a non-empty list')
if not isinstance(sorted_pdist, np.ndarray):
            raise ValueError('sorted_pdist must be a numpy ndarray')
num_triples = len(triples)
logger.info('[distributing] university %s to sites: %s, with %s triples', self.uni_name, uni_site_distro,
num_triples)
site_index = np.random.choice(uni_site_distro, num_triples, p=sorted_pdist)
site_triples = defaultdict(list)
for j, triple in enumerate(triples):
site_triples[site_index[j]].append(triple)
return site_triples | cosminbasca/rdftools | rdftools/datagen/lubm_uni2many.py | Python | apache-2.0 | 3,943 |
# Splitting out marginal effects to see if they can be generalized
from statsmodels.compat.python import lzip
import numpy as np
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly
#### margeff helper functions ####
#NOTE: todo marginal effects for group 2
# group 2 oprobit, ologit, gologit, mlogit, biprobit
def _check_margeff_args(at, method):
"""
Checks valid options for margeff
"""
if at not in ['overall','mean','median','zero','all']:
raise ValueError("%s not a valid option for `at`." % at)
if method not in ['dydx','eyex','dyex','eydx']:
raise ValueError("method is not understood. Got %s" % method)
def _check_discrete_args(at, method):
"""
Checks the arguments for margeff if the exogenous variables are discrete.
"""
if method in ['dyex','eyex']:
raise ValueError("%s not allowed for discrete variables" % method)
if at in ['median', 'zero']:
raise ValueError("%s not allowed for discrete variables" % at)
def _get_const_index(exog):
"""
    Returns a boolean array of non-constant column indices in exog and
    an array with the index of the constant column, or None if there is none.
"""
effects_idx = exog.var(0) != 0
if np.any(~effects_idx):
const_idx = np.where(~effects_idx)[0]
else:
const_idx = None
return effects_idx, const_idx
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array_like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
array([0, 3, 4])
"""
X = np.asarray(X)
if X.ndim > 1:
ind = np.zeros(X.shape[1]).astype(bool)
max = (np.max(X, axis=0) == 1)
min = (np.min(X, axis=0) == 0)
remainder = np.all(X % 1. == 0, axis=0)
ind = min & max & remainder
if X.ndim == 1:
ind = np.asarray([ind])
return np.where(ind)[0]
def _get_dummy_index(X, const_idx):
dummy_ind = _isdummy(X)
dummy = True
if dummy_ind.size == 0: # do not waste your time
dummy = False
dummy_ind = None # this gets passed to stand err func
return dummy_ind, dummy
def _iscount(X):
"""
Given an array X, returns the column indices for count variables.
Parameters
----------
X : array_like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 10, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _iscount(X)
>>> ind
array([0, 3, 4])
"""
X = np.asarray(X)
remainder = np.logical_and(np.logical_and(np.all(X % 1. == 0, axis = 0),
X.var(0) != 0), np.all(X >= 0, axis=0))
dummy = _isdummy(X)
remainder = np.where(remainder)[0].tolist()
for idx in dummy:
remainder.remove(idx)
return np.array(remainder)
def _get_count_index(X, const_idx):
count_ind = _iscount(X)
count = True
if count_ind.size == 0: # do not waste your time
count = False
count_ind = None # for stand err func
return count_ind, count
def _get_margeff_exog(exog, at, atexog, ind):
if atexog is not None: # user supplied
if isinstance(atexog, dict):
# assumes values are singular or of len(exog)
for key in atexog:
exog[:,key] = atexog[key]
elif isinstance(atexog, np.ndarray): #TODO: handle DataFrames
if atexog.ndim == 1:
k_vars = len(atexog)
else:
k_vars = atexog.shape[1]
            if k_vars != exog.shape[1]:
                raise ValueError("atexog does not have the same number "
                                 "of variables as exog")
exog = atexog
#NOTE: we should fill in atexog after we process at
if at == 'mean':
exog = np.atleast_2d(exog.mean(0))
elif at == 'median':
exog = np.atleast_2d(np.median(exog, axis=0))
elif at == 'zero':
exog = np.zeros((1,exog.shape[1]))
exog[0,~ind] = 1
return exog
def _get_count_effects(effects, exog, count_ind, method, model, params):
"""
If there's a count variable, the predicted difference is taken by
subtracting one and adding one to exog then averaging the difference
"""
# this is the index for the effect and the index for count col in exog
for i in count_ind:
exog0 = exog.copy()
exog0[:, i] -= 1
effect0 = model.predict(params, exog0)
exog0[:, i] += 2
effect1 = model.predict(params, exog0)
#NOTE: done by analogy with dummy effects but untested bc
# stata does not handle both count and eydx anywhere
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = ((effect1 - effect0)/2)
return effects
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
"""
If there's a dummy variable, the predicted difference is taken at
0 and 1
"""
# this is the index for the effect and the index for dummy col in exog
for i in dummy_ind:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:,i] = 0
effect0 = model.predict(params, exog0)
#fittedvalues0 = np.dot(exog0,params)
exog0[:,i] = 1
effect1 = model.predict(params, exog0)
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = (effect1 - effect0)
return effects
def _effects_at(effects, at):
if at == 'all':
effects = effects
elif at == 'overall':
effects = effects.mean(0)
else:
effects = effects[0,:]
return effects
def _margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind,
method, J):
r"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d = 1 - F(XB) | d = 0
The row of the Jacobian for this variable is given by
f(XB)*X | d = 1 - f(XB)*X | d = 0
Where F is the default prediction of the model.
"""
for i in dummy_ind:
exog0 = exog.copy()
exog1 = exog.copy()
exog0[:,i] = 0
exog1[:,i] = 1
dfdb0 = model._derivative_predict(params, exog0, method)
dfdb1 = model._derivative_predict(params, exog1, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0)
if J > 1:
K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
# dfdb could be too short if there are extra params, k_extra > 0
cov_margins[i, :len(dfdb)] = dfdb # how each F changes with change in B
return cov_margins
def _margeff_cov_params_count(model, cov_margins, params, exog, count_ind,
method, J):
r"""
    Returns the Jacobian for count regressors for use in margeff_cov_params.
    For count regressors the marginal effect is
\Delta F = F(XB) | d += 1 - F(XB) | d -= 1
The row of the Jacobian for this variable is given by
(f(XB)*X | d += 1 - f(XB)*X | d -= 1) / 2
where F is the default prediction for the model.
"""
for i in count_ind:
exog0 = exog.copy()
exog0[:,i] -= 1
dfdb0 = model._derivative_predict(params, exog0, method)
exog0[:,i] += 2
dfdb1 = model._derivative_predict(params, exog0, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0) / 2
if J > 1:
                K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
# dfdb could be too short if there are extra params, k_extra > 0
cov_margins[i, :len(dfdb)] = dfdb # how each F changes with change in B
return cov_margins
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
Computes the variance-covariance of marginal effects by the delta method.
Parameters
----------
model : model instance
The model that returned the fitted results. Its pdf method is used
for computing the Jacobian of discrete variables in dummy_ind and
count_ind
params : array_like
estimated model parameters
exog : array_like
exogenous variables at which to calculate the derivative
cov_params : array_like
The variance-covariance of the parameters
at : str
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation.
        Only overall has any effect here.
derivative : function or array_like
If a function, it returns the marginal effects of the model with
respect to the exogenous variables evaluated at exog. Expected to be
called derivative(params, exog). This will be numerically
differentiated. Otherwise, it can be the Jacobian of the marginal
effects with respect to the parameters.
dummy_ind : array_like
Indices of the columns of exog that contain dummy variables
count_ind : array_like
Indices of the columns of exog that contain count variables
Notes
-----
For continuous regressors, the variance-covariance is given by
Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'
where V is the parameter variance-covariance.
The outer Jacobians are computed via numerical differentiation if
derivative is a function.
"""
if callable(derivative):
from statsmodels.tools.numdiff import approx_fprime_cs
params = params.ravel('F') # for Multinomial
try:
jacobian_mat = approx_fprime_cs(params, derivative,
args=(exog,method))
except TypeError: # norm.cdf does not take complex values
from statsmodels.tools.numdiff import approx_fprime
jacobian_mat = approx_fprime(params, derivative,
args=(exog,method))
if at == 'overall':
jacobian_mat = np.mean(jacobian_mat, axis=1)
else:
jacobian_mat = jacobian_mat.squeeze() # exog was 2d row vector
if dummy_ind is not None:
jacobian_mat = _margeff_cov_params_dummy(model, jacobian_mat,
params, exog, dummy_ind, method, J)
if count_ind is not None:
jacobian_mat = _margeff_cov_params_count(model, jacobian_mat,
params, exog, count_ind, method, J)
else:
jacobian_mat = derivative
#NOTE: this will not go through for at == 'all'
return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
def margeff_cov_with_se(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
See margeff_cov_params.
Same function but returns both the covariance of the marginal effects
and their standard errors.
"""
cov_me = margeff_cov_params(model, params, exog, cov_params, at,
derivative, dummy_ind,
count_ind, method, J)
return cov_me, np.sqrt(np.diag(cov_me))
def margeff():
raise NotImplementedError
def _check_at_is_all(method):
if method['at'] == 'all':
raise ValueError("Only margeff are available when `at` is "
"'all'. Please input specific points if you would "
"like to do inference.")
_transform_names = dict(dydx='dy/dx',
eyex='d(lny)/d(lnx)',
dyex='dy/d(lnx)',
eydx='d(lny)/dx')
class Margins(object):
"""
Mostly a do nothing class. Lays out the methods expected of a sub-class.
This is just a sketch of what we may want out of a general margins class.
I (SS) need to look at details of other models.
"""
def __init__(self, results, get_margeff, derivative, dist=None,
margeff_args=()):
self._cache = {}
self.results = results
self.dist = dist
self.get_margeff(margeff_args)
def _reset(self):
self._cache = {}
def get_margeff(self, *args, **kwargs):
self._reset()
self.margeff = self.get_margeff(*args)
@cache_readonly
def tvalues(self):
raise NotImplementedError
@cache_readonly
def cov_margins(self):
raise NotImplementedError
@cache_readonly
def margins_se(self):
raise NotImplementedError
def summary_frame(self):
raise NotImplementedError
@cache_readonly
def pvalues(self):
raise NotImplementedError
def conf_int(self, alpha=.05):
raise NotImplementedError
def summary(self, alpha=.05):
raise NotImplementedError
#class DiscreteMargins(Margins):
class DiscreteMargins(object):
"""Get marginal effects of a Discrete Choice model.
Parameters
----------
results : DiscreteResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
"""
def __init__(self, results, args, kwargs={}):
self._cache = {}
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = {}
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
        frame : DataFrame
A DataFrame summarizing the marginal effects.
Notes
-----
The dataframe is created on each call and not cached, as are the
        tables built in `summary()`
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = self.results.model
from pandas import DataFrame, MultiIndex
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0) != 0 # True if not a constant
exog_names = self.results.model.exog_names
k_extra = getattr(model, 'k_extra', 0)
if k_extra > 0:
exog_names = exog_names[:-k_extra]
var_names = [name for i,name in enumerate(exog_names) if ind[i]]
if self.margeff.ndim == 2:
# MNLogit case
ci = self.conf_int(alpha)
table = np.column_stack([i.ravel("F") for i in
[self.margeff, self.margeff_se, self.tvalues,
self.pvalues, ci[:, 0, :], ci[:, 1, :]]])
_, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
ynames = np.repeat(yname_list, len(var_names))
xnames = np.tile(var_names, len(yname_list))
index = MultiIndex.from_tuples(list(zip(ynames, xnames)),
names=['endog', 'exog'])
else:
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
index=var_names
return DataFrame(table, columns=names, index=index)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
def summary(self, alpha=.05):
"""
Returns a summary table for marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
Summary : SummaryTable
A SummaryTable instance
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = results.model
title = model.__class__.__name__ + " Marginal Effects"
method = self.margeff_options['method']
top_left = [('Dep. Variable:', [model.endog_names]),
('Method:', [method]),
('At:', [self.margeff_options['at']]),]
from statsmodels.iolib.summary import (Summary, summary_params,
table_extend)
exog_names = model.exog_names[:] # copy
smry = Summary()
# TODO: sigh, we really need to hold on to this in _data...
_, const_idx = _get_const_index(model.exog)
if const_idx is not None:
exog_names.pop(const_idx[0])
if getattr(model, 'k_extra', 0) > 0:
exog_names = exog_names[:-model.k_extra]
J = int(getattr(model, "J", 1))
if J > 1:
yname, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
else:
yname = model.endog_names
yname_list = [yname]
smry.add_table_2cols(self, gleft=top_left, gright=[],
yname=yname, xname=exog_names, title=title)
# NOTE: add_table_params is not general enough yet for margeff
# could use a refactor with getattr instead of hard-coded params
# tvalues etc.
table = []
conf_int = self.conf_int(alpha)
margeff = self.margeff
margeff_se = self.margeff_se
tvalues = self.tvalues
pvalues = self.pvalues
if J > 1:
for eq in range(J):
restup = (results, margeff[:,eq], margeff_se[:,eq],
tvalues[:,eq], pvalues[:,eq], conf_int[:,:,eq])
tble = summary_params(restup, yname=yname_list[eq],
xname=exog_names, alpha=alpha, use_t=False,
skip_header=True)
tble.title = yname_list[eq]
# overwrite coef with method name
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[' + str(alpha/2), str(1-alpha/2) + ']']
tble.insert_header_row(0, header)
table.append(tble)
table = table_extend(table, keep_headers=True)
else:
restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
table = summary_params(restup, yname=yname, xname=exog_names,
alpha=alpha, use_t=False, skip_header=True)
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[' + str(alpha/2), str(1-alpha/2) + ']']
table.insert_header_row(0, header)
smry.tables.append(table)
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semi-elasticity -- dy/d(lnx)
- 'eydx' - estimate semi-elasticity -- d(lny)/dx
            Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
        atexog : array_like, optional
            Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the
            zero-indexed column number as the key and the value at which to
            hold that variable as the dictionary value. Default is None for
            all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
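        Examples
        --------
        A minimal illustrative sketch; ``res`` is assumed to be a fitted
        discrete-choice results instance (e.g. from ``sm.Logit(y, X).fit()``):
        >>> marg = res.get_margeff(at='overall', method='dydx')
        >>> marg.summary_frame()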
"""
self._reset() # always reset the cache when this is called
#TODO: if at is not all or overall, we can also put atexog values
# in summary table head
method = method.lower()
at = at.lower()
_check_margeff_args(at, method)
self.margeff_options = dict(method=method, at=at)
results = self.results
model = results.model
params = results.params
exog = model.exog.copy() # copy because values are changed
effects_idx, const_idx = _get_const_index(exog)
if dummy:
_check_discrete_args(at, method)
dummy_idx, dummy = _get_dummy_index(exog, const_idx)
else:
dummy_idx = None
if count:
_check_discrete_args(at, method)
count_idx, count = _get_count_index(exog, const_idx)
else:
count_idx = None
        # attach dummy_idx and count_idx
self.dummy_idx = dummy_idx
self.count_idx = count_idx
# get the exogenous variables
exog = _get_margeff_exog(exog, at, atexog, effects_idx)
# get base marginal effects, handled by sub-classes
effects = model._derivative_exog(params, exog, method,
dummy_idx, count_idx)
J = getattr(model, 'J', 1)
effects_idx = np.tile(effects_idx, J) # adjust for multi-equation.
effects = _effects_at(effects, at)
if at == 'all':
if J > 1:
K = model.K - np.any(~effects_idx) # subtract constant
self.margeff = effects[:, effects_idx].reshape(-1, K, J,
order='F')
else:
self.margeff = effects[:, effects_idx]
else:
# Set standard error of the marginal effects by Delta method.
margeff_cov, margeff_se = margeff_cov_with_se(model, params, exog,
results.cov_params(), at,
model._derivative_exog,
dummy_idx, count_idx,
method, J)
# reshape for multi-equation
if J > 1:
K = model.K - np.any(~effects_idx) # subtract constant
self.margeff = effects[effects_idx].reshape(K, J, order='F')
self.margeff_se = margeff_se[effects_idx].reshape(K, J,
order='F')
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
else:
# do not care about at constant
# hack truncate effects_idx again if necessary
# if eyex, then effects is truncated to be without extra params
effects_idx = effects_idx[:len(effects)]
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
self.margeff_se = margeff_se[effects_idx]
self.margeff = effects[effects_idx]
| statsmodels/statsmodels | statsmodels/discrete/discrete_margins.py | Python | bsd-3-clause | 26,644 |
# standard libraries
import os
import collections.abc
import collections
import http.cookies
# third party libraries
pass
# first party libraries
pass
__where__ = os.path.dirname(os.path.abspath(__file__))
class RequestCookies(collections.abc.Mapping):
def __init__(self, **cookies):
self._cookies = cookies
@classmethod
def from_header(cls, header):
#parsed_cookies = http.cookies.SimpleCookie()
#parsed_cookies.load(header)
#cookies = {key: value.value for key, value in parsed_cookies.items()}
cookies = {}
for cookie in header.split(';'):
cookie = cookie.strip(' ')
morsels = cookie.split('=')
if len(morsels) == 0:
continue
elif len(morsels) == 1:
key = morsels[0]
value = None
else:
key = morsels[0].strip()
value = '='.join(morsels[1:])
key = key.strip()
if value is not None:
value = value.strip()
cookies[key] = value
return cls(**cookies)
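    # Illustrative example (the header value is made up):
    # >>> RequestCookies.from_header('session=abc123; theme=dark')['theme']
    # 'dark'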
def __getitem__(self, key):
return self._cookies[key]
def __iter__(self):
return iter(self._cookies)
def __len__(self):
return len(self._cookies)
def __str__(self):
return '; '.join(
'{}={}'.format(
key, http.cookies._quote(value)
) for key, value in self.items()
)
def __repr__(self):
return '{}.{}({})'.format(
self.__module__, self.__class__.__name__, ', '.join(
"{}='{}'".format(key, value) for key, value in self.items()
)
)
class ResponseCookie:
def __init__(self, key, value='', max_age=None, expires=None, path=None,
domain=None, secure=False, http_only=False):
self.key = key
self.value = value
self.max_age = max_age
self.expires = expires
self.path = path
self.domain = domain
self.secure = secure
self.http_only = http_only
def __repr__(self):
return '{}.{}({})'.format(
self.__module__, self.__class__.__name__, ', '.join(
'{}={}'.format(name, repr(getattr(self, name))) for name in (
'key', 'value', 'max_age', 'expires', 'path', 'domain',
'secure', 'http_only',
)
)
)
def __str__(self):
key = self.key
if self.value is not None:
value = self.value
else:
value = ''
cookie_segments = ['{}={}'.format(key, value), ]
if self.max_age is not None:
cookie_segments.append('Max-Age={}'.format(self.max_age))
if self.expires is not None:
cookie_segments.append(
'Expires={:%a, %d-%b-%Y %T GMT}'.format(self.expires)
)
if self.path is not None:
cookie_segments.append('Path={}'.format(self.path))
if self.domain is not None:
cookie_segments.append('Domain={}'.format(self.domain))
if self.secure:
cookie_segments.append('Secure')
if self.http_only:
cookie_segments.append('HttpOnly')
return '; '.join(cookie_segments)
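# Illustrative example of the Set-Cookie style string produced by __str__ above
# (all values are made up):
# >>> str(ResponseCookie('session', 'abc123', path='/', http_only=True))
# 'session=abc123; Path=/; HttpOnly'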
class ResponseCookies(collections.abc.MutableMapping):
def __init__(self, *cookies):
self._cookies = collections.OrderedDict(
(cookie.key, cookie) for cookie in cookies
)
def set(self, key, value='', max_age=None, expires=None, path=None,
domain=None, secure=False, http_only=False):
self[key] = ResponseCookie(
key, value, max_age, expires, path, domain, secure, http_only
)
def set_from_header(self, cookie):
raise NotImplementedError
def __getitem__(self, key):
return self._cookies[key]
def __delitem__(self, key):
del self._cookies[key]
def __iter__(self):
return iter(self._cookies)
def __len__(self):
return len(self._cookies)
def __setitem__(self, key, value):
if isinstance(value, ResponseCookie):
self._cookies[key] = value
else:
self.set(key, value)
def __str__(self):
return '\n'.join(
'{}: {}'.format(key, value) for key, value in self.headers
)
def __repr__(self):
return '{}.{}({})'.format(
self.__module__, self.__class__.__name__, ', '.join(
(repr(cookie) for cookie in self.values())
)
)
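# Illustrative usage sketch for ResponseCookies (values are made up):
# >>> jar = ResponseCookies()
# >>> jar.set('session', 'abc123', path='/', http_only=True)
# >>> [str(cookie) for cookie in jar.values()]
# ['session=abc123; Path=/; HttpOnly']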
| brianjpetersen/bocce | bocce/cookies.py | Python | mit | 4,695 |
from __future__ import absolute_import
import time
import itertools
from nose import SkipTest
from celery.datastructures import ExceptionInfo
from celery.tests.utils import Case
def do_something(i):
return i * i
def long_something():
time.sleep(1)
def raise_something(i):
try:
raise KeyError("FOO EXCEPTION")
except KeyError:
return ExceptionInfo()
class test_TaskPool(Case):
def setUp(self):
try:
__import__("multiprocessing")
except ImportError:
raise SkipTest("multiprocessing not supported")
from celery.concurrency.processes import TaskPool
self.TaskPool = TaskPool
def test_attrs(self):
p = self.TaskPool(2)
self.assertEqual(p.limit, 2)
self.assertIsNone(p._pool)
def x_apply(self):
p = self.TaskPool(2)
p.start()
scratchpad = {}
proc_counter = itertools.count().next
def mycallback(ret_value):
process = proc_counter()
scratchpad[process] = {}
scratchpad[process]["ret_value"] = ret_value
myerrback = mycallback
res = p.apply_async(do_something, args=[10], callback=mycallback)
res2 = p.apply_async(raise_something, args=[10], errback=myerrback)
res3 = p.apply_async(do_something, args=[20], callback=mycallback)
self.assertEqual(res.get(), 100)
time.sleep(0.5)
self.assertDictContainsSubset({"ret_value": 100},
scratchpad.get(0))
self.assertIsInstance(res2.get(), ExceptionInfo)
self.assertTrue(scratchpad.get(1))
time.sleep(1)
self.assertIsInstance(scratchpad[1]["ret_value"],
ExceptionInfo)
self.assertEqual(scratchpad[1]["ret_value"].exception.args,
("FOO EXCEPTION", ))
self.assertEqual(res3.get(), 400)
time.sleep(0.5)
self.assertDictContainsSubset({"ret_value": 400},
scratchpad.get(2))
res3 = p.apply_async(do_something, args=[30], callback=mycallback)
self.assertEqual(res3.get(), 900)
time.sleep(0.5)
self.assertDictContainsSubset({"ret_value": 900},
scratchpad.get(3))
p.stop()
| couchbaselabs/celery | celery/tests/concurrency/test_pool.py | Python | bsd-3-clause | 2,351 |
from itertools import izip
import random
import numpy as np
from learntools.libs.common_test_utils import use_logger_in_test
from learntools.data import cv_split
from learntools.emotiv import BaseEmotiv, prepare_data
from learntools.emotiv.tests.emotiv_simple import SimpleEmotiv
from learntools.data import Dataset
import pytest
slow = pytest.mark.slow
def gen_small_emotiv_data():
conds = ['cond1', 'cond2', 'cond1', 'cond2', 'cond1']
eegs = [[1, 2, 3, 4],
[4, 3, 2, 1],
[1, 2, 3, 4],
[4, 3, 2, 1],
[1, 2, 3, 4]]
headers = [('condition', Dataset.ENUM), ('eeg', Dataset.MATFLOAT)]
dataset = Dataset(headers=headers, n_rows=len(conds))
for i, row in enumerate(izip(conds, eegs)):
dataset[i] = row
return dataset
def gen_random_emotiv_data():
eeg_width = 60
source_magnitude = 0.5
noise_magnitude = 1.5
n_rows = 8000
eeg_sources = {'cond1': np.random.random(eeg_width) * source_magnitude,
'cond2': np.random.random(eeg_width) * source_magnitude}
conds = ['cond1', 'cond2']
headers = [('condition', Dataset.ENUM), ('eeg', Dataset.MATFLOAT)]
dataset = Dataset(headers=headers, n_rows=n_rows)
for i in xrange(n_rows):
cond = conds[random.randint(0, 1)]
eeg = eeg_sources[cond] + np.random.random(eeg_width) * noise_magnitude
dataset[i] = (cond, eeg)
return dataset
@use_logger_in_test
def test_emotive_base():
dataset = gen_small_emotiv_data()
train_idx, valid_idx = cv_split(dataset, percent=0.3, fold_index=0)
prepared_data = (dataset, train_idx, valid_idx)
model = BaseEmotiv(prepared_data, batch_size=1)
best_loss, best_epoch = model.train_full(n_epochs=40, patience=40)
assert best_loss > 0.8
@use_logger_in_test
@slow
def test_emotive_base_random():
dataset = gen_random_emotiv_data()
train_idx, valid_idx = cv_split(dataset, percent=0.2, fold_index=0)
prepared_data = (dataset, train_idx, valid_idx)
model = BaseEmotiv(prepared_data, batch_size=50)
best_loss, best_epoch = model.train_full(n_epochs=100, patience=100)
assert best_loss > 0.8
@use_logger_in_test
def test_emotiv_simple():
""" tests that we can train a simple logistic regression model on the emotiv data """
dataset = prepare_data('raw_data/indices_all.txt', conds=['EyesOpen', 'EyesClosed'])
train_idx, valid_idx = cv_split(dataset, percent=0.3, fold_index=0)
model = SimpleEmotiv((dataset, train_idx, valid_idx), batch_size=1)
best_loss, best_epoch = model.train_full(n_epochs=100, patience=100)
    assert best_loss > .67
| yueranyuan/vector_edu | learntools/emotiv/tests/test_emotiv_base.py | Python | mit | 2,637 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides tools for making a simulated image for tests.
"""
from astropy.io import fits
import numpy as np
from ..geometry import EllipseGeometry
from ...datasets import make_noise_image
def make_test_image(nx=512, ny=512, x0=None, y0=None,
background=100., noise=1.e-6, i0=100., sma=40.,
eps=0.2, pa=0., seed=None):
"""
Make a simulated image for testing the isophote subpackage.
Parameters
----------
nx, ny : int, optional
The image size.
x0, y0 : int, optional
The center position. If `None`, the default is the image center.
background : float, optional
The constant background level to add to the image values.
noise : float, optional
The standard deviation of the Gaussian noise to add to the image.
i0 : float, optional
The surface brightness over the reference elliptical isophote.
sma : float, optional
The semi-major axis length of the reference elliptical isophote.
eps : float, optional
The ellipticity of the reference isophote.
pa : float, optional
The position angle of the reference isophote.
seed : int, optional
A seed to initialize the `numpy.random.BitGenerator`. If `None`,
then fresh, unpredictable entropy will be pulled from the OS.
Returns
-------
data : 2D `~numpy.ndarray`
The resulting simulated image.
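    Examples
    --------
    A small illustrative call (parameter values are arbitrary):
    >>> data = make_test_image(nx=128, ny=128, eps=0.3, seed=0)
    >>> data.shape
    (128, 128)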
"""
if x0 is None or y0 is None:
xcen = nx / 2
ycen = ny / 2
else:
xcen = x0
ycen = y0
g = EllipseGeometry(xcen, ycen, sma, eps, pa, 0.1, False)
y, x = np.mgrid[0:ny, 0:nx]
radius, angle = g.to_polar(x, y)
e_radius = g.radius(angle)
tmp_image = radius / e_radius
image = i0 * np.exp(-7.669 * (tmp_image**0.25 - 1.)) + background
# central pixel is messed up; replace it with interpolated value
image[int(xcen), int(ycen)] = (image[int(xcen - 1), int(ycen)] +
image[int(xcen + 1), int(ycen)] +
image[int(xcen), int(ycen - 1)] +
image[int(xcen), int(ycen + 1)]) / 4.
image += make_noise_image(image.shape, distribution='gaussian', mean=0.,
stddev=noise, seed=seed)
return image
def make_fits_test_image(name, nx=512, ny=512, x0=None, y0=None,
background=100., noise=1.e-6, i0=100., sma=40.,
eps=0.2, pa=0., seed=None):
"""
Make a simulated image and write it to a FITS file.
    Used for testing the isophote subpackage.
Examples
--------
import numpy as np
pa = np.pi / 4.
make_fits_test_image('synth_lowsnr.fits', noise=40., pa=pa,
seed=0)
make_fits_test_image('synth_highsnr.fits', noise=1.e-12, pa=pa,
seed=0)
make_fits_test_image('synth.fits', pa=pa, seed=0)
"""
if not name.endswith('.fits'):
name += '.fits'
array = make_test_image(nx, ny, x0, y0, background, noise, i0, sma, eps,
pa, seed=seed)
hdu = fits.PrimaryHDU(array)
hdulist = fits.HDUList([hdu])
header = hdulist[0].header
header['X0'] = (x0, 'x position of galaxy center')
header['Y0'] = (y0, 'y position of galaxy center')
header['BACK'] = (background, 'constant background value')
header['NOISE'] = (noise, 'standard deviation of noise')
header['I0'] = (i0, 'reference pixel value')
header['SMA'] = (sma, 'reference semi major axis')
header['EPS'] = (eps, 'ellipticity')
    header['PA'] = (pa, 'position angle')
hdulist.writeto(name)
| astropy/photutils | photutils/isophote/tests/make_test_data.py | Python | bsd-3-clause | 3,779 |
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
urlpatterns = patterns('',
url(r'^$', 'website.views.index'),
url(r'^uploader/$', 'website.views.uploader'),
url(r'^register/$', 'website.views.register'),
url(r'^account/$', 'website.views.account'),
url(r'^checkout/$', 'website.views.checkout'),
url(r'^download/$', 'website.views.download_codes'),
url(r'^order_products/$', 'website.views.get_order_products'),
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'website/login.html'}),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'upload/', 'website.views.upload', name = 'jfu_upload' ),
# You may optionally define a delete url as well
url( r'^delete/(?P<pk>\d+)$', 'website.views.upload_delete', name = 'jfu_delete' ),
url(r'^assets/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| vlameiras/cdkeyswholesale | website/urls.py | Python | mit | 1,066 |
from lib.hachoir_parser.network.tcpdump import TcpdumpFile
| Branlala/docker-sickbeardfr | sickbeard/lib/hachoir_parser/network/__init__.py | Python | mit | 60 |
# coding: utf-8
# In[2]:
import time
from bs4 import BeautifulSoup
import sys, io
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.proxy import *
# @author Ranjeet Singh <[email protected]>
# Modify it according to your requirements
no_of_reviews = 1000
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
driver = webdriver.Chrome(r"C:\Users\user\Anaconda3\Scripts\chromedriver.exe")
wait = WebDriverWait( driver, 10 )
# Append your app store urls here
urls = ["https://play.google.com/store/apps/details?id=com.flipkart.android&hl=en"]
for url in urls:
driver.get(url)
page = driver.page_source
soup_expatistan = BeautifulSoup(page, "html.parser")
expatistan_table = soup_expatistan.find("h1", class_="AHFaub")
print("App name: ", expatistan_table.string)
expatistan_table = soup_expatistan.findAll("span", class_="htlgb")[4]
print("Installs Range: ", expatistan_table.string)
expatistan_table = soup_expatistan.find("meta", itemprop="ratingValue")
print("Rating Value: ", expatistan_table['content'])
expatistan_table = soup_expatistan.find("meta", itemprop="reviewCount")
print("Reviews Count: ", expatistan_table['content'])
soup_histogram = soup_expatistan.find("div", class_="VEF2C")
rating_bars = soup_histogram.find_all('div', class_="mMF0fd")
for rating_bar in rating_bars:
print("Rating: ", rating_bar.find("span").text)
print("Rating count: ", rating_bar.find("span", class_="L2o20d").get('title'))
# open all reviews
url = url+'&showAllReviews=true'
driver.get(url)
time.sleep(5) # wait dom ready
for i in range(1,10):
driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')#scroll to load other reviews
time.sleep(1)
page = driver.page_source
soup_expatistan = BeautifulSoup(page, "html.parser")
expand_pages = soup_expatistan.findAll("div", class_="d15Mdf")
counter = 1
for expand_page in expand_pages:
try:
print("\n===========\n")
print("review:"+str(counter))
print("Author Name: ", str(expand_page.find("span", class_="X43Kjb").text))
print("Review Date: ", expand_page.find("span", class_="p2TkOb").text)
'''
//didn't find reviewer link
print("Reviewer Link: ", expand_page.find("a", class_="reviews-permalink")['href'])
'''
reviewer_ratings = expand_page.find("div", class_="pf5lIe").find_next()['aria-label'];
reviewer_ratings = reviewer_ratings.split('(')[0]
reviewer_ratings = ''.join(x for x in reviewer_ratings if x.isdigit())
print("Reviewer Ratings: ", reviewer_ratings)
'''
//didn't find review title
print("Review Title: ", str(expand_page.find("span", class_="review-title").string))
'''
print("Review Body: ", str(expand_page.find("div", class_="UD7Dzf").text))
developer_reply = expand_page.find_parent().find("div", class_="LVQB0b")
if hasattr(developer_reply, "text"):
print("Developer Reply: "+"\n", str(developer_reply.text))
else:
print("Developer Reply: ", "")
counter+=1
except:
pass
driver.quit()
| ranjeet867/google-play-crawler | crawl_play_store.py | Python | mit | 3,421 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('chat', '0004_auto_20150905_1700'),
]
operations = [
migrations.RenameField(
model_name='message',
old_name='text',
new_name='message_content',
),
migrations.AddField(
model_name='message',
name='message_type',
field=models.CharField(default=b'text', max_length=10, choices=[(b'text', b'text'), (b'image', b'image')]),
),
]
| dionyziz/ting | API/chat/migrations/0005_auto_20160511_1921.py | Python | mit | 619 |
import json
class NoEventLoop(NameError):
pass
class DataStore:
def __init__(self, filename=None, evt_loop=None, debug=False):
if not evt_loop:
raise NoEventLoop
self.namespace = {}
self.debug = debug
self.evt_loop = evt_loop
self.filename = filename
if self.filename:
self.import_file()
if self.debug:
self.evt_loop.on('.+', lambda *args, **kwargs: print("Args: %s\nKwargs:%s" % (args, kwargs)))
self.evt_loop.on('log debug$', lambda message: print(message))
self.evt_loop.on('insert$', self.insert)
self.evt_loop.on('delete$', self.delete)
self.evt_loop.on('find$', self.find)
        self.evt_loop.on('find_one$', self.find_one)
def commit(self, filename=None):
out_filename = filename or self.filename
if out_filename:
try:
                with open(out_filename, 'w') as f:
                    f.write(self.__toJSON(self.namespace))
except:
pass
def import_file(self, filename=None):
in_filename = filename or self.filename
if in_filename:
try:
with open(in_filename, 'rb') as f:
self.namespace = self.__fromJSON(f.read())
except:
pass
def __toJSON(self, code):
#TODO: implement better conversion
return json.dumps(code)
def __fromJSON(self, code):
#TODO: implement better conversion
return json.loads(code)
def insert(self, collection, document, callback=None, *args, **kwargs):
if collection and document:
if collection in self.namespace.keys():
self.namespace[collection].append(document)
else:
self.namespace[collection] = [document]
if callback:
callback(None, document, *args, **kwargs)
elif callback:
callback(True, None, *args, **kwargs)
def delete(self, collection, query, callback=None, *args, **kwargs):
if collection in self.namespace.keys():
doccount = 0
for doc in self.namespace[collection]:
match = True
for key in query.keys():
if key in doc.keys() and query.get(key) == doc.get(key):
continue
else:
match = False
break
if match:
self.namespace[collection].pop(self.namespace[collection].index(doc))
doccount += 1
if callback:
callback(None, doccount, *args, **kwargs)#(err, doccount)
else:
pass
elif callback:
callback(True, None, *args, **kwargs)
def find(self, collection, query, callback, *args, **kwargs):
if collection in self.namespace.keys():
retval = []
for doc in self.namespace[collection]:
match=True
for key in query.keys():
if key in doc.keys() and query.get(key) == doc.get(key):
continue
else:
match = False
break
if match:
retval.append(doc)
callback(None, retval, *args, **kwargs)
else:
callback(True, None, *args, **kwargs)
def find_one(self, collection, query, callback, *args, **kwargs):
if collection in self.namespace.keys():
d = None
for doc in self.namespace[collection]:
match = True
for key in query.keys():
if key in doc.keys() and query.get(key) == doc.get(key):
continue
else:
match = False
break
if match:
d = doc
break
callback(None, d, *args, **kwargs)
else:
callback(True, None, *args, **kwargs)
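# Illustrative usage sketch; ``loop`` stands for any event-loop object exposing an
# ``on(pattern, callback)`` method, which is what DataStore assumes:
# store = DataStore(filename='data.json', evt_loop=loop)
# store.insert('users', {'name': 'ada'})
# store.find_one('users', {'name': 'ada'}, lambda err, doc: print(doc))
# store.commit()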
| jackatbancast/bettaDB | bettadb/db.py | Python | mit | 4,168 |
"""The test for binary_sensor device automation."""
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.binary_sensor import DEVICE_CLASSES, DOMAIN
from homeassistant.components.binary_sensor.device_trigger import ENTITY_TRIGGERS
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg, enable_custom_integrations):
"""Test we get the expected triggers from a binary_sensor."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": trigger["type"],
"device_id": device_entry.id,
"entity_id": platform.ENTITIES[device_class].entity_id,
}
for device_class in DEVICE_CLASSES
for trigger in ENTITY_TRIGGERS[device_class]
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert triggers == expected_triggers
async def test_get_trigger_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a binary_sensor trigger."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
for trigger in triggers:
capabilities = await async_get_device_automation_capabilities(
hass, "trigger", trigger
)
assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(hass, calls, enable_custom_integrations):
"""Test for on and off triggers firing."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "bat_low",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "bat_low {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "not_bat_low",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "not_bat_low {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "not_bat_low device - {} - on - off - None".format(
sensor1.entity_id
)
hass.states.async_set(sensor1.entity_id, STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "bat_low device - {} - off - on - None".format(
sensor1.entity_id
)
async def test_if_fires_on_state_change_with_for(
hass, calls, enable_custom_integrations
):
"""Test for triggers firing with delay."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "turned_off",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
await hass.async_block_till_done()
assert calls[0].data["some"] == "turn_off device - {} - on - off - 0:00:05".format(
sensor1.entity_id
)
| kennedyshead/home-assistant | tests/components/binary_sensor/test_device_trigger.py | Python | apache-2.0 | 8,733 |
# Copyright (C) 2013 Marco Aslak Persson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
class ParserError(Exception):
"""An error raised when an invalid syntax has been encountered.
"""
def __init__( self , token , expected=None , eof=False ):
"""token : is the token that invoked the error
        expected : is the string message informing about what the parser expected ( default None )
        eof : makes the error become an EOF error ( default False )
"""
self.token = token;
self.expected = expected;
        self.eof = eof;
def __str__( self ):
if self.eof:
return "Parsing error: reached EOF before expected!";
s = "Parsing error at %d:%d, got token %s "%( self.token.line , self.token.pos , str(self.token) );
if self.expected != None:
s += ", expected %s"%( self.expected );
return s;
class Parser(object):
"""A parser that will take a list of tokens and returns a AST
( though this is not forced, any value can be returned. )
"""
def __init__( self ):
self.tokens = [];
self.index = 0;
self.channel = 1;
self.hidden = False;
        self.marks = [ ( 0 , self.channel , self.hidden ) ];
def cur( self ):
"""Returns the current token.
"""
return self.tokens[self.index]
def lookahead( self , amount=1 , channel=None , hidden=None ):
"""Returns the next token without moving to it.
        amount : the specified amount of tokens to look ahead ( default 1 )
channel : the specified channel to handle tokens in ( default self.channel )
hidden : should read hidden tokens ( default self.hidden )
"""
channel = channel or self.channel;
hidden = hidden or self.hidden;
if self.index+amount >= len( self.tokens ):
raise ParserError( None , None , True ); #Raise a EOF exception
tokenoffset = 0;
tokenslookedahead = 0;
while tokenslookedahead != amount:
tokenoffset += 1;
if self.index + tokenoffset >= len( self.tokens ):
raise ParserError( None , None , True ); #Raise a EOF exception
testtoken = self.tokens[ self.index + tokenoffset ];
if testtoken.channel == channel and testtoken.hidden == hidden:
tokenslookedahead += 1;
if tokenslookedahead == amount:
return self.tokens[ self.index + tokenoffset ];
def matches( self , ttype , channel=None , hidden=None ):
"""Returns if the current token matches the specified type
ttype : the type of the token to check for
channel : the specified channel to handle tokens in ( default self.channel )
hidden : should read hidden tokens ( default self.hidden )
"""
channel = channel or self.channel;
hidden = hidden or self.hidden;
c = self.cur();
return c.type == ttype and c.channel == channel and c.hidden == hidden;
def lookaheadmatches( self , ttype , amount = 1 , channel=None , hidden=None ):
"""The same as matches( lookahead( ) )
ttype : the type of the token to check for
        amount : the specified amount of tokens to look ahead ( default 1 )
channel : the specified channel to handle tokens in ( default self.channel )
hidden : should read hidden tokens ( default self.hidden )
"""
channel = channel or self.channel;
hidden = hidden or self.hidden;
return self.lookahead( amount , channel , hidden ).type == ttype;
def hasnext( self , channel=None , hidden=None ):
"""Returns if there is another token to be read.
channel : the specified channel to handle tokens in ( default self.channel )
hidden : should read hidden tokens ( default self.hidden )
"""
channel = channel or self.channel;
hidden = hidden or self.hidden;
i = self.index+1;
if i >= len( self.tokens ):
return False;
while self.tokens[i].channel != channel or self.tokens[i].hidden != hidden:
i += 1;
if i >= len( self.tokens ):
return False;
return True;
def next( self , channel=None , hidden=None ):
"""Returns current token, and moves to the next token.
channel : the specified channel to handle tokens in ( default self.channel )
hidden : should read hidden tokens ( default self.hidden )
"""
channel = channel or self.channel;
hidden = hidden or self.hidden;
r = self.tokens[ self.index ]
i = self.index+1;
if i >= len( self.tokens ):
raise ParserError( None , None , True ); #Raise a EOF exception
while self.tokens[i].channel != channel or self.tokens[i].hidden != hidden:
i += 1;
if i >= len( self.tokens ):
raise ParserError( None , None , True ); #Raise a EOF exception
self.index = i;
return r;
# DO NOTE:
# This method does not apply the given channel and hidden parameters
# to the next() call, so set the parser variables for that instead
def nextif( self , ttype , channel=None , hidden=None ):
"""If the type of the current token matches the specified type, calls next( )
ttype : the type of the token to check for
channel : the specified channel to handle tokens in ( default self.channel )
hidden : should read hidden tokens ( default self.hidden )
"""
channel = channel or self.channel;
hidden = hidden or self.hidden;
r = False;
if self.matches( ttype , channel , hidden ):
r = self.cur();
self.next();
return r;
def expect( self , ttype , errorexp=None , channel=None , hidden=None ):
"""Returns current token if the specified type matches, and calls next( ) else raises a ParsingError
ttype : the type of the token to check for
errorexp : the value displayed in the error message ( default ttype )
channel : the specified channel to handle tokens in ( default self.channel )
hidden : should read hidden tokens ( default self.hidden )
"""
channel = channel or self.channel;
hidden = hidden or self.hidden;
errorexp = errorexp or ttype;
n = self.nextif( ttype , channel , hidden )
if not n:
raise ParserError( self.cur() , errorexp );
else:
return n;
def skiptokens( self , channel=None , hidden=None ):
"""Skips all tokens without the given settings
channel : the channel that shouldn't be skipped past
hidden : the visibility that shouldn't be skipped past
"""
channel = channel or self.channel;
hidden = hidden or self.hidden;
while True:
v = self.cur();
if v.channel == channel and v.hidden == hidden:
break;
self.index += 1;
if self.index == len( self.tokens ):
raise ParserError( None , None , True ); #Raise a EOF exception
def mark( self ):
"""Marks the current tokens index so
it can be returned to with restore()
"""
self.marks.append( ( self.index , self.channel , self.hidden ) );
def restore( self ):
"""Returns to last marked token index ( marked with mark() )
"""
t = self.marks[-1];
if len( self.marks ) != 1:
self.marks.pop();
self.index = t[0];
self.channel = t[1];
self.hidden = t[2];
def popmark( self ):
"""Pops the last mark from the stack without applying it.
"""
if len( self.marks ) != 1:
self.marks.pop();
def parse( self , tokens ):
"""Parses a list of tokens and returns a AST ( not required though, could return any value )
This method should be overridden and called from the child class.
tokens : the list of tokens to read from
"""
self.tokens = tokens;
self.index = 0;
        self.marks = [ ( 0 , self.channel , self.hidden ) ]
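# Illustrative sketch of a concrete parser built on the class above; the token
# types 'NUMBER' and 'COMMA' are hypothetical and assume the lexer emits an
# explicit end-of-input token so expect()/next() never run past the stream:
# class NumberListParser(Parser):
#     def parse(self, tokens):
#         super(NumberListParser, self).parse(tokens)
#         values = [self.expect('NUMBER')]
#         while self.nextif('COMMA'):
#             values.append(self.expect('NUMBER'))
#         return values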
| nulldatamap/IceLeaf | IceLeaf/parse.py | Python | agpl-3.0 | 7,744 |
#!/usr/bin/env python
"""
Dittohead watcher daemon.
Uses watchdog to watch a given "inbox" input directory where files are landing
from the dittohead clients.
Does not do anything until a `.directory` is renamed to `directory` without a dot.
Then it spawns a `dittohead_worker.py` subprocess to operate on that directory.
"""
import os
import logging
import yaml
import subprocess
import time
import sys
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
def load_yaml(filename):
if os.path.isfile(filename):
stream = open(filename, 'r')
x = yaml.load(stream)
stream.close()
return x
else:
return {}
def configure_logging(log_directory):
# From the logging cookbook: https://docs.python.org/2/howto/logging-cookbook.html#logging-cookbook
log = logging.getLogger('dittohead-watcher')
log.setLevel(logging.DEBUG)
# create file handler which logs debug messages
fh = logging.FileHandler('{0}/dittohead-watcher.log'.format(log_directory))
fh.setLevel(logging.DEBUG)
# create console handler with a less-verbose log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
log.addHandler(fh)
log.addHandler(ch)
return log
def operate(directory):
path = os.path.dirname(os.path.abspath(__file__))
subprocess.Popen(
["python", os.path.join(path, "dittohead_worker.py"), directory]
)
class DittoheadWatcher(FileSystemEventHandler):
def __init__(self, config, *args, **kwargs):
super(DittoheadWatcher, self).__init__(*args, **kwargs)
self.log = configure_logging(config["log_directory"])
self.config = config
def path_is_period(self, path):
return os.path.basename(path).startswith(".")
def on_created(self, event):
self.log.info("Got created: {0}".format(event))
if event.is_directory:
# If the thing that was created was a directory and it doesn't start with ., throw a warning
if not self.path_is_period(event.src_path):
self.log.warn("Directory was created without a period: {0}".format(event.src_path))
else:
self.log.info("New directory was created: {0}".format(event.src_path))
def on_moved(self, event):
if event.is_directory:
# If the thing that moved was a directory,
# and its new name does not start with a period,
# we should operate on it
if self.path_is_period(event.src_path) and not self.path_is_period(event.dest_path):
self.log.info("Got moved directory, firing worker: {0}".format(event))
operate(event.dest_path)
def main():
config = load_yaml("config.yaml")
input_dir = config["input_directory"]
# First, if there are any pending things in the directory already, operate on them!
for f in os.listdir(input_dir):
path = os.path.join(input_dir, f)
if os.path.isdir(path) and not os.path.basename(path).startswith("."):
operate(path)
# Now, watch the input directory using Watchdog
observer = Observer()
watcher = DittoheadWatcher(config)
observer.schedule(watcher, input_dir)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
if __name__ == '__main__':
main()
| dgfitch/dittohead | src/server/dittohead_watcher.py | Python | unlicense | 3,682 |
# -*- coding: utf-8 -*-
#
# libxmlquery documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 5 15:13:45 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libxmlquery'
copyright = u'2010, Frederico Gonçalves, Vasco Fernandes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libxmlquerydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libxmlquery.tex', u'libxmlquery Documentation',
u'Frederico Gonçalves, Vasco Fernandes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libxmlquery', u'libxmlquery Documentation',
[u'Frederico Gonçalves, Vasco Fernandes'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'libxmlquery'
epub_author = u'Frederico Gonçalves, Vasco Fernandes'
epub_publisher = u'Frederico Gonçalves, Vasco Fernandes'
epub_copyright = u'2010, Frederico Gonçalves, Vasco Fernandes'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| nullable/libxmlquery | documentation/conf.py | Python | mit | 8,376 |
from pymongo import MongoClient
import app
import settings
def connect(collection):
client = MongoClient()
d = client[settings.MONGO_DATABASE]
return d[collection]
def bake():
from flask import g
for route in ['index']:
with (app.app.test_request_context(path="/%s.html" % route)):
view = globals()['app'].__dict__[route]
file_path = "www/%s.html" % route
html = view().encode('utf-8')
            with open(file_path, "wb") as writefile:  # binary mode: html was utf-8 encoded above
writefile.write(html)
print("Wrote %s" % file_path)
| ireapps/lightning-talks | utils.py | Python | mit | 588 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module has two important functions:
htmlparser.find_download_links(): this function is used to scrape webpages for download links and html tables,
allowing users to find and download items from a page.
htmlparser.compare(): this function is used to compare some data frame with a date column in it to the years in office
of the relevant president or party in office at the time.
"""
# \/ Local Imports \/
from .tablereader import get_fe
# \/ Third-Party Imports \/
from bokeh import plotting
import bokeh.palettes as palettes
from bokeh.embed import components
from flask import render_template
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import pandas as pd
import os
import urllib.request
import logging
import datetime
from dateparser import parse
logging.basicConfig(filename='webservice.log', level=logging.DEBUG)  # set up the logger at level debug
def prlog(msg, log=True, prnt=False, level='ERROR'):
"""logging/printing function
:type log: bool
:param msg: msg to print/log
:param log: if the function should log the msg or not
:param prnt: if the function should print the msg or not
:param level: what level the logger records the message at
:return: None
"""
if prnt:
print(msg)
elif log:
print("logging." + level)
logging.error(msg)
def download_approved(ext, filep):
"""checks if the file extension is correct AND the file is readable
:param ext: the expected file extension of the file
:param filep: the filepath of the file
:return: True if approved, False if not
"""
approved_exts = ['.csv', '.tsv']
delim = '' # namespace scope expansion
problem = 'none' # by default, no problems
    if ext in approved_exts:
        delim = ',' if ext == '.csv' else '\t'
    else:
        problem = 'some'  # unsupported extension: flag a problem
try:
filep.encode('utf-8').strip()
pd.read_table(filep, delim, header=0, engine='python')
except Exception as exc: # if problem, some problem
logging.error(exc)
problem = 'some'
if problem == 'none':
return True
elif problem == 'some':
return False
def previous_element(_list, index):
"""
Defines the previous element in a list
"""
return _list[index-1]
def find_download_links(url, filetype, output_name, in_number=0, download=False, clobber=True):
"""looks around webpages for download links
:param url: url to be scraped
:param filetype: filetype to look for in the scraped url
:param output_name: name of the file to be downloaded
:param in_number: the number by order of the link in the url
:param download: if the function should download a file or just return
:param clobber: if files with the same name should be clobbered
:return: a dictionary that returns an error as 'error', the outputname as 'filename'
and the list of links in the url as 'href_list'
"""
error = 'None'
p_url = urlparse(url)
    domain = '{urm.scheme}://{urm.netloc}'.format(urm=p_url)  # rebuild the scheme://host part of the url from its parsed pieces
dl_name = output_name # set to the output name by default if no transformations are needed
link_list = []
no_tags = ''
ext_length = len(filetype)
extension = url[-ext_length:]
output_path = os.getcwd() + '/data/' + dl_name
    log_message = 'file downloaded successfully as ' + dl_name + extension + ' from ' + no_tags
if filetype == output_name[-ext_length:]: # Format the file name so user input is flexible
dl_name = output_name[:len(output_name)-ext_length] # Can include file extension or none
# First and foremost, check if the user is entering a valid data format
if extension == filetype:
link_list.append(url) # Before anything, check if the url entered IS a dl link
if download and clobber: # check if directory exists, check if file is parseable, check allowed file extensions
urllib.request.urlretrieve(url, output_path)
if not download_approved(extension, output_path):
os.remove(output_path)
else:
prlog('file downloaded successfully as ' + dl_name)
else:
r = urllib.request.urlopen(url)
soup = BeautifulSoup(r, 'html.parser')
for link in soup.find_all('a', string=True): # look through the link tags as strings
no_tags = link.get('href')
logging.info(str(no_tags))
if filetype == str(no_tags)[-ext_length:]: # Check the three letter file extension
if 'http://' not in no_tags and 'https://' not in no_tags: # If no first part of the url, add it
no_tags = domain + '/' + no_tags
link_list.append(no_tags)
if download and clobber:
if no_tags is not None: # Null check
if len(link_list) == 1:
prlog("One link found: " + no_tags)
urllib.request.urlretrieve(no_tags, output_path)
if not download_approved(extension, output_path):
os.remove(output_path)
else:
prlog(log_message)
elif len(link_list) > 1:
for idx, item in enumerate(link_list):
prlog(str(idx) + '. ' + item)
if int(in_number) <= len(link_list):
no_tags = link_list[int(in_number)]
urllib.request.urlretrieve(no_tags, output_path)
if not download_approved(extension, output_path):
os.remove(output_path)
else:
prlog(log_message)
else:
error = 'No link found with that number (' + str(in_number) + ')'
else:
error = 'No links found!'
elif filetype not in str(no_tags):
error = 'No file found for that extension (' + filetype + ')'
prlog(error)
return {'error': error, 'download_name': dl_name, 'href_list': link_list}
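# A minimal usage sketch for find_download_links() (illustrative only; the URL and
# file name are placeholder assumptions). Scrape a page for .csv links, download the
# first match into ./data/, and inspect the result dictionary documented above:
#
#     links = find_download_links('http://example.com/datasets', '.csv', 'mydata.csv',
#                                 in_number=0, download=True)
#     if links['error'] == 'None':
#         print('saved as ' + links['download_name'])
#     else:
#         print('problem: ' + links['error'])
#     print(links['href_list'])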
def file_to_htmltable(filepath, delim=','):
"""read a file and generate an html table
:return: html table script as 'htmltable' variable
"""
dataframe = pd.read_table(filepath, delim, header=0, engine='python')
htmltable = dataframe.to_html(bold_rows=True, escape=True)
return htmltable
def compare(df1, col, title, x_lb, y_lb,
fedf=get_fe(),
year_col='Year',
html='plotlocal.html',
render=True,
urlth='patella'
):
"""compares a dataframe to the changes in political parties
:param df1: the first data frame to be compared.
:param fedf: the second dataframe to be compared, always fe_list.
:param col: the data column of the first data frame which holds the dates needed to be compared.
:param title: the title of the plot to be exported.
:param x_lb: the title of the x axis for the exported plot.
:param y_lb: the title of the y axis for the exported plot.
    :param year_col: the title of the column of the input data frame to set as the index
:param html: the html page to be rendered
:param render: if the function should return js components for a bokeh plot or just the input/resulting dataframes
:param urlth: the path the url takes to render
:return: a render_template object which sends a bokeh js/html/css plot to the page as well as a special div.
"""
data_col = int(col)-1 # subtract 1 from it so the column counting starts from one in the input
    df1.set_index(year_col, drop=True, inplace=True)  # year_col must name an existing column in df1
ind_list = [parse(str(int(x))).year for x in df1.index.get_level_values(0).values.tolist()]
office_yr_list = []
party_office_list = []
yrs_in_office = 1
party_yrs_in_office = 1
term_st_list = fedf.index.get_level_values(0)
totalfe_list = []
totalparty_list = []
# populate a list with tuples of the year and the index where the year changes by looping through the index of the
# data frame, which is the year column. This spits out a tuple that looks like (years, number of data points)
# afterwards , the lists interval and years_list are used to store the parts of the tuple individually
year_and_interval_list = [(i, previous_element(ind_list, i)) for i, year in
enumerate(ind_list) if i >= 1 and previous_element(ind_list, i) != ind_list[i]]
interval = [tpl[0] for tpl in year_and_interval_list]
years_list = [tpl[1] for tpl in year_and_interval_list]
total_chg = [(df1.iloc[0, data_col]-df1.iloc[interval[i], data_col]) if i == 0
else (df1.iloc[previous_element(interval, i), data_col] - df1.iloc[interval[i], data_col]) if interval[i] < len(ind_list)
else (df1.iloc[previous_element(interval, i), data_col] - df1.iloc[(len(ind_list) - previous_element(interval, i)), data_col])
for i, item in enumerate(years_list)]
# A list of variation from year to year, created by dividing the list at index by the previous element,
# then multiplying by 100 to get a percentage value.
cov_list = [total_chg[i]/df1.iloc[interval[i], data_col]*100 if previous_element(total_chg, i) != 0
else total_chg[i] for i, n in enumerate(years_list)]
# Create lists of all the parties and years over all the us years by finding the distance from from the first
# presidential term served to the last and appending incremented years in that range to the totalfe totalparty lists
for idx, item in enumerate(term_st_list):
if idx != len(term_st_list)-1:
dist = int(term_st_list[idx+1]) - int(item)
else:
dist = datetime.datetime.now().year - int(item) + 1
for n in range(dist):
totalfe_list.append(fedf.iloc[idx, 0])
totalparty_list.append(fedf.iloc[idx, 1])
# A list of all years there have been presidents
us_years = [yr+int(term_st_list[0]) for yr in range(int(term_st_list[-1]) - int(term_st_list[0]))]
# A list of all the parties over every year of the data set
party_list = [totalparty_list[i] for i, item in enumerate(us_years) for years in years_list if years == item]
# A list of all the fes over every year of the data set
fe_list = [totalfe_list[i] for i, item in enumerate(us_years) for years in years_list if years == item]
# Make the 'years in office' and 'party in office' lists by finding the number of consecutive
# a party/fe was in office
# list comprehension possible?
for idx, item in enumerate(fe_list):
        if idx >= 1 and fe_list[idx - 1] == fe_list[idx]:  # after the first element (avoids an index error) and if the first executive is unchanged
yrs_in_office += 1 # increment 1
else:
yrs_in_office = 1 # reset to year 1
office_yr_list.append(yrs_in_office) # append the year number
for idx, item in enumerate(party_list):
        if idx >= 1 and party_list[idx - 1] == party_list[idx]:  # after the first element (avoids an index error) and if the party is unchanged
party_yrs_in_office += 1 # increment 1
else:
party_yrs_in_office = 1 # reset to year 1
party_office_list.append(party_yrs_in_office) # append the year number
# Create the dataframe 'plotframe' that will be used to create the bokeh graph
plotframe = pd.DataFrame({'foo': []})
plotframe['Total Change'] = total_chg
plotframe['Percent Change'] = cov_list
plotframe['Years'] = years_list
plotframe['Party'] = party_list
plotframe['First Executive'] = fe_list
plotframe['Years in Office'] = office_yr_list
plotframe['Years Party in Office'] = party_office_list
plotframe.set_index('foo', drop=True, inplace=True) # remove stock column in a roundabout way
plotframe.set_index('Years', drop=True, inplace=True) # remove the year column and set as index
grouped = plotframe.groupby('First Executive')
# Bokeh Plot
p = plotting.figure(title=title,
x_axis_label=x_lb,
y_axis_label='Percent Change in ' + y_lb,
tools="pan,box_zoom,reset,save",
toolbar_location='below',
toolbar_sticky=False,
plot_height= 800,
plot_width= 800
)
    # build a categorical palette from bokeh's viridis colormap, one colour per grouped line in the figure
palette = [palettes.viridis(len(grouped))[i] for i, _ in enumerate(grouped)]
for idx, (name, data) in enumerate(grouped):
p.line(x=data['Years in Office'],
y=data['Percent Change'],
color=palette[idx],
legend=name,
)
p.background_fill_color = "LightGrey"
script, div = components(p) # split the graph into JSON/JS components script and div
    # convert the dataframe to html for easy display
df_htmltable = plotframe.to_html(bold_rows=True, escape=True, classes='dftable')
if render:
# If rendering, display the web page
plotting.show(p)
return render_template(html, script=script, div=div, table=df_htmltable, var=urlth)
else:
# otherwise, return the result dataframe and the input dataframe
return {'plotframe': plotframe, 'input_frame': df1}
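# A minimal usage sketch for compare() (illustrative only; the CSV name and column
# number are placeholder assumptions). The input frame must contain the column named
# by year_col (default 'Year'); with render=False the function returns the computed
# frames instead of rendering a Flask template:
#
#     import pandas as pd
#     df = pd.read_csv('data/mydata.csv')
#     out = compare(df, col=2, title='Example series', x_lb='Years in office',
#                   y_lb='Example metric', render=False)
#     print(out['plotframe'].head())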
| salbrandi/patella | patella/htmlparser.py | Python | mit | 13,807 |
# Copyright cf-units contributors
#
# This file is part of cf-units and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the :mod:`cf_units` package."""
| SciTools/cf_units | cf_units/tests/unit/__init__.py | Python | lgpl-3.0 | 251 |
import argparse
import uuid
import os
import sys
import hypergan as hg
import hyperchamber as hc
from hypergan.inputs import *
from hypergan.search.random_search import RandomSearch
from hypergan.discriminators.base_discriminator import BaseDiscriminator
from hypergan.generators.base_generator import BaseGenerator
from hypergan.gans.base_gan import BaseGAN
from hypergan.layer_shape import LayerShape
from common import *
import torch
from torch import optim
from torch.autograd import Variable
from torchvision import datasets, transforms
arg_parser = ArgumentParser(description='Train an classifier G(x) = label')
arg_parser.parser.add_argument('--dataset', '-D', type=str, default='mnist', help='dataset to use - options are mnist / cifar10')
arg_parser.parser.add_argument('--augment', '-A', type=bool, default=True, help='flip crop and normalize input')
args = arg_parser.parse_args()
config_name = args.config
save_file = "saves/"+config_name+"/model.ckpt"
os.makedirs(os.path.expanduser(os.path.dirname(save_file)), exist_ok=True)
class InputLoader:
def __init__(self, batch_size):
kwargs = {'num_workers': 0, 'pin_memory': True}
dataset_folder = args.dataset
dataset = datasets.MNIST
if args.dataset == 'cifar10':
dataset = datasets.CIFAR10
transform_train = transforms.Compose([
#transforms.RandomCrop(32, padding=4),
#transforms.RandomHorizontalFlip(),
transforms.ToTensor()#,
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
transform_test = transforms.Compose([
transforms.ToTensor()#,
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
if args.augment:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
train_loader = torch.utils.data.DataLoader(
dataset(dataset_folder, train=True, download=True,
transform=transform_train),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
dataset(dataset_folder, train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=False, **kwargs)
self.train_loader = train_loader
self.test_loader = test_loader
self.train_dataset = iter(self.train_loader)
self.test_dataset = iter(self.test_loader)
self.eye = torch.eye(10).cuda()
def batch_size(self):
return args.batch_size
def width(self):
return 28
def height(self):
return 28
def channels(self):
return 1
def next(self, index=0):
try:
            self.sample = next(self.train_dataset)
self.sample = [self.sample[0].cuda(), self.eye[self.sample[1].cuda()]]
return self.sample
except StopIteration:
self.train_dataset = iter(self.train_loader)
return self.next(index)
def testdata(self, index=0):
self.test_dataset = iter(self.test_loader)
while True:
try:
                self.sample = next(self.test_dataset)
self.sample = [self.sample[0].cuda(), self.eye[self.sample[1].cuda()]]
yield self.sample
except StopIteration:
return
class GAN(BaseGAN):
def __init__(self, *args, **kwargs):
self.discriminator = None
self.generator = None
self.loss = None
BaseGAN.__init__(self, *args, **kwargs)
self.x, self.y = self.inputs.next()
def create(self):
self.latent = self.create_component("latent")
self.generator = self.create_component("generator", input=self.inputs.next()[0])
if self.config.generator2:
self.generator2 = self.create_component("generator2", input=self.inputs.next()[1])
self.discriminator = self.create_component("discriminator", context_shapes={"digit": LayerShape(10)})
self.loss = self.create_component("loss")
def forward_discriminator(self, inputs):
return self.discriminator(inputs[0], {"digit": inputs[1]})
def forward_pass(self):
self.latent.next()
self.x, self.y = self.inputs.next()
g = self.generator(self.x)
if self.config.generator2:
g2 = self.generator2(self.y)
gy = self.generator(g2)
self.gy = gy
self.g2 = g2
self.g = g
d_real = self.forward_discriminator([self.x, self.y])
d_fake = self.forward_discriminator([self.x, g])
correct = torch.floor((torch.round(g) == self.y).long().sum(axis=1)/10.0).view(-1,1)
if self.config.generator2:
d_fake += correct * self.forward_discriminator([g2, gy])
self.d_fake = d_fake
self.d_real = d_real
self.adversarial_norm_fake_targets = [
[self.x, self.g]
]
if self.config.generator2:
self.adversarial_norm_fake_targets += [
[g2, self.gy]
]
return d_real, d_fake
def discriminator_fake_inputs(self):
return self.adversarial_norm_fake_targets
def discriminator_real_inputs(self):
if hasattr(self, 'x'):
return [self.x, self.y]
else:
return self.inputs.next()
def generator_components(self):
if self.config.generator2:
return [self.generator2, self.generator]
return [self.generator]
def discriminator_components(self):
return [self.discriminator]
class Generator(BaseGenerator):
def create(self):
self.linear = torch.nn.Linear(28*28*1, 1024)
self.relu = torch.nn.ReLU()
self.linear2 = torch.nn.Linear(1024, 10)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, input, context={}):
net = input
net = self.linear(net.reshape(self.gan.batch_size(), -1))
net = self.relu(net)
net = self.linear2(net)
net = self.sigmoid(net)
return net
class Discriminator(BaseDiscriminator):
def create(self):
self.linear = torch.nn.Linear(28*28*1+10, 1024)
self.linear2 = torch.nn.Linear(1024, 1)
self.tanh = torch.nn.Hardtanh()
self.relu = torch.nn.ReLU()
def forward(self, input, context={}):
net = torch.cat([input.reshape(self.gan.batch_size(), -1),context['digit']], 1)
net = self.linear(net)
net = self.relu(net)
net = self.linear2(net)
net = self.tanh(net)
return net
config = lookup_config(args)
if args.action == 'search':
search = RandomSearch({
'generator': {'class': Generator, 'end_features': 10},
'discriminator': {'class': Discriminator}
})
config = search.random_config()
inputs = InputLoader(args.batch_size)
def setup_gan(config, inputs, args):
gan = GAN(config, inputs=inputs)
return gan
def train(config, args):
gan = setup_gan(config, inputs, args)
trainable_gan = hg.TrainableGAN(gan, save_file = save_file, devices = args.devices, backend_name = args.backend)
test_batches = []
accuracy = 0
for i in range(args.steps):
trainable_gan.step()
if i == args.steps-1 or i % args.sample_every == 0 and i > 0:
correct_prediction = 0
total = 0
for (x,y) in gan.inputs.testdata():
prediction = gan.generator(x)
correct_prediction += (torch.argmax(prediction,1) == torch.argmax(y,1)).sum()
total += y.shape[0]
accuracy = (float(correct_prediction) / total)*100
print(config_name)
print("accuracy: ", accuracy)
return accuracy
def search(config, args):
    metrics = [train(config, args)]  # wrap in a list so the join below works for a single metric
config_filename = "classification-"+str(uuid.uuid4())+'.json'
hc.Selector().save(config_filename, config)
with open(args.search_output, "a") as myfile:
print("Writing result")
myfile.write(config_filename+","+",".join([str(x) for x in metrics])+"\n")
if args.action == 'train':
metrics = train(config, args)
print(config_name + ": resulting metrics:", metrics)
elif args.action == 'search':
search(config, args)
else:
print("Unknown action: "+args.action)
| 255BITS/HyperGAN | examples/classification.py | Python | mit | 8,744 |
"""
Extends Yapsy IPlugin interface to pass information about the board to plugins.
Fields of interest for plugins:
args: list of arguments passed to the plugins
sample_rate: actual sample rate of the board
eeg_channels: number of EEG
aux_channels: number of AUX channels
If needed, plugins that need to report an error can set self.is_activated to False during activate() call.
NB: because of how yapsy discovery system works, plugins must use the following syntax to inherit to use polymorphism (see http://yapsy.sourceforge.net/Advices.html):
import plugin_interface as plugintypes
class PluginExample(plugintypes.IPluginExtended):
...
"""
from yapsy.IPlugin import IPlugin
class IPluginExtended(IPlugin):
# args: passed by command line
def pre_activate(self, args, sample_rate=250, eeg_channels=8, aux_channels=3, imp_channels=0):
self.args = args
self.sample_rate = sample_rate
self.eeg_channels = eeg_channels
self.aux_channels = aux_channels
self.imp_channels = imp_channels
# by default we say that activation was okay -- inherited from IPlugin
self.is_activated = True
self.activate()
# tell outside world if init went good or bad
return self.is_activated
# inherited from IPlugin
def activate(self):
print "Plugin %s activated." % (self.__class__.__name__)
# inherited from IPlugin
def deactivate(self):
print "Plugin %s deactivated." % (self.__class__.__name__)
# plugins that require arguments should implement this method
def show_help(self):
print "I, %s, do not need any parameter." % (self.__class__.__name__)
| jfrey-xx/OpenBCI_Python | plugin_interface.py | Python | mit | 1,639 |
#!/usr/bin/env python
# Test whether a client subscribed to a topic receives its own message sent to that topic.
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
mid = 530
keepalive = 60
connect_packet = mosq_test.gen_connect("subpub-qos2-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
subscribe_packet = mosq_test.gen_subscribe(mid, "subpub/qos2", 2)
suback_packet = mosq_test.gen_suback(mid, 2)
mid = 301
publish_packet = mosq_test.gen_publish("subpub/qos2", qos=2, mid=mid, payload="message")
pubrec_packet = mosq_test.gen_pubrec(mid)
pubrel_packet = mosq_test.gen_pubrel(mid)
pubcomp_packet = mosq_test.gen_pubcomp(mid)
mid = 1
publish_packet2 = mosq_test.gen_publish("subpub/qos2", qos=2, mid=mid, payload="message")
pubrec_packet2 = mosq_test.gen_pubrec(mid)
pubrel_packet2 = mosq_test.gen_pubrel(mid)
pubcomp_packet2 = mosq_test.gen_pubcomp(mid)
cmd = ['../../src/mosquitto', '-p', '1888']
broker = mosq_test.start_broker(filename=os.path.basename(__file__), cmd=cmd)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20)
sock.send(subscribe_packet)
if mosq_test.expect_packet(sock, "suback", suback_packet):
sock.send(publish_packet)
if mosq_test.expect_packet(sock, "pubrec", pubrec_packet):
sock.send(pubrel_packet)
if mosq_test.expect_packet(sock, "pubcomp", pubcomp_packet):
if mosq_test.expect_packet(sock, "publish2", publish_packet2):
sock.send(pubrec_packet2)
if mosq_test.expect_packet(sock, "pubrel2", pubrel_packet2):
# Broker side of flow complete so can quit here.
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
exit(rc)
| telefonicaid/fiware-IoTAgent-Cplusplus | third_party/mosquitto-1.4.4/test/broker/02-subpub-qos2.py | Python | agpl-3.0 | 2,204 |
from setuptools import setup
setup(name='GithubEmailHook',
version='1.0',
description='Github Email Hook',
author='David Shea',
author_email='[email protected]',
url='http://github.com/dashea/github-email-hook',
packages=['github_email_hook'],
install_requires=open("requirements.txt").readlines()
)
| rhinstaller/github-email-hook | setup.py | Python | gpl-2.0 | 347 |
import os
import unittest
import tempfile
import sqlite3
from subsetter import Db
class DummyArgs(object):
logarithmic = False
fraction = 0.25
force_rows = {}
children = 25
config = {}
exclude_tables = []
full_tables = []
buffer = 1000
dummy_args = DummyArgs()
class OverallTest(unittest.TestCase):
def setUp(self):
schema = ["CREATE TABLE state (abbrev, name)",
"CREATE TABLE zeppos (name, home_city)",
"""CREATE TABLE city (name, state_abbrev,
FOREIGN KEY (state_abbrev)
REFERENCES state(abbrev))""",
"""CREATE TABLE landmark (name, city,
FOREIGN KEY (city)
REFERENCES city(name))""",
"""CREATE TABLE zeppelins (name, home_city,
FOREIGN KEY (home_city)
REFERENCES city(name))""", # NULL FKs
"""CREATE TABLE languages_better_than_python (name)""", # empty table
]
self.source_db_filename = tempfile.mktemp()
self.source_db = sqlite3.connect(self.source_db_filename)
self.source_sqla = "sqlite:///%s" % self.source_db_filename
self.dest_db_filename = tempfile.mktemp()
self.dest_db = sqlite3.connect(self.dest_db_filename)
self.dest_sqla = "sqlite:///%s" % self.dest_db_filename
for statement in schema:
self.source_db.execute(statement)
self.dest_db.execute(statement)
for params in (('MN', 'Minnesota'), ('OH', 'Ohio'),
('MA', 'Massachussetts'), ('MI', 'Michigan')):
self.source_db.execute("INSERT INTO state VALUES (?, ?)", params)
for params in (('Duluth', 'MN'), ('Dayton', 'OH'),
('Boston', 'MA'), ('Houghton', 'MI')):
self.source_db.execute("INSERT INTO city VALUES (?, ?)", params)
for params in (('Lift Bridge', 'Duluth'), ("Mendelson's", 'Dayton'),
('Trinity Church', 'Boston'), ('Michigan Tech', 'Houghton')):
self.source_db.execute("INSERT INTO landmark VALUES (?, ?)", params)
for params in (('Graf Zeppelin', None), ('USS Los Angeles', None),
('Nordstern', None), ('Bodensee', None)):
self.source_db.execute("INSERT INTO zeppelins VALUES (?, ?)", params)
for params in (('Zeppo Marx', 'New York City'), ):
self.source_db.execute("INSERT INTO zeppos VALUES (?, ?)", params)
self.source_db.commit()
self.dest_db.commit()
def tearDown(self):
self.source_db.close()
os.unlink(self.source_db_filename)
self.dest_db.close()
os.unlink(self.dest_db_filename)
def test_parents_kept(self):
src = Db(self.source_sqla, dummy_args)
dest = Db(self.dest_sqla, dummy_args)
src.assign_target(dest)
src.create_subset_in(dest)
cities = self.dest_db.execute("SELECT * FROM city").fetchall()
self.assertEqual(len(cities), 1)
joined = self.dest_db.execute("""SELECT c.name, s.name
FROM city c JOIN state s
ON (c.state_abbrev = s.abbrev)""")
joined = joined.fetchall()
self.assertEqual(len(joined), 1)
def test_null_foreign_keys(self):
src = Db(self.source_sqla, dummy_args)
dest = Db(self.dest_sqla, dummy_args)
src.assign_target(dest)
src.create_subset_in(dest)
zeppelins = self.dest_db.execute("SELECT * FROM zeppelins").fetchall()
self.assertEqual(len(zeppelins), 1)
def test_exclude_tables(self):
args_with_exclude = DummyArgs()
args_with_exclude.exclude_tables = ['zeppelins',]
src = Db(self.source_sqla, args_with_exclude)
dest = Db(self.dest_sqla, args_with_exclude)
src.assign_target(dest)
src.create_subset_in(dest)
zeppelins = self.dest_db.execute("SELECT * FROM zeppelins").fetchall()
self.assertEqual(len(zeppelins), 0)
def test_full_tables(self):
args_with_full = DummyArgs()
args_with_full.full_tables = ['city',]
src = Db(self.source_sqla, args_with_full)
dest = Db(self.dest_sqla, args_with_full)
src.assign_target(dest)
src.create_subset_in(dest)
cities = self.dest_db.execute("SELECT * FROM city").fetchall()
self.assertEqual(len(cities), 4)
def test_exclude_tables_wildcard(self):
args_with_exclude = DummyArgs()
args_with_exclude.exclude_tables = ['zep*',]
src = Db(self.source_sqla, args_with_exclude)
dest = Db(self.dest_sqla, args_with_exclude)
src.assign_target(dest)
src.create_subset_in(dest)
zeppelins = self.dest_db.execute("SELECT * FROM zeppelins").fetchall()
self.assertEqual(len(zeppelins), 0)
zeppos = self.dest_db.execute("SELECT * FROM zeppos").fetchall()
self.assertEqual(len(zeppos), 0)
| Shoptap/rdbms-subsetter | test_subsetter.py | Python | cc0-1.0 | 5,202 |
from qtpy import QtGui, QtCore, QtWidgets
from qtpy.QtCore import QPoint
class StandardItemModelIterator(object):
def __init__(self, model):
self.model = model
self.pos = 0
def __next__(self):
if self.pos < self.model.rowCount():
item = self.model.item(self.pos)
self.pos += 1
return item
else:
raise StopIteration
next = __next__
class SequenceStandardItemModel(QtGui.QStandardItemModel):
"""
an iterable and indexable StandardItemModel
"""
def __iter__(self):
return StandardItemModelIterator(self)
def __getitem__(self, key):
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
if start is None:
start = 0
if stop is None:
stop = self.rowCount()
if step is None:
step = 1
return [self.item(i) for i in range(start, stop, step)]
else:
if key >= self.rowCount():
raise IndexError("index %d is out of range" % key)
return self.item(key)
def __len__(self):
return self.rowCount()
class StandardItem(QtGui.QStandardItem):
def __init__(self, value):
super(StandardItem, self).__init__(value)
def get_checked(self):
return self.checkState() == QtCore.Qt.Checked
def set_checked(self, value):
if isinstance(value, bool):
qtvalue = (QtCore.Qt.Unchecked, QtCore.Qt.Checked)[value]
else:
qtvalue = QtCore.Qt.PartiallyChecked
self.setCheckState(qtvalue)
checked = property(get_checked, set_checked)
class FilterMenu(QtWidgets.QMenu):
activate = QtCore.Signal(int)
checkedItemsChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(QtWidgets.QMenu, self).__init__(parent)
self._list_view = QtWidgets.QListView(parent)
self._list_view.setFrameStyle(0)
model = SequenceStandardItemModel()
self._list_view.setModel(model)
self._model = model
self.addItem("(select all)")
model[0].setTristate(True)
action = QtWidgets.QWidgetAction(self)
action.setDefaultWidget(self._list_view)
self.addAction(action)
self.installEventFilter(self)
self._list_view.installEventFilter(self)
self._list_view.window().installEventFilter(self)
model.itemChanged.connect(self.on_model_item_changed)
self._list_view.pressed.connect(self.on_list_view_pressed)
self.activate.connect(self.on_activate)
def on_list_view_pressed(self, index):
item = self._model.itemFromIndex(index)
# item is None when the button has not been used yet (and this is
# triggered via enter)
if item is not None:
item.checked = not item.checked
def on_activate(self, row):
target_item = self._model[row]
for item in self._model[1:]:
item.checked = item is target_item
def on_model_item_changed(self, item):
model = self._model
model.blockSignals(True)
if item.index().row() == 0:
# (un)check first => (un)check others
for other in model[1:]:
other.checked = item.checked
items_checked = [item for item in model[1:] if item.checked]
num_checked = len(items_checked)
if num_checked == 0 or num_checked == len(model) - 1:
model[0].checked = bool(num_checked)
        else:
            model[0].checked = 'partial'
model.blockSignals(False)
checked_indices = [i for i, item in enumerate(model[1:]) if item.checked]
self.checkedItemsChanged.emit(checked_indices)
def select_offset(self, offset):
"""offset: 1 for next, -1 for previous"""
model = self._model
model.blockSignals(True)
indices_checked = [i for i, item in enumerate(model) if item.checked]
first_checked = indices_checked[0]
# check first_checked + offset, uncheck the rest
to_check = first_checked + offset
# wrap around
to_check = to_check if to_check < len(model) else 1
to_check = to_check if to_check > 0 else len(model) - 1
is_checked = ["partial"] + [i == to_check for i in range(1, len(model))]
for checked, item in zip(is_checked, model):
item.checked = checked
model.blockSignals(False)
self.checkedItemsChanged.emit([to_check - 1])
def addItem(self, text):
item = StandardItem(text)
# not editable
item.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
item.checked = True
self._model.appendRow(item)
def addItems(self, items):
for item in items:
self.addItem(item)
def eventFilter(self, obj, event):
event_type = event.type()
if event_type == QtCore.QEvent.KeyRelease:
key = event.key()
# tab key closes the popup
if obj == self._list_view.window() and key == QtCore.Qt.Key_Tab:
self.hide()
# return key activates *one* item and closes the popup
# first time the key is sent to the menu, afterwards to
# list_view
elif (obj == self._list_view and
key in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return)):
self.activate.emit(self._list_view.currentIndex().row())
self.hide()
return True
return False
class FilterComboBox(QtWidgets.QToolButton):
checkedItemsChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(FilterComboBox, self).__init__(parent)
self.setText("(no filter)")
# QtGui.QToolButton.InstantPopup would be slightly less work (the
# whole button works by default, instead of only the arrow) but it is
# uglier
self.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup)
menu = FilterMenu(self)
self.setMenu(menu)
self._menu = menu
menu.checkedItemsChanged.connect(self.on_checked_items_changed)
self.installEventFilter(self)
def on_checked_items_changed(self, indices_checked):
num_checked = len(indices_checked)
model = self._menu._model
if num_checked == 0 or num_checked == len(model) - 1:
self.setText("(no filter)")
elif num_checked == 1:
self.setText(model[indices_checked[0] + 1].text())
else:
self.setText("multi")
self.checkedItemsChanged.emit(indices_checked)
def addItem(self, text):
self._menu.addItem(text)
def addItems(self, items):
self._menu.addItems(items)
def eventFilter(self, obj, event):
event_type = event.type()
# this is not enabled because it causes all kind of troubles
# if event_type == QtCore.QEvent.KeyPress:
# key = event.key()
#
# # allow opening the popup via enter/return
# if (obj == self and
# key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter)):
# self.showMenu()
# return True
if event_type == QtCore.QEvent.KeyRelease:
key = event.key()
# allow opening the popup with up/down
if (obj == self and
key in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down,
QtCore.Qt.Key_Space)):
self.showMenu()
return True
# return key activates *one* item and closes the popup
# first time the key is sent to self, afterwards to list_view
elif (obj == self and
key in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return)):
self._menu.activate.emit(self._list_view.currentIndex().row())
self._menu.hide()
return True
if event_type == QtCore.QEvent.MouseButtonRelease:
# clicking anywhere (not just arrow) on the button shows the popup
if obj == self:
self.showMenu()
return False
def wheelEvent(self, event):
delta = event.angleDelta()
assert isinstance(delta, QPoint)
offset = 1 if delta.y() < 0 else -1
self._menu.select_offset(offset)
if __name__ == '__main__':
import sys
class TestDialog(QtWidgets.QDialog):
def __init__(self):
super(QtWidgets.QDialog, self).__init__()
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
combo = FilterComboBox(self)
for i in range(20):
combo.addItem('Item %s' % i)
layout.addWidget(combo)
app = QtWidgets.QApplication(sys.argv)
dialog = TestDialog()
dialog.resize(200, 200)
dialog.show()
sys.exit(app.exec_())
| larray-project/larray-editor | larray_editor/combo.py | Python | gpl-3.0 | 9,067 |
"""
ThisIsMyJam OAuth1 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/thisismyjam.html
"""
from social.backends.oauth import BaseOAuth1
class ThisIsMyJamOAuth1(BaseOAuth1):
"""ThisIsMyJam OAuth1 authentication backend"""
name = 'thisismyjam'
REQUEST_TOKEN_URL = 'http://www.thisismyjam.com/oauth/request_token'
AUTHORIZATION_URL = 'http://www.thisismyjam.com/oauth/authorize'
ACCESS_TOKEN_URL = 'http://www.thisismyjam.com/oauth/access_token'
REDIRECT_URI_PARAMETER_NAME = 'oauth_callback'
def get_user_details(self, response):
"""Return user details from ThisIsMyJam account"""
return {
'username': response.get('person').get('name'),
'fullname': response.get('person').get('fullname'),
'email': '',
'first_name': '',
'last_name': ''
}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json('http://api.thisismyjam.com/1/verify.json',
auth=self.oauth_auth(access_token))
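# A minimal wiring sketch (assumed Django settings snippet, not part of this module):
# python-social-auth backends are typically enabled by adding their dotted path to
# AUTHENTICATION_BACKENDS in the project settings, e.g.
#
#     AUTHENTICATION_BACKENDS = (
#         'social.backends.thisismyjam.ThisIsMyJamOAuth1',
#         'django.contrib.auth.backends.ModelBackend',
#     )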
| HackerEcology/SuggestU | suggestu/social/backends/thisismyjam.py | Python | gpl-3.0 | 1,108 |
from django.shortcuts import render
# Create your views here.
def home(request):
    return render(request, 'index.html', locals())
| SnoozeTime/pythonanywhere | myapi/views.py | Python | apache-2.0 | 129 |