| repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
glemaitre/protoclass | protoclass/tool/dicom_manip.py | 1 | 14985 |
#title :dicom_manip.py
#description :This will create a header for a python script.
#author :Guillaume Lemaitre
#date :2015/04/20
#version :0.1
#notes :
#python_version :2.7.6
#==============================================================================
# Import the needed libraries
# Numpy library
import numpy as np
# SimpleITK library
import SimpleITK as sitk
# Joblib library
### Module to performed parallel processing
from joblib import Parallel, delayed
### Module to performed parallel processing
import multiprocessing
# OS library
import os
from os.path import join, isdir, isfile
# Import namedtuple
from collections import namedtuple
def OpenRawImageOCT(filename, size, dtype='uint8', reverse=True):
"""Function to read a raw image. The size as to be known
Parameters
----------
filename: str
Filename of the raw image.
size: tuple of ints (X, Y, Z)
Tuple with 2 or 3 values depending of the dimensionality of the data.
dtype: default - uint8
Type of the raw data.
reverse: bool
We have maybe to return the data for more convenience.
Returns
-------
im_numpy: ndarray
A 2D or 3D numpy array in the order ()
"""
from skimage import img_as_float
size_OCT = (size[1], size[2], size[0])
# Data are stored as (Y, Z, X)
im_numpy = np.fromfile(filename, dtype=dtype, sep="").reshape(size_OCT)
# We need to roll the x axis to obtain (X, Y, Z)
im_numpy = np.rollaxis(im_numpy, 2, 0)
# Return the data if needed
im_numpy_cp = im_numpy.copy()
if reverse == True:
for sl in range(im_numpy.shape[2]):
im_numpy[:,:,-sl] = im_numpy_cp[:,:,sl]
return img_as_float(im_numpy)
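# Usage sketch (hypothetical filename and acquisition size; adjust them to the actual scanner output):
#   vol = OpenRawImageOCT('patient_01_oct.img', (512, 128, 1024))
#   vol.shape   # -> (512, 128, 1024), i.e. (X, Y, Z)
#   vol.max()   # <= 1.0, since img_as_float rescales the uint8 data to [0, 1]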
def OpenOneSerieDCM(path_to_serie, reverse=False):
"""Function to read a single serie DCM to return a 3D volume
Parameters
----------
path_to_serie: str
The path to the folder containing all the dicom images.
reverse: bool
Since that there is a mistake in the data we need to flip in z the gt.
Have to be corrected in the future.
Returns
-------
im_numpy: ndarray
A 3D array containing the volume extracted from the DCM serie.
"""
# Define the object in order to read the DCM serie
reader = sitk.ImageSeriesReader()
# Get the DCM filenames of the serie
dicom_names = reader.GetGDCMSeriesFileNames(path_to_serie)
# Set the filenames to read
reader.SetFileNames(dicom_names)
# Build the volume from the set of 2D images
im = reader.Execute()
# Convert the image into a numpy matrix
im_numpy = sitk.GetArrayFromImage(im)
# The Matlab convention is (Y, X, Z)
# The Numpy convention is (Z, Y, X)
# We have to swap these axis
### Swap Z and X
im_numpy = np.swapaxes(im_numpy, 0, 2)
im_numpy = np.swapaxes(im_numpy, 0, 1)
im_numpy_cp = im_numpy.copy()
if reverse == True:
#print 'Inversing the GT'
for sl in range(im_numpy.shape[2]):
im_numpy[:,:,-sl] = im_numpy_cp[:,:,sl]
return im_numpy.astype(float)
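# Usage sketch (hypothetical path): read a T2W DICOM series into a float volume ordered (Y, X, Z):
#   volume = OpenOneSerieDCM('/data/Patient_01/T2W')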
def OpenVolumeNumpy(filename, reverse_volume=False, **kwargs):
"""Function to read a numpy array previously saved
Parameters
----------
filename: str
Filename of the numpy array *.npy.
reverse_volume: bool
Since that there is a mistake in the data we need to flip in z the gt.
Have to be corrected in the future.
Returns
-------
im_numpy: ndarray
A 3D array containing the volume.
"""
if filename.endswith('.npy'):
# Open the volume
im_numpy = np.load(filename)
# Copy the volume temporary
im_numpy_cp = im_numpy.copy()
if reverse_volume == True:
#print 'Inversing the GT'
for sl in range(im_numpy.shape[2]):
im_numpy[:,:,-sl] = im_numpy_cp[:,:,sl]
return im_numpy
elif filename.endswith('.npz'):
# Get the keyword of the name of the variable to extract
name_var_extract = kwargs.pop('name_var_extract', None)
# Get the volume from file
npzfile = np.load(filename)
im_numpy = npzfile[name_var_extract]
# Copy the volume temporary
im_numpy_cp = im_numpy.copy()
if reverse_volume == True:
#print 'Inversing the GT'
for sl in range(im_numpy.shape[2]):
im_numpy[:,:,-sl] = im_numpy_cp[:,:,sl]
return im_numpy
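# Usage sketch (hypothetical filenames): a plain .npy file is loaded directly, while an .npz
# archive needs the keyword name_var_extract to select the stored variable:
#   vol = OpenVolumeNumpy('/data/Patient_01/volume.npy')
#   vol = OpenVolumeNumpy('/data/Patient_01/volume.npz', name_var_extract='vol')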
def OpenSerieUsingGTDCM(path_to_data, path_to_gt, reverse_gt=True, reverse_data=False):
"""Function to read a DCM volume and apply a GT mask
Parameters
----------
path_to_data: str
Path containing the modality data.
path_to_gt: str
Path containing the gt.
reverse_gt: bool
Since that there is a mistake in the data we need to flip in z the gt.
Have to be corrected in the future.
Returns
-------
volume_data: ndarray
A 3D array containing the volume extracted from the DCM serie.
The data not corresponding to the GT of interest will be tagged NaN.
"""
# Open the data volume
volume_data = OpenOneSerieDCM(path_to_data, reverse_data)
# Open the gt volume
tmp_volume_gt = OpenOneSerieDCM(path_to_gt)
volume_gt = tmp_volume_gt.copy()
if reverse_gt == True:
#print 'Inversing the GT'
for sl in range(volume_gt.shape[2]):
volume_gt[:,:,-sl] = tmp_volume_gt[:,:,sl]
# Affect all the value which are 0 in the gt to NaN
volume_data[(volume_gt == 0).nonzero()] = np.NaN
# Return the volume read
return volume_data
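# Usage sketch (hypothetical paths): mask a modality volume with its ground truth; voxels where
# the GT equals 0 come back as NaN and can be ignored later with np.nanmean and friends:
#   volume = OpenSerieUsingGTDCM('/data/Patient_01/T2W', '/data/Patient_01/GT_prostate')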
def OpenDataLabel(path_to_data):
"""Function to read data and label form an *.npz file
Parameters
----------
path_to_serie: str
The path to the *.npz file.
Returns
-------
data: ndarray
A list of 2D matrix containing the data.
label: ndarray
A list of 1D vector containing the label associated to the data matrix.
"""
if not (isfile(path_to_data) and path_to_data.endswith('.npz')):
# Check that the path is in fact a file and npz format
raise ValueError('protoclass.tool.OpenDataLabel: An *.npz file is expected.')
else:
# The file can be considered
npzfile = np.load(path_to_data)
# return the desired variable
return (npzfile['data'], npzfile['label'])
def GetGTSamples(path_to_gt, reverse_gt=True, pos_value=255.):
"""Function to return the samples corresponding to the ground-truth
Parameters
----------
path_to_gt: str
Path containing the gt.
reverse_gt: bool
Since that there is a mistake in the data we need to flip in z the gt.
Have to be corrected in the future.
reverse_gt: numeric or bool
Value considered as the positive class. By default it is 255., but it could be
1 or True
Returns
-------
idx_gt: ndarray
A 3D array containing the volume extracted from the DCM serie.
The data not corresponding to the GT of interest will be tagged NaN.
"""
# Open the gt volume
tmp_volume_gt = OpenOneSerieDCM(path_to_gt)
volume_gt = tmp_volume_gt.copy()
if reverse_gt == True:
#print 'Inversing the GT'
for sl in range(volume_gt.shape[2]):
volume_gt[:,:,-sl] = tmp_volume_gt[:,:,sl]
# Get the samples that we are interested with
return np.nonzero(volume_gt == pos_value)
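# Usage sketch (hypothetical path): get the indices of the positive-class voxels of a GT stored
# as a DICOM series where 255 marks the positive class, then index a matching volume with them:
#   idx_gt = GetGTSamples('/data/Patient_01/GT_prostate')
#   roi = volume[idx_gt]   # assuming `volume` was read with OpenOneSerieDCM and has the same shape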
def VolumeToLabelUsingGT(volume, path_to_gt, reverse_gt=True):
return BinariseLabel(volume[GetGTSamples(path_to_gt, reverse_gt)])
def OpenResult(path_to_result):
"""Function to read results: label and roc information
Parameters
----------
path_to_result: str
Path containing the filename of the result file.
Returns
-------
pred_label: 1D array
The label results for the patient considered as test.
roc: namedtuple
A named tuple such as roc_auc = namedtuple('roc_auc', ['fpr', 'tpr', 'thresh', 'auc'])
"""
# The results are saved into a npz file
if not (isfile(path_to_result) and path_to_result.endswith('.npz')):
raise ValueError('protoclass.tool.dicom_manip: The result file is not an *.npz file')
else:
# Load the file
npzfile = np.load(path_to_result)
# Define our namedtuple
roc_auc = namedtuple('roc_auc', ['fpr', 'tpr', 'thresh', 'auc'])
roc = roc_auc._make(npzfile['roc'])
pred_label = npzfile['pred_label']
return (pred_label, roc)
def __VolumeMinMax__(path_patient):
"""Private function in order to return min max of a 3D volume
Parameters
----------
path_patient: str
Path where the data are localised.
Returns
-------
(min_int, max_int): tuple
Return a tuple containing the minimum and maximum for the patient.
"""
# Check if we have either a file or a directory
if isdir(path_patient):
# Read a volume for the current patient
volume = OpenOneSerieDCM(path_patient)
elif isfile(path_patient):
volume = OpenVolumeNumpy(path_patient)
# Return a tuple with the min and max
return(np.min(volume), np.max(volume))
def FindExtremumDataSet(path_to_data, **kwargs):
"""Function to find the minimum and maximum intensities
in a 3D volume
Parameters
----------
path_to_data: str
Path containing the modality data.
modality: str
String containing the name of the modality to treat.
Returns
-------
(min_int, max_int): tuple
A tuple containing the minimum and the maximum intensities.
"""
# Define the path to the modality
path_modality = kwargs.pop('modality', 'T2W')
# Create a list with the path name
path_patients = []
for dirs in os.listdir(path_to_data):
# Create the path variable
path_patient = join(path_to_data, dirs)
path_patients.append(join(path_patient, path_modality))
# Compute the min/max of each patient in parallel
num_cores = multiprocessing.cpu_count()
# Check if we have original DICOM or Numpy volume
min_max_list = Parallel(n_jobs=num_cores)(delayed(__VolumeMinMax__)(path) for path in path_patients)
# Convert the list into numpy array
min_max_array = np.array(min_max_list)
return (np.min(min_max_array), np.max(min_max_array))
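# Usage sketch: this assumes a layout <path_to_data>/<patient>/<modality> (hypothetical paths),
# each modality folder holding either a DICOM series or a saved numpy volume:
#   min_int, max_int = FindExtremumDataSet('/data', modality='T2W')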
def BinariseLabel(label):
"""Function to find the minimum and maximum intensities
in a 3D volume
Parameters
----------
label: array
Array with values usually 0. and 255. .
Returns
-------
label: array
Array with values either -1. or 1. .
"""
label[np.nonzero(label>0)] = 1.
label = label * 2. - 1.
return label
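# Worked example: BinariseLabel(np.array([0., 255., 0.])) first sets the positive entries to 1.,
# then maps everything through x * 2. - 1., returning array([-1., 1., -1.]).
# Note that the thresholding step mutates the input array in place.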
def __VolumePercentilesFromPath__(path_patient, path_gt, n_landmarks=5, min_perc=2., max_perc=98.):
"""Private function in order to find the different percentiles of a dataset
Parameters
----------
path_patient: str
Path where the patient data are located.
path_gt: str
Path where the corresponding ground truth is located.
n_landmarks: int (default=5)
Number of landmarks which have to be extracted
min_perc: float (default=2.)
The minimum percentile of interest
max_perc: float (default=98.)
The maximum percentile of interest
Returns
-------
intensities_arr: array
Return an array with the intensities corresponding to the percentiles of interest.
"""
# Check if we have either a file or a directory
if isdir(path_patient):
# Read a volume for the current patient
volume = OpenOneSerieDCM(path_patient)
volume_emd_gt = OpenSerieUsingGTDCM(path_patient, path_gt)
elif isfile(path_patient):
volume = OpenVolumeNumpy(path_patient)
prostate_data = volume_emd_gt[np.nonzero(~np.isnan(volume_emd_gt))]
intensities_arr = []
# Find iteratively the different percentiles of the volume of interest
### Create the array of percentiles to find
perc_arr = np.linspace(min_perc, max_perc, num=n_landmarks, endpoint=True)
for perc in perc_arr:
intensities_arr.append(np.percentile(prostate_data, perc))
# Return a tuple with the min and max
return np.array(intensities_arr)
def __VolumePercentilesFromData__(volume, n_landmarks=5, min_perc=2., max_perc=98.):
"""Private function in order to find the different percentiles of a dataset
Parameters
----------
volume: array
Array with the data.
n_landmarks: int (default=5)
Number of landmarks which have to be extracted
min_perc: float (default=2.)
The minimum percentile of interest
max_perc: float (default=98.)
The maximum percentile of interest
Returns
-------
intensities_arr: array
Return an array with the intensities corresponding to the percentiles of interest.
"""
intensities_arr = []
# Find iteratively the different percentiles of the volume of interest
### Create the array of percentiles to find
perc_arr = np.linspace(min_perc, max_perc, num=n_landmarks, endpoint=True)
for perc in perc_arr:
intensities_arr.append(np.percentile(volume, perc))
# Return a tuple with the min and max
return np.array(intensities_arr)
def FindLandmarksDataset(path_to_data, path_modality, path_gt, n_landmarks=5, min_perc=2., max_perc=98.):
"""Function to find the minimum and maximum intensities
in a 3D volume
Parameters
----------
path_to_data: str
Path containing the modality data.
modality: str
String containing the name of the modality to treat.
Returns
-------
(min_int, max_int): tuple
A tuple containing the minimum and the maximum intensities.
"""
# Create a list with the path name
path_patients = []
path_patients_gt = []
for dirs in os.listdir(path_to_data):
# Create the path variable
path_patient = join(path_to_data, dirs)
path_patients.append(join(path_patient, path_modality))
path_patients_gt.append(join(path_patient, path_gt))
# Compute the percentile landmarks of each patient in parallel
num_cores = multiprocessing.cpu_count()
# Check if we have original DICOM or Numpy volume
intensities_list = Parallel(n_jobs=num_cores)(delayed(__VolumePercentilesFromPath__)(path, path2, n_landmarks, min_perc, max_perc)
for (path, path2) in zip(path_patients, path_patients_gt))
# Convert the list into numpy array
intensities_list = np.array(intensities_list)
# We have to return the mean landmarks
return (np.mean(intensities_list, axis=0))
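# Usage sketch (hypothetical layout <path_to_data>/<patient>/<path_modality> and
# <path_to_data>/<patient>/<path_gt>): average the percentile landmarks over all patients:
#   landmarks = FindLandmarksDataset('/data', 'T2W', 'GT_prostate', n_landmarks=5)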
| gpl-2.0 | 5,627,377,868,135,266,000 | 29.833333 | 135 | 0.624424 | false |
andburn/python-unitypack | unitypack/environment.py | 1 | 2335 |
import os
from urllib.parse import urlparse
from .asset import Asset
from .assetbundle import AssetBundle
class UnityEnvironment:
def __init__(self, base_path=""):
self.bundles = {}
self.assets = {}
self.base_path = base_path
self.files = []
def __del__(self):
for f in self.files:
f.close()
def __repr__(self):
return "%s(base_path=%r)" % (self.__class__.__name__, self.base_path)
def load(self, file):
for bundle in self.bundles.values():
if os.path.abspath(file.name) == os.path.abspath(bundle.path):
return bundle
ret = AssetBundle(self)
ret.load(file)
self.bundles[ret.name.lower()] = ret
for asset in ret.assets:
self.assets[asset.name.lower()] = asset
return ret
def discover(self, name):
for bundle in list(self.bundles.values()):
dirname = os.path.dirname(os.path.abspath(bundle.path))
for filename in os.listdir(dirname):
basename = os.path.splitext(os.path.basename(filename))[0]
if name.lower() == "cab-" + basename.lower():
f = open(os.path.join(dirname, filename), "rb")
self.files.append(f)
self.load(f)
def get_asset_by_filename(self, name):
if name not in self.assets:
path = os.path.join(self.base_path, name)
if os.path.exists(path):
f = open(path, "rb")
self.files.append(f)
self.assets[name] = Asset.from_file(f)
else:
self.discover(name)
self.populate_assets()
if name not in self.assets:
raise KeyError("No such asset: %r" % (name))
return self.assets[name]
def populate_assets(self):
for bundle in self.bundles.values():
for asset in bundle.assets:
asset_name = asset.name.lower()
if asset_name not in self.assets:
self.assets[asset_name] = asset
def get_asset(self, url):
if not url:
return None
u = urlparse(url)
if u.scheme == "archive":
archive, name = os.path.split(u.path.lstrip("/").lower())
else:
raise NotImplementedError("Unsupported scheme: %r" % (u.scheme))
if archive not in self.bundles:
self.discover(archive)
# Still didn't find it? Give up...
if archive not in self.bundles:
raise NotImplementedError("Cannot find %r in %r" % (archive, self.bundles))
bundle = self.bundles[archive]
for asset in bundle.assets:
if asset.name.lower() == name:
return asset
raise KeyError("No such asset: %r" % (name))
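# Usage sketch (hypothetical bundle and CAB names): load a bundle and resolve an asset through an
# "archive:" URL; any other URL scheme raises NotImplementedError.
#   env = UnityEnvironment(base_path="/game/Data")
#   bundle = env.load(open("/game/Data/cards0.unity3d", "rb"))
#   asset = env.get_asset("archive:/cab-cards0/cab-cards0")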
| mit | -976,772,978,860,595,700 | 26.470588 | 79 | 0.659529 | false |
google/fedjax | fedjax/legacy/datasets/toy_regression_test.py | 1 | 1381 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax.legacy.datasets.toy_regression."""
from fedjax.legacy.datasets import toy_regression
import tensorflow as tf
class ToyRegressionDataTest(tf.test.TestCase):
def test_load_data(self):
num_clients = 10
train_data, test_data = toy_regression.load_data(
num_clients=num_clients, num_domains=2, num_points=100, seed=10)
client_id = train_data.client_ids[3]
train_client_data = list(train_data.create_tf_dataset_for_client(client_id))
test_client_data = list(test_data.create_tf_dataset_for_client(client_id))
self.assertLen(train_data.client_ids, num_clients)
self.assertEqual(train_data.client_ids, test_data.client_ids)
self.assertNotAllEqual(train_client_data[0]['y'], test_client_data[0]['y'])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -6,813,115,389,254,925,000 | 36.324324 | 80 | 0.731354 | false |
phev8/ward-metrics | wardmetrics/visualisations.py | 1 | 16641 |
import matplotlib.pyplot as plt
def plot_events_with_segment_scores(segment_results, ground_truth_events, detected_events, use_datetime_x=False, show=True):
"""
Plot ground-truth events, detected events and the scored segments on a shared timeline.
:param segment_results: list of segments, each as (start, end, gt_label, detection_label, segment_category, score)
:param ground_truth_events: list of ground-truth events as (start, end) pairs
:param detected_events: list of detected events as (start, end) pairs
:param use_datetime_x: use datetime values on the x axis (conversion not implemented yet, see TODO below)
:param show: call the blocking plt.show() if True, otherwise plt.draw()
:return: None
"""
fig = plt.figure(figsize=(10, 3))
# TODO: convert times to datetime if flag is set
# write y axis labels for ground truth and detections
plt.yticks([0.2, 0.5, 0.8], ["detections", "segment score", "actual events"])
plt.ylim([0, 1])
for d in detected_events:
plt.axvspan(d[0], d[1], 0, 0.5)
for gt in ground_truth_events:
plt.axvspan(gt[0], gt[1], 0.5, 1)
for s in segment_results:
color = "black"
index_of_cat = 4
if s[index_of_cat] == "TP":
color = "green"
elif s[index_of_cat] == "FP":
color = "red"
elif s[index_of_cat] == "FN":
color = "yellow"
elif s[index_of_cat] == "TN":
color = "blue"
# TODO: format text nicely
plt.text((s[1]+s[0])/2, 0.8, s[2], horizontalalignment='center', verticalalignment='center')
plt.text((s[1]+s[0])/2, 0.2, s[3], horizontalalignment='center', verticalalignment='center')
plt.text((s[1]+s[0])/2, 0.5, s[5], horizontalalignment='center', verticalalignment='center')
plt.axvspan(s[0], s[1], 0.4, 0.6, color=color)
plt.axvline(s[0], color="black")
plt.axvline(s[1], color="black")
plt.tight_layout()
if show:
plt.show()
else:
plt.draw()
def plot_events_with_event_scores(gt_event_scores, detected_event_scores, ground_truth_events, detected_events, show=True):
fig = plt.figure(figsize=(10, 3))
for i in range(len(detected_events)):
d = detected_events[i]
plt.axvspan(d[0], d[1], 0, 0.5)
plt.text((d[1] + d[0]) / 2, 0.2, detected_event_scores[i], horizontalalignment='center', verticalalignment='center')
for i in range(len(ground_truth_events)):
gt = ground_truth_events[i]
plt.axvspan(gt[0], gt[1], 0.5, 1)
plt.text((gt[1] + gt[0]) / 2, 0.8, gt_event_scores[i], horizontalalignment='center', verticalalignment='center')
plt.tight_layout()
if show:
plt.show()
else:
plt.draw()
def plot_twoset_metrics(results, startangle=120):
fig1, axarr = plt.subplots(1, 2)
# plot positive rates:
labels_1 = ["tpr", "us", "ue", "fr", "dr"]
values_1 = [
results["tpr"],
results["us"],
results["ue"],
results["fr"],
results["dr"]
]
axarr[0].pie(values_1, labels=labels_1, autopct='%1.0f%%', startangle=startangle)
axarr[0].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
# TODO: add title
# plot negative rates:
labels_2 = ["1-fpr", "os", "oe", "mr", "ir"]
values_2 = [
1-results["fpr"],
results["os"],
results["oe"],
results["mr"],
results["ir"]
]
axarr[1].pie(values_2, labels=labels_2, autopct='%1.0f%%', startangle=startangle)
axarr[1].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
# TODO: add title
plt.show()
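# Usage sketch: `results` is expected to carry the two-set metric rates plotted above
# (the values below are made up):
#   results = {"tpr": 0.7, "us": 0.1, "ue": 0.1, "fr": 0.05, "dr": 0.05,
#              "fpr": 0.2, "os": 0.05, "oe": 0.05, "mr": 0.05, "ir": 0.05}
#   plot_twoset_metrics(results)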
def plot_segment_counts(results):
# TODO: add title
labels = results.keys()
values = []
for label in labels:
values.append(results[label])
#explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
total = sum(values)
fig1, ax1 = plt.subplots()
ax1.pie(values, labels=labels, autopct=lambda p: '{:.0f}'.format(p * total / 100), startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
def plot_event_analysis_diagram(event_results, **kwargs):
""" Plot the event analysis diagram (EAD) for the given results
Visualisation of the distribution of specific error types either with the actual event count or
showing the percentage of the total events. Elements of the plot can be adjusted (like color, fontsize etc.)
Args:
event_results (dictionary): Dictionary containing event counts for "total_gt", "total_det", "D", "F", "FM", "M",
"C", "M'", "FM'", "F'", "I'" as returned by core_methods.event_metrics' third value
Keyword Arguments:
fontsize (int): Size of the text inside the bar plot (Reduce the value if some event types are too short)
use_percentage (bool): whether percentage values or to show actual event counts on the chart (default: False)
show (bool): whether to call plt.show (blocking) or plt.draw() for later displaying (default: True)
color_deletion: any matplotlib color for deletion events
color_fragmented: any matplotlib color for fragmented ground truth events
color_fragmented_merged: any matplotlib color for merged and fragmented ground truth events
color_merged: any matplotlib color for merged ground truth events
color_correct: any matplotlib color for correct events
color_merging: any matplotlib color for merging detection events
color_merging_fragmenting: any matplotlib color for merging and fragmenting detection events
color_fragmenting: any matplotlib color for merging detection events
color_insertion: any matplotlib color for insertion events
Returns:
matplotlib Figure: matplotlib figure reference
"""
fig = plt.figure(figsize=(10, 2))
total = event_results["total_gt"] + event_results["total_det"] - event_results["C"]
# Layout settings:
y_min = 0.3
y_max = 0.7
width = 0.02
text_x_offset = 0
text_y_pos_1 = 0.55
text_y_pos_2 = 0.4
fontsize = kwargs.pop('fontsize', 10)
fontsize_extern = 12
use_percentage = kwargs.pop('use_percentage', False)
# Color settings:
cmap = plt.get_cmap("Paired")
color_deletion = kwargs.pop('color_deletion', cmap(4))
color_fragmented = kwargs.pop('color_fragmented', cmap(6))
color_fragmented_merged = kwargs.pop('color_fragmented_merged', cmap(0))
color_merged = kwargs.pop('color_merged', cmap(8))
color_correct = kwargs.pop('color_correct', cmap(3))
color_merging = kwargs.pop('color_merging', cmap(9))
color_merging_fragmenting = kwargs.pop('color_merging_fragmenting', cmap(1))
color_fragmenting = kwargs.pop('color_fragmenting', cmap(7))
color_insertion = kwargs.pop('color_insertion', cmap(5))
# Show deletions:
current_score = "D"
current_x_start = 0
current_x_end = event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_deletion)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score]*100/event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmented events:
current_score = "F"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmented and merged events:
current_score = "FM"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented_merged)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show merged events:
current_score = "M"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merged)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show correct events:
current_score = "C"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_correct)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%/" + "{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show merging detections:
current_score = "M'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmenting and merging detections:
current_score = "FM'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging_fragmenting)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmenting detections:
current_score = "F'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmenting)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show insertions:
current_score = "I'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_insertion)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Draw line for total events:
plt.axvspan(0, event_results["total_gt"], y_max, y_max + width, color="black")
plt.axvspan( total - event_results["total_det"], total, y_min, y_min - width, color="black")
plt.text((0 + event_results["total_gt"]) / 2, 0.8, "Actual events (total=" + str(event_results["total_gt"]) + ")",
fontsize=fontsize_extern, horizontalalignment='center', verticalalignment='center')
plt.text((2*total - event_results["total_det"]) / 2, 0.18, "Detected events (total=" + str(event_results["total_det"]) + ")",
horizontalalignment='center', fontsize=fontsize_extern, verticalalignment='center')
plt.tight_layout()
if kwargs.pop('show', True):
plt.show()
else:
plt.draw()
return fig
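# Usage sketch: event_results is the event-count dictionary described in the docstring (the third
# return value of core_methods.event_metrics); the counts below are made up but consistent:
#   event_results = {"total_gt": 10, "total_det": 11, "D": 1, "F": 2, "FM": 0, "M": 1,
#                    "C": 6, "M'": 1, "FM'": 0, "F'": 3, "I'": 1}
#   fig = plot_event_analysis_diagram(event_results, use_percentage=True)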
| mit | -3,669,431,086,910,914,000 | 47.944118 | 197 | 0.625684 | false |
msimacek/koschei | koschei/backend/depsolve.py | 1 | 5237 |
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Author: Michael Simacek <[email protected]>
# Author: Mikolaj Izdebski <[email protected]>
"""
This module contains functions wrapping dependency resolution functionality
from hawkey/libdnf.
"""
import hawkey
from koschei.config import get_config
def _get_builddep_selector(sack, dep):
# Try to find something by provides
sltr = hawkey.Selector(sack)
sltr.set(provides=dep)
found = sltr.matches()
if not found and dep.startswith("/"):
# Nothing matches by provides and since it's a file, try matching by file path
sltr = hawkey.Selector(sack)
sltr.set(file=dep)
return sltr
def run_goal(sack, br, group):
"""
Perform resolution (simulated installation) of given dependencies and build group.
The only difference in treatment of dependencies vs. packages from the build group is
that missing packages in build group are silently skipped, whereas missing packages
in dependencies are reported as problems and fail the resolution.
:param sack: hawkey.Sack to use for the resolution.
:param br: List of dependencies (strings from BuildRequires)
:param group: list of packages in the build group (strings)
:return: If the resolution succeeded:
(True, [], installs), where installs is list of string names of packages
that would be installed.
If the resolution failed (something was not installable):
(False, problems, None), where problems is a list of human-readable strings
describing the problems that prevent installation.
"""
# pylint:disable=E1101
goal = hawkey.Goal(sack)
problems = []
for name in group:
sltr = _get_builddep_selector(sack, name)
if sltr.matches():
# missing packages are silently skipped as in dnf
goal.install(select=sltr)
for r in br:
sltr = _get_builddep_selector(sack, r)
# pylint: disable=E1103
if not sltr.matches():
problems.append("No package found for: {}".format(r))
else:
goal.install(select=sltr)
kwargs = {}
if get_config('dependency.ignore_weak_deps'):
kwargs = {'ignore_weak_deps': True}
goal.run(**kwargs)
for first, *rest in goal.problem_rules():
problems.append(
f"Problem: {first}" +
''.join(f'\n - {problem}' for problem in rest)
)
resolved = not problems
return resolved, problems, goal.list_installs() if resolved else None
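# Usage sketch (package names are made up; `sack` is assumed to be a hawkey.Sack already loaded
# with repository metadata):
#   resolved, problems, installs = run_goal(sack, ['gcc', 'python3-devel'], ['rpm-build'])
#   if not resolved:
#       print('\n'.join(problems))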
class DependencyWithDistance(object):
"""
Object with same fields as Dependency + additional distance field used to represent
distance from first-level dependencies in the object graph.
"""
def __init__(self, name, epoch, version, release, arch):
self.name = name
self.epoch = epoch
self.version = version
self.release = release
self.arch = arch
self.distance = None
def compute_dependency_distances(sack, br, deps):
"""
Computes dependency distance of given dependencies.
Dependency distance is the length of the shortest path from any of the first-level
dependencies (BuildRequires) to the dependency node in the dependency graph.
The algorithm is only a best-effort approximation that uses hawkey queries.
It is a variant of depth-limited BFS (depth limit is hardcoded to 5).
Dependency objects are mutated in place. Objects that weren't reached keep their
original distance (None).
:param sack: hawkey.Sack used for dependency queries
:param br: List of BuildRequires -- first-level dependencies. Build group should not
be included.
:param deps: List of DependencyWithDistance objects for all dependencies that were
marked to be installed.
"""
dep_map = {dep.name: dep for dep in deps}
visited = set()
level = 1
# pylint:disable=E1103
pkgs_on_level = {x for r in br for x in
_get_builddep_selector(sack, r).matches()}
while pkgs_on_level:
for pkg in pkgs_on_level:
dep = dep_map.get(pkg.name)
if dep and dep.distance is None:
dep.distance = level
level += 1
if level >= 5:
break
reldeps = {req for pkg in pkgs_on_level if pkg not in visited
for req in pkg.requires}
visited.update(pkgs_on_level)
pkgs_on_level = set(hawkey.Query(sack).filter(provides=reldeps))
| gpl-2.0 | 3,479,328,165,184,086,500 | 38.08209 | 89 | 0.668322 | false |
dagnello/ansible-modules-core | cloud/amazon/ec2_asg.py | 1 | 33907 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: true
min_size:
description:
- Minimum number of instances in group
required: false
max_size:
description:
- Maximum number of instances in group
required: false
desired_capacity:
description:
- Desired number of instances in group
required: false
replace_all_instances:
description:
- In a rolling fashion, replace all instances that have an old launch configuration with instances using the current launch configuration.
required: false
version_added: "1.8"
default: False
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration.
required: false
version_added: "1.8"
default: None
lc_check:
description:
- Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
required: false
version_added: "1.8"
default: True
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
default: 500 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
required: false
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
- How long to wait for instances to become viable when replaced. Used in conjunction with the replace_instances option.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: yes
required: False
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
- For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existent autoscaling group, the current termination policies are maintained.
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Basic configuration
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
Below is an example of how to assign a new launch config to an ASG and terminate old instances.
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".
This could also be considered a rolling deploy of a pre-baked AMI.
If this is a newly created group, the instances will not be replaced since all instances
will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
To only replace a couple of instances instead of all of them, supply a list
to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
'''
import time
import logging as log
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
log.getLogger('boto').setLevel(log.CRITICAL)
#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
'health_check_period', 'health_check_type', 'launch_config_name',
'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
'termination_policies', 'vpc_zone_identifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
def enforce_required_arguments(module):
''' As many arguments are not required for autoscale group deletion
they cannot be mandatory arguments for the module, so we enforce
them here '''
missing_args = []
for arg in ('min_size', 'max_size', 'launch_config_name'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
def get_properties(autoscaling_group):
properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES)
# Ugly hack to make this JSON-serializable. We take a list of boto Tag
# objects and replace them with a dict-representation. Needed because the
# tags are included in ansible's return value (which is jsonified)
if 'tags' in properties and isinstance(properties['tags'], list):
serializable_tags = {}
for tag in properties['tags']:
serializable_tags[tag.key] = [tag.value, tag.propagate_at_launch]
properties['tags'] = serializable_tags
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
properties['unhealthy_instances'] = 0
properties['pending_instances'] = 0
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
if autoscaling_group.instances:
properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
instance_facts = {}
for i in autoscaling_group.instances:
instance_facts[i.instance_id] = {'health_status': i.health_status,
'lifecycle_state': i.lifecycle_state,
'launch_config_name': i.launch_config_name }
if i.health_status == 'Healthy' and i.lifecycle_state == 'InService':
properties['viable_instances'] += 1
if i.health_status == 'Healthy':
properties['healthy_instances'] += 1
else:
properties['unhealthy_instances'] += 1
if i.lifecycle_state == 'InService':
properties['in_service_instances'] += 1
if i.lifecycle_state == 'Terminating':
properties['terminating_instances'] += 1
if i.lifecycle_state == 'Pending':
properties['pending_instances'] += 1
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.load_balancers
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
return properties
def elb_dreg(asg_connection, module, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
as_group = asg_connection.get_all_groups(names=[group_name])[0]
wait_timeout = module.params.get('wait_timeout')
props = get_properties(as_group)
count = 1
if as_group.load_balancers and as_group.health_check_type == 'ELB':
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
else:
return
exists = True
for lb in as_group.load_balancers:
elb_connection.deregister_instances(lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
count = 0
for lb in as_group.load_balancers:
lb_instances = elb_connection.describe_instance_health(lb)
for i in lb_instances:
if i.instance_id == instance_id and i.state == "InService":
count += 1
log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, module, group_name):
healthy_instances = []
as_group = asg_connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(instance)
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("ELB instance status:")
for lb in as_group.load_balancers:
# we catch a race condition that sometimes happens if the instance exists in the ASG
# but has not yet shown up in the ELB
try:
lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
except boto.exception.InvalidInstance, e:
pass
for i in lb_instances:
if i.state == "InService":
healthy_instances.append(i.instance_id)
log.debug("{0}: {1}".format(i.instance_id, i.state))
return len(healthy_instances)
def wait_for_elb(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
# status as to avoid health_check_grace period that is awarded to ASG instances
as_group = asg_connection.get_all_groups(names=[group_name])[0]
if as_group.load_balancers and as_group.health_check_type == 'ELB':
log.debug("Waiting for ELB to consider intances healthy.")
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
wait_timeout = time.time() + wait_timeout
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
while healthy_instances < as_group.min_size and wait_timeout > time.time():
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
availability_zones = module.params['availability_zones']
launch_config_name = module.params.get('launch_config_name')
min_size = module.params['min_size']
max_size = module.params['max_size']
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
health_check_period = module.params.get('health_check_period')
health_check_type = module.params.get('health_check_type')
default_cooldown = module.params.get('default_cooldown')
wait_for_instances = module.params.get('wait_for_instances')
as_groups = connection.get_all_groups(names=[group_name])
wait_timeout = module.params.get('wait_timeout')
termination_policies = module.params.get('termination_policies')
if not vpc_zone_identifier and not availability_zones:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
asg_tags = []
for tag in set_tags:
for k,v in tag.iteritems():
if k !='propagate_at_launch':
asg_tags.append(Tag(key=k,
value=v,
propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
resource_id=group_name))
if not as_groups:
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
enforce_required_arguments(module)
launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
ag = AutoScalingGroup(
group_name=group_name,
load_balancers=load_balancers,
availability_zones=availability_zones,
launch_config=launch_configs[0],
min_size=min_size,
max_size=max_size,
desired_capacity=desired_capacity,
vpc_zone_identifier=vpc_zone_identifier,
connection=connection,
tags=asg_tags,
health_check_period=health_check_period,
health_check_type=health_check_type,
default_cooldown=default_cooldown,
termination_policies=termination_policies)
try:
connection.create_auto_scaling_group(ag)
if wait_for_instances == True:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
changed = True
return(changed, asg_properties)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
as_group = as_groups[0]
changed = False
for attr in ASG_ATTRIBUTES:
if module.params.get(attr, None) is not None:
module_attr = module.params.get(attr)
if attr == 'vpc_zone_identifier':
module_attr = ','.join(module_attr)
group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list
# sorted differently
try:
module_attr.sort()
except:
pass
try:
group_attr.sort()
except:
pass
if group_attr != module_attr:
changed = True
setattr(as_group, attr, module_attr)
if len(set_tags) > 0:
have_tags = {}
want_tags = {}
for tag in asg_tags:
want_tags[tag.key] = [tag.value, tag.propagate_at_launch]
dead_tags = []
for tag in as_group.tags:
have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
if not tag.key in want_tags:
changed = True
dead_tags.append(tag)
if dead_tags != []:
connection.delete_tags(dead_tags)
if have_tags != want_tags:
changed = True
connection.create_or_update_tags(asg_tags)
# handle loadbalancers separately because None != []
load_balancers = module.params.get('load_balancers') or []
if load_balancers and as_group.load_balancers != load_balancers:
changed = True
as_group.load_balancers = module.params.get('load_balancers')
if changed:
try:
as_group.update()
except BotoServerError, e:
module.fail_json(msg=str(e))
if wait_for_instances == True:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
try:
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
except BotoServerError, e:
module.fail_json(msg=str(e))
return(changed, asg_properties)
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
groups = connection.get_all_groups(names=[group_name])
if groups:
group = groups[0]
group.max_size = 0
group.min_size = 0
group.desired_capacity = 0
group.update()
instances = True
while instances:
tmp_groups = connection.get_all_groups(names=[group_name])
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)
group.delete()
while len(connection.get_all_groups(names=[group_name])):
time.sleep(5)
changed=True
return changed
else:
changed=False
return changed
def get_chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
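# Usage sketch: list(get_chunks(['i-1', 'i-2', 'i-3'], 2)) -> [['i-1', 'i-2'], ['i-3']];
# replace() walks the ASG instance list in batches of replace_batch_size this way.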
def update_size(group, max_size, min_size, dc):
log.debug("setting ASG sizes")
log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size ))
group.max_size = max_size
group.min_size = min_size
group.desired_capacity = dc
group.update()
def replace(connection, module):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
lc_check = module.params.get('lc_check')
replace_instances = module.params.get('replace_instances')
as_group = connection.get_all_groups(names=[group_name])[0]
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
# check to see if instances are replaceable if checking launch configs
new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
num_new_inst_needed = desired_capacity - len(new_instances)
if lc_check:
if num_new_inst_needed == 0 and old_instances:
log.debug("No new instances needed, but old instances are present. Removing old instances")
terminate_batch(connection, module, old_instances, instances, True)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
changed = True
return(changed, props)
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
batch_size = num_new_inst_needed
if not old_instances:
changed = False
return(changed, props)
# set temporary settings and wait for them to be reached
# This should get overridden if the number of instances left is less than the batch size.
as_group = connection.get_all_groups(names=[group_name])[0]
update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
log.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
# break out of this loop if we have enough new instances
break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
wait_for_term_inst(connection, module, term_instances)
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
if break_early:
log.debug("breaking loop")
break
update_size(as_group, max_size, min_size, desired_capacity)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
log.debug("Rolling update complete.")
    changed = True
return(changed, asg_properties)
def get_instances_by_lc(props, lc_check, initial_instances):
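    """Split the group's instances into (new, old) lists, either by comparing
    each instance's launch config name with the group's (lc_check) or by
    membership in initial_instances."""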
new_instances = []
old_instances = []
# old instances are those that have the old launch config
if lc_check:
for i in props['instances']:
if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
new_instances.append(i)
else:
old_instances.append(i)
else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
for i in props['instances']:
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
return new_instances, old_instances
def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
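    """Return the subset of replace_instances that are actually in the ASG and
    are eligible for termination: instances on a non-current launch config, or
    instances listed in initial_instances when lc_check is off."""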
instances_to_terminate = []
instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances'])
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
if lc_check:
for i in instances:
if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate
def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
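    """Deregister a batch of old instances from the ELB and terminate them,
    decrementing desired capacity once no further new instances are needed.
    Returns (break_loop, desired_size, instances_terminated)."""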
batch_size = module.params.get('replace_batch_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
group_name = module.params.get('name')
wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check')
decrement_capacity = False
break_loop = False
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
desired_size = as_group.min_size
new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
num_new_inst_needed = desired_capacity - len(new_instances)
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
log.debug("new instances needed: {0}".format(num_new_inst_needed))
log.debug("new instances: {0}".format(new_instances))
log.debug("old instances: {0}".format(old_instances))
log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group.min_size != min_size:
as_group.min_size = min_size
as_group.update()
log.debug("Updating minimum size back to original of {0}".format(min_size))
        # if there are some leftover old instances, but we are already at
        # capacity with new ones, we don't want to decrement capacity
if leftovers:
decrement_capacity = False
break_loop = True
instances_to_terminate = old_instances
desired_size = min_size
log.debug("No new instances needed")
    if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
log.debug("{0} new instances needed".format(num_new_inst_needed))
log.debug("decrementing capacity: {0}".format(decrement_capacity))
for instance_id in instances_to_terminate:
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
return break_loop, desired_size, instances_to_terminate
def wait_for_term_inst(connection, module, term_instances):
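    """Poll the ASG until the given instances are no longer in a Terminating
    or Unhealthy state, failing the module if wait_timeout is exceeded."""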
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
lc_check = module.params.get('lc_check')
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
log.debug("waiting for instances to terminate")
count = 0
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instance_facts = props['instance_facts']
instances = ( i for i in instance_facts if i in term_instances)
for i in instances:
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
            if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
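    """Poll the ASG until props[prop] (e.g. 'viable_instances') reaches
    desired_size, failing the module if wait_timeout is exceeded.
    Returns the latest group properties."""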
# make sure we have the latest stats after that last loop.
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
time.sleep(10)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
log.debug("Reached {0}: {1}".format(prop, desired_size))
return props
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
min_size=dict(type='int'),
max_size=dict(type='int'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default')
),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['replace_all_instances', 'replace_instances']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
changed = create_changed = replace_changed = False
if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module)
elif state == 'absent':
changed = delete_autoscaling_group(connection, module)
module.exit_json( changed = changed )
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
if create_changed or replace_changed:
changed = True
module.exit_json( changed = changed, **asg_properties )
main()
| gpl-3.0 | -7,845,743,638,671,991,000 | 39.851807 | 232 | 0.643731 | false |
chippey/gaffer | python/GafferUI/PathWidget.py | 1 | 7446 | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import warnings
import IECore
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
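## A TextWidget for editing the string representation of a path, providing
# tab completion and popup menus for choosing the next path entry.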
class PathWidget( GafferUI.TextWidget ) :
def __init__( self, path, **kw ) :
GafferUI.TextWidget.__init__( self, str( path ), **kw )
# we can be fairly sure that the average path requires a bit more space
# than the other things that go in TextWidgets.
self.setPreferredCharacterWidth( 60 )
self.__keyPressConnection = self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
self.__selectingFinishedConnection = self.selectingFinishedSignal().connect( Gaffer.WeakMethod( self.__selectingFinished ) )
self.__textChangedConnection = self.textChangedSignal().connect( Gaffer.WeakMethod( self.__textChanged ) )
self.__popupMenu = None
self.__path = None
self.setPath( path )
def path( self ) :
warnings.warn( "PathWidget.path() is deprecated, use PathWidget.getPath() instead.", DeprecationWarning, 2 )
return self.__path
def setPath( self, path ) :
self.__path = path
self.__pathChangedConnection = self.__path.pathChangedSignal().connect( Gaffer.WeakMethod( self.__pathChanged, fallbackResult = None ) )
self.setText( str( self.__path ) )
def getPath( self ) :
return self.__path
def __keyPress( self, widget, event ) :
if not self.getEditable() :
return False
if event.key=="Tab" :
# do tab completion
position = self.getCursorPosition()
truncatedPath = self.__path.copy()
truncatedPath.setFromString( str( truncatedPath )[:position] )
if len( truncatedPath ) :
matchStart = truncatedPath[-1]
del truncatedPath[-1]
else :
matchStart = ""
matches = [ x[-1] for x in truncatedPath.children() if x[-1].startswith( matchStart ) ]
match = os.path.commonprefix( matches )
if match :
self.__path[:] = truncatedPath[:] + [ match ]
if len( matches )==1 and not self.__path.isLeaf() :
text = self.getText()
if not text.endswith( "/" ) :
self.setText( text + "/" )
self.setCursorPosition( len( self.getText() ) )
return True
elif event.key == "Down" :
if event.modifiers & GafferUI.ModifiableEvent.Modifiers.Shift :
# select all!
self.setSelection( None, None )
else :
text = self.getText()
position = self.getCursorPosition()
if position == len( text ) and len( text ) and text[-1]=="/" :
# select last character to trigger menu for next path entry
self.setSelection( -1, None )
else :
# select path entry around the cursor
low = text.rfind( "/", 0, position )
high = text.find( "/", position )
if low != -1 :
self.setSelection( low+1, high if high != -1 else None )
self.__popupMenuForSelection()
return True
return False
def __selectingFinished( self, widget ) :
assert( widget is self )
if self.getEditable() :
self.__popupMenuForSelection()
def __popupMenuForSelection( self ) :
start, end = self.getSelection()
if start == end :
return
text = self.getText()
selectedText = text[start:end]
if text == selectedText :
self.__popupHierarchy()
elif selectedText == "/" and end == len( text ) :
# the final slash was selected
self.__popupListing( end )
elif "/" not in selectedText and text[start-1] == "/" and ( end >= len( text ) or text[end] == "/" ) :
self.__popupListing( start )
def __popupHierarchy( self ) :
pathCopy = self.__path.copy()
md = IECore.MenuDefinition()
i = 0
while len( pathCopy ) :
md.append(
"/" + str( i ),
IECore.MenuItemDefinition(
label = str( pathCopy ),
command = IECore.curry( Gaffer.WeakMethod( self.__path.setFromString ), str( pathCopy ) ),
)
)
del pathCopy[-1]
i += 1
self.__popupMenu = GafferUI.Menu( md )
self.__popupMenu.popup( parent = self, position = self.__popupPosition( 0 ), forcePosition=True, grabFocus=False )
def __popupListing( self, textIndex ) :
dirPath = self.__path.copy()
n = os.path.dirname( self.getText()[:textIndex] ) or "/"
dirPath.setFromString( n )
options = dirPath.children()
options = [ x[-1] for x in options ]
if len( options ) :
md = IECore.MenuDefinition()
for o in options :
md.append( "/" + o,
IECore.MenuItemDefinition(
label=o,
command = IECore.curry( Gaffer.WeakMethod( self.__replacePathEntry ), len( dirPath ), o )
)
)
self.__popupMenu = GafferUI.Menu( md )
self.__popupMenu.popup( parent = self, position = self.__popupPosition( textIndex ), forcePosition=True, grabFocus=False )
def __replacePathEntry( self, position, newEntry ) :
if position==len( self.__path ) :
self.__path.append( newEntry )
else :
self.__path[position] = newEntry
self.__path.truncateUntilValid()
if position==len( self.__path )-1 and not self.__path.isLeaf() :
self.setText( self.getText() + "/" )
def __popupPosition( self, textIndex ) :
## \todo Surely there's a better way?
for x in range( 0, 10000 ) :
if self._qtWidget().cursorPositionAt( QtCore.QPoint( x, 5 ) ) >= textIndex :
break
bound = self.bound()
return IECore.V2i( bound.min.x + x, bound.max.y )
def __pathChanged( self, path ) :
self.setText( str( path ) )
def __textChanged( self, widget ) :
text = self.getText()
with Gaffer.BlockedConnection( self.__pathChangedConnection ) :
try :
self.__path.setFromString( self.getText() )
except :
# no need to worry too much - it's up to the user to enter
# something valid. maybe they'll get it right next time.
pass
| bsd-3-clause | -3,532,087,557,409,567,000 | 29.768595 | 138 | 0.657266 | false |
mricon/totp-cgi | test.py | 1 | 28974 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# Copyright (C) 2012 by Konstantin Ryabitsev and contributors
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
from __future__ import (absolute_import,
division,
print_function,
with_statement,
unicode_literals)
__author__ = 'Konstantin Ryabitsev <[email protected]>'
import unittest
import pyotp
import time
import logging
import totpcgi.utils
import sys
import os
import subprocess
import totpcgi
secrets_dir = 'test/'
pincode_file = 'test/pincodes'
state_dir = 'test/state'
pg_connect_string = ''
ldap_dn = ''
ldap_url = ''
ldap_cacert = ''
mysql_connect_host = ''
mysql_connect_user = ''
mysql_connect_password = ''
mysql_connect_db = ''
SECRET_BACKEND = 'File'
PINCODE_BACKEND = 'File'
STATE_BACKEND = 'File'
logger = logging.getLogger('totpcgi')
logger.setLevel(logging.DEBUG)
ch = logging.FileHandler('test.log')
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("[%(asctime)s] {%(module)s:%(funcName)s:%(lineno)s} %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
VALID_SECRET = None
VALID_SCRATCH_TOKENS = []
def db_connect():
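    """Return a psycopg2 connection built from the module-level pg_connect_string."""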
import psycopg2
conn = psycopg2.connect(pg_connect_string)
return conn
def getBackends():
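    """Construct a totpcgi Backends object using the state, secret and pincode
    backends selected by the STATE_BACKEND, SECRET_BACKEND and PINCODE_BACKEND
    globals."""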
import totpcgi.backends
backends = totpcgi.backends.Backends()
import totpcgi.backends.file
if STATE_BACKEND == 'File':
backends.state_backend = totpcgi.backends.file.GAStateBackend(state_dir)
elif STATE_BACKEND == 'pgsql':
import totpcgi.backends.pgsql
backends.state_backend = totpcgi.backends.pgsql.GAStateBackend(pg_connect_string)
elif STATE_BACKEND == 'mysql':
import totpcgi.backends.mysql
backends.state_backend = totpcgi.backends.mysql.GAStateBackend(mysql_connect_host, mysql_connect_user,
mysql_connect_password, mysql_connect_db)
if SECRET_BACKEND == 'File':
backends.secret_backend = totpcgi.backends.file.GASecretBackend(secrets_dir)
elif SECRET_BACKEND == 'pgsql':
backends.secret_backend = totpcgi.backends.pgsql.GASecretBackend(pg_connect_string)
elif SECRET_BACKEND == 'mysql':
backends.secret_backend = totpcgi.backends.mysql.GASecretBackend(mysql_connect_host, mysql_connect_user,
mysql_connect_password, mysql_connect_db)
if PINCODE_BACKEND == 'File':
backends.pincode_backend = totpcgi.backends.file.GAPincodeBackend(pincode_file)
elif PINCODE_BACKEND == 'pgsql':
backends.pincode_backend = totpcgi.backends.pgsql.GAPincodeBackend(pg_connect_string)
elif PINCODE_BACKEND == 'mysql':
backends.pincode_backend = totpcgi.backends.mysql.GAPincodeBackend(mysql_connect_host, mysql_connect_user,
mysql_connect_password, mysql_connect_db)
elif PINCODE_BACKEND == 'ldap':
import totpcgi.backends.ldap
backends.pincode_backend = totpcgi.backends.ldap.GAPincodeBackend(ldap_url, ldap_dn, ldap_cacert)
return backends
def setCustomPincode(pincode, algo='sha256', user='valid', addjunk=False):
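    """Hash the pincode with the given algorithm and store it for the user via
    the configured pincode backend, optionally appending junk to the stored
    hashcode."""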
hashcode = totpcgi.utils.hash_pincode(pincode, algo=algo)
logger.debug('generated hashcode=%s', hashcode)
if addjunk:
hashcode += ':junk'
backends = getBackends()
if PINCODE_BACKEND == 'File':
backends.pincode_backend.save_user_hashcode(user, hashcode)
elif PINCODE_BACKEND in ('pgsql', 'mysql'):
backends.pincode_backend.save_user_hashcode(user, hashcode)
def cleanState(user='valid'):
logger.debug('Cleaning state for user %s', user)
backends = getBackends()
backends.state_backend.delete_user_state(user)
if 'ldap_user' in os.environ and user != os.environ['ldap_user']:
cleanState(user=os.environ['ldap_user'])
def setCustomState(state, user='valid'):
logger.debug('Setting custom state for user %s', user)
backends = getBackends()
backends.state_backend.get_user_state(user)
backends.state_backend.update_user_state(user, state)
def getValidUser():
logger.debug('Setting up user valid')
backends = getBackends()
gau = totpcgi.GAUser('valid', backends)
if SECRET_BACKEND == 'File':
with open(os.path.join(secrets_dir, 'valid.totp'), 'r') as fh:
logger.debug('valid.totp follows\n%s', fh.read())
return gau
class GATest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
def assertCountEqual(*args, **kwargs):
if sys.version_info < (3, 0):
return unittest.TestCase.assertItemsEqual(*args, **kwargs)
# noinspection PyCompatibility
return unittest.TestCase.assertCountEqual(*args, **kwargs)
def assertRaisesRegex(*args, **kwargs):
if sys.version_info < (3, 0):
return unittest.TestCase.assertRaisesRegexp(*args, **kwargs)
# noinspection PyCompatibility
return unittest.TestCase.assertRaisesRegex(*args, **kwargs)
def setUp(self):
# Remove any existing state files for user "valid"
cleanState()
def tearDown(self):
cleanState()
if os.access(pincode_file, os.W_OK):
os.unlink(pincode_file)
if os.access(pincode_file + '.db', os.W_OK):
os.unlink(pincode_file + '.db')
def testValidSecretParsing(self):
logger.debug('Running testValidSecretParsing')
gau = getValidUser()
backends = getBackends()
secret = backends.secret_backend.get_user_secret(gau.user)
self.assertEqual(secret.otp.secret, VALID_SECRET,
'Secret read from valid.totp did not match')
self.assertEqual(gau.user, 'valid',
'User did not match')
self.assertEqual(secret.rate_limit, (4, 30),
'RATE_LIMIT did not parse correctly')
self.assertEqual(secret.window_size, 3,
'WINDOW_SIZE did not parse correctly')
compare_tokens = []
for token in VALID_SCRATCH_TOKENS:
compare_tokens.append(token)
self.assertCountEqual(compare_tokens, secret.scratch_tokens)
def testInvalidSecretParsing(self):
logger.debug('Running testInvalidSecretParsing')
backends = getBackends()
gau = totpcgi.GAUser('invalid', backends)
with self.assertRaises(totpcgi.UserSecretError):
gau.verify_token(555555)
def testInvalidUsername(self):
logger.debug('Running testInvalidUsername')
backends = getBackends()
with self.assertRaisesRegex(totpcgi.VerifyFailed,
'invalid characters'):
totpcgi.GAUser('../../etc/passwd', backends)
def testNonExistentValidUser(self):
logger.debug('Running testNonExistentValidUser')
backends = getBackends()
gau = totpcgi.GAUser('[email protected]', backends)
with self.assertRaises(totpcgi.UserNotFound):
gau.verify_token(555555)
def testValidToken(self):
logger.debug('Running testValidToken')
gau = getValidUser()
backends = getBackends()
secret = backends.secret_backend.get_user_secret(gau.user)
totp = pyotp.TOTP(secret.otp.secret)
token = totp.now()
self.assertEqual(gau.verify_token(token), 'Valid TOTP token used')
# try using it again
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'been used once'):
gau.verify_token(token)
# and again, to make sure it is preserved in state
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'been used once'):
gau.verify_token(token)
gau = totpcgi.GAUser('hotp', backends)
# Save custom state for HOTP user, as some backends rely on it to trigger HOTP mode
state = totpcgi.GAUserState()
state.counter = 0
setCustomState(state, 'hotp')
hotp = pyotp.HOTP(secret.otp.secret)
token = hotp.at(0)
self.assertEqual(gau.verify_token(token), 'Valid HOTP token used')
# make sure the counter now validates at 1 and 2
self.assertEqual(gau.verify_token(hotp.at(1)), 'Valid HOTP token used')
self.assertEqual(gau.verify_token(hotp.at(2)), 'Valid HOTP token used')
# make sure trying "1" or "2" fails now
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'HOTP token failed to verify'):
gau.verify_token(hotp.at(1))
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'HOTP token failed to verify'):
gau.verify_token(hotp.at(2))
# but we're good to go at 3
self.assertEqual(gau.verify_token(hotp.at(3)), 'Valid HOTP token used')
# and we're good to go with 7, which is max window size
self.assertEqual(gau.verify_token(hotp.at(7)), 'Valid HOTP token within window size used')
# Trying with "5" should fail now
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'HOTP token failed to verify'):
gau.verify_token(hotp.at(5))
# but we're good to go at 8
self.assertEqual(gau.verify_token(hotp.at(8)), 'Valid HOTP token used')
# should fail with 13, which is beyond window size of 9+3
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'HOTP token failed to verify'):
gau.verify_token(hotp.at(13))
cleanState('hotp')
def testTOTPWindowSize(self):
logger.debug('Running testWindowSize')
gau = getValidUser()
backends = getBackends()
secret = backends.secret_backend.get_user_secret(gau.user)
totp = pyotp.TOTP(secret.otp.secret)
# go back until we get the previous token
timestamp = int(time.time())
token = totp.at(timestamp)
past_token = future_token = None
past_timestamp = future_timestamp = timestamp
while past_token is None or past_token == token:
past_timestamp -= 10
past_token = totp.at(past_timestamp)
while future_token is None or future_token == token:
future_timestamp += 10
future_token = totp.at(future_timestamp)
logger.debug('past_token=%s', past_token)
logger.debug('token=%s', token)
logger.debug('future_token=%s', future_token)
# this should work
self.assertEqual(gau.verify_token(past_token),
'Valid TOTP token within window size used')
self.assertEqual(gau.verify_token(future_token),
'Valid TOTP token within window size used')
# trying to reuse them should fail
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'been used once'):
gau.verify_token(past_token)
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'been used once'):
gau.verify_token(future_token)
# get some tokens from +/- 600 seconds
past_token = totp.at(int(time.time())-600)
future_token = totp.at(int(time.time())+600)
logger.debug('past_token=%s', past_token)
logger.debug('future_token=%s', future_token)
# this should fail
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'TOTP token failed to verify'):
gau.verify_token(past_token)
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'TOTP token failed to verify'):
gau.verify_token(future_token)
def testTOTPRateLimit(self):
logger.debug('Running testTOTPRateLimit')
gau = getValidUser()
backends = getBackends()
secret = backends.secret_backend.get_user_secret(gau.user)
token = '555555'
# We now fail 4 times consecutively
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'TOTP token failed to verify'):
gau.verify_token(token)
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'TOTP token failed to verify'):
gau.verify_token(token)
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'TOTP token failed to verify'):
gau.verify_token(token)
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'TOTP token failed to verify'):
gau.verify_token(token)
# We should now get a rate-limited error
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'Rate-limit'):
gau.verify_token(token)
# Same with a valid token
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'Rate-limit'):
gau.verify_token(secret.get_totp_token())
# Make sure we recover from rate-limiting correctly
old_timestamp = secret.timestamp-(31+(secret.rate_limit[1]*10))
state = totpcgi.GAUserState()
state.fail_timestamps = [
old_timestamp,
old_timestamp,
old_timestamp,
old_timestamp
]
setCustomState(state)
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'TOTP token failed to verify'):
gau.verify_token(token)
# Valid token should work, too
setCustomState(state)
ret = gau.verify_token(secret.get_totp_token())
self.assertIn(ret, ('Valid TOTP token used', 'Valid TOTP token within window size used'))
def testHOTPRateLimit(self):
logger.debug('Running testHOTPRateLimit')
backends = getBackends()
# Save custom state for HOTP user, as some backends rely on it to trigger HOTP mode
state = totpcgi.GAUserState()
state.counter = 1
setCustomState(state, 'hotp')
gau = totpcgi.GAUser('hotp', backends)
secret = backends.secret_backend.get_user_secret(gau.user)
hotp = pyotp.HOTP(secret.otp.secret)
token = hotp.at(1)
self.assertEqual(gau.verify_token(token), 'Valid HOTP token used')
# counter is now at 2
token = '555555'
# We now fail 4 times consecutively
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'HOTP token failed to verify'):
gau.verify_token(token)
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'HOTP token failed to verify'):
gau.verify_token(token)
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'HOTP token failed to verify'):
gau.verify_token(token)
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'HOTP token failed to verify'):
gau.verify_token(token)
# We should now get a rate-limited error
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'Rate-limit'):
gau.verify_token(token)
# Same with a valid token
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'Rate-limit'):
gau.verify_token(hotp.at(2))
# Make sure we recover from rate-limiting correctly
old_timestamp = secret.timestamp-(31+(secret.rate_limit[1]*10))
state = totpcgi.GAUserState()
state.fail_timestamps = [
old_timestamp,
old_timestamp,
old_timestamp,
old_timestamp
]
state.counter = 2
setCustomState(state, 'hotp')
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'HOTP token failed to verify'):
gau.verify_token(token)
# Valid token should work, too
setCustomState(state, 'hotp')
self.assertEqual(gau.verify_token(hotp.at(2)), 'Valid HOTP token used')
cleanState('hotp')
def testInvalidToken(self):
logger.debug('Running testInvalidToken')
gau = getValidUser()
token = '555555'
logger.debug('Testing with an invalid 6-digit token')
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'TOTP token failed to verify'):
gau.verify_token(token)
logger.debug('Test right away with a valid token')
backends = getBackends()
secret = backends.secret_backend.get_user_secret(gau.user)
totp = pyotp.TOTP(secret.otp.secret)
validtoken = totp.now()
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'been used once'):
gau.verify_token(validtoken)
logger.debug('Testing with a non-integer token')
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'not an integer'):
cleanState()
gau.verify_token('WAKKA')
logger.debug('Testing with an invalid 8-digit scratch-token')
with self.assertRaisesRegex(totpcgi.VerifyFailed,
'Not a valid scratch-token'):
gau.verify_token('11112222')
def testScratchTokens(self):
gau = getValidUser()
ret = gau.verify_token(VALID_SCRATCH_TOKENS[0])
self.assertEqual(ret, 'Scratch-token used')
# try using it again
with self.assertRaisesRegex(totpcgi.VerifyFailed,
'Scratch-token already used once'):
gau.verify_token(VALID_SCRATCH_TOKENS[0])
# try using another token
ret = gau.verify_token(VALID_SCRATCH_TOKENS[1])
self.assertEqual(ret, 'Scratch-token used')
# use first one again to make sure it's preserved in the state file
with self.assertRaisesRegex(totpcgi.VerifyFailed,
'Scratch-token already used once'):
gau.verify_token(VALID_SCRATCH_TOKENS[0])
def testTotpCGI(self):
# Very basic test -- it should return 'user not found'
os.environ['REMOTE_ADDR'] = '127.0.0.1'
os.environ['QUERY_STRING'] = 'user=bupkis&token=555555&mode=PAM_SM_AUTH'
os.environ['PYTHONPATH'] = '.'
command = ['env', 'python', 'cgi/totp.cgi', 'conf/totpcgi.conf']
ret = subprocess.check_output(command).decode('utf-8')
self.assertTrue(ret.find('bupkis.totp does not exist') >= 0)
def testPincodes(self):
logger.debug('Running testPincodes')
logger.debug('Testing in non-required mode')
backends = getBackends()
ga = totpcgi.GoogleAuthenticator(backends)
gau = getValidUser()
pincode = 'wakkawakka'
secret = backends.secret_backend.get_user_secret(gau.user)
tokencode = secret.get_totp_token()
token = pincode + tokencode
logger.debug('scratch_token=%s', VALID_SCRATCH_TOKENS[0])
logger.debug('pincode=%s', pincode)
if PINCODE_BACKEND == 'File':
logger.debug('Testing without pincodes file')
with self.assertRaisesRegex(totpcgi.UserNotFound,
'pincodes file not found'):
ga.verify_user_token('valid', token)
cleanState()
logger.debug('Testing with junk at the end')
setCustomPincode(pincode, addjunk=True)
ret = ga.verify_user_token('valid', token)
self.assertIn(ret, ('Valid TOTP token used', 'Valid TOTP token within window size used'))
cleanState()
if PINCODE_BACKEND in ('pgsql', 'mysql'):
backends.pincode_backend.delete_user_hashcode('valid')
logger.debug('Testing without a user pincode record present')
with self.assertRaisesRegex(totpcgi.UserNotFound,
'no pincodes record'):
ga.verify_user_token('valid', token)
if PINCODE_BACKEND in ('pgsql', 'mysql', 'File'):
logger.debug('Testing with 1-digit long pincode')
setCustomPincode('1')
ret = ga.verify_user_token('valid', '1'+tokencode)
self.assertIn(ret, ('Valid TOTP token used', 'Valid TOTP token within window size used'))
cleanState()
logger.debug('Testing with 2-digit long pincode + valid tokencode')
setCustomPincode('99')
ret = ga.verify_user_token('valid', '99'+tokencode)
self.assertIn(ret, ('Valid TOTP token used', 'Valid TOTP token within window size used'))
cleanState()
logger.debug('Testing with 2-digit long pincode + invalid tokencode')
setCustomPincode('99')
with self.assertRaisesRegex(totpcgi.VerifyFailed,
'TOTP token failed to verify'):
ga.verify_user_token('valid', '99'+'000000')
cleanState()
logger.debug('Testing with bcrypt')
setCustomPincode(pincode, algo='bcrypt')
ret = ga.verify_user_token('valid', token)
self.assertIn(ret, ('Valid TOTP token used', 'Valid TOTP token within window size used'))
cleanState()
logger.debug('Testing with md5')
setCustomPincode(pincode, algo='md5')
ret = ga.verify_user_token('valid', token)
self.assertIn(ret, ('Valid TOTP token used', 'Valid TOTP token within window size used'))
cleanState()
setCustomPincode(pincode)
if PINCODE_BACKEND == 'ldap':
valid_user = os.environ['ldap_user']
pincode = os.environ['ldap_password']
token = pincode + tokencode
else:
valid_user = 'valid'
pincode = 'wakkawakka'
setCustomPincode(pincode)
logger.debug('Testing with pincode+scratch-code')
ret = ga.verify_user_token(valid_user, pincode+VALID_SCRATCH_TOKENS[0])
self.assertEqual(ret, 'Scratch-token used')
logger.debug('Testing with pincode+scratch-code (starting with 00)')
ret = ga.verify_user_token(valid_user, pincode+VALID_SCRATCH_TOKENS[1])
self.assertEqual(ret, 'Scratch-token used')
logger.debug('Testing with pincode+invalid-scratch-code')
with self.assertRaisesRegex(totpcgi.VerifyFailed, 'Not a valid scratch-token'):
ga.verify_user_token(valid_user, pincode+'00000000')
cleanState()
logger.debug('Turning on pincode enforcing')
ga = totpcgi.GoogleAuthenticator(backends, require_pincode=True)
logger.debug('Trying valid token without pincode')
with self.assertRaisesRegex(totpcgi.UserPincodeError,
'Pincode is required'):
ga.verify_user_token(valid_user, tokencode)
cleanState()
logger.debug('Trying valid scratch token without pincode')
with self.assertRaisesRegex(totpcgi.UserPincodeError,
'Pincode is required'):
ga.verify_user_token(valid_user, VALID_SCRATCH_TOKENS[0])
cleanState()
logger.debug('Trying valid token with pincode in enforcing')
ret = ga.verify_user_token(valid_user, token)
# The tests sometimes take longer than our window
self.assertIn(ret, ('Valid TOTP token used', 'Valid TOTP token within window size used'))
cleanState()
logger.debug('Testing valid pincode+scratch-code in enforcing')
logger.debug('pincode=%s', pincode+VALID_SCRATCH_TOKENS[0])
ret = ga.verify_user_token(valid_user, pincode+VALID_SCRATCH_TOKENS[0])
self.assertEqual(ret, 'Scratch-token used')
cleanState()
if PINCODE_BACKEND == 'ldap':
raisedmsg = 'LDAP bind failed'
else:
raisedmsg = 'Pincode did not match'
logger.debug('Testing with valid token but invalid pincode')
with self.assertRaisesRegex(totpcgi.UserPincodeError, raisedmsg):
ga.verify_user_token(valid_user, 'blarg'+tokencode)
# Refresh our token to grab the latest
totp = pyotp.TOTP(VALID_SECRET)
tokencode = str(totp.now()).zfill(6)
token = pincode + tokencode
logger.debug('Latest token=%s', token)
logger.debug('Testing again with valid token and valid pincode')
with self.assertRaisesRegex(totpcgi.VerifyFailed,
'already been used'):
ga.verify_user_token(valid_user, token)
cleanState()
logger.debug('Testing with valid pincode but invalid token')
with self.assertRaisesRegex(totpcgi.VerifyFailed,
'TOTP token failed to verify'):
ga.verify_user_token(valid_user, pincode+'555555')
def testEncryptedSecret(self):
if PINCODE_BACKEND in 'ldap':
valid_user = os.environ['ldap_user']
pincode = os.environ['ldap_password']
else:
pincode = 'wakkawakka'
valid_user = 'encrypted'
setCustomPincode(pincode, user=valid_user)
logger.debug('Running testEncryptedSecret')
backends = getBackends()
ga = totpcgi.GoogleAuthenticator(backends)
totp = pyotp.TOTP(VALID_SECRET)
token = str(totp.now()).zfill(6)
ga.verify_user_token(valid_user, pincode+token)
# This should fail, as we ignore scratch tokens with encrypted secrets
with self.assertRaisesRegex(totpcgi.VerifyFailed,
'Not a valid scratch-token'):
ga.verify_user_token(valid_user, pincode+'12345678')
cleanState(user=valid_user)
# We don't need to run this for ldap
if PINCODE_BACKEND in 'ldap':
return
setCustomPincode(pincode, user='encrypted-bad')
with self.assertRaisesRegex(totpcgi.UserSecretError,
'Failed to parse'):
ga.verify_user_token('encrypted-bad', pincode+token)
cleanState(user='encrypted-bad')
if __name__ == '__main__':
# To test postgresql backend, do:
# export pg_connect_string='blah blah'
if 'pg_connect_string' in os.environ.keys():
STATE_BACKEND = SECRET_BACKEND = PINCODE_BACKEND = 'pgsql'
pg_connect_string = os.environ['pg_connect_string']
# To test ldap backend, set env vars for
# ldap_url, ldap_dn, ldap_cacert, ldap_user and ldap_password
if 'ldap_url' in os.environ.keys():
PINCODE_BACKEND = 'ldap'
ldap_url = os.environ['ldap_url']
ldap_dn = os.environ['ldap_dn']
ldap_cacert = os.environ['ldap_cacert']
if 'mysql_connect_host' in os.environ.keys():
STATE_BACKEND = SECRET_BACKEND = PINCODE_BACKEND = 'mysql'
mysql_connect_host = os.environ['mysql_connect_host']
mysql_connect_user = os.environ['mysql_connect_user']
mysql_connect_password = os.environ['mysql_connect_password']
mysql_connect_db = os.environ['mysql_connect_db']
be = getBackends()
# valid user
gaus = totpcgi.utils.generate_secret(rate_limit=(4, 30))
# make the 2nd scratch token start with 00
gaus.scratch_tokens[1] = '00' + gaus.scratch_tokens[1][2:]
be.secret_backend.save_user_secret('valid', gaus)
if 'ldap_user' in os.environ:
be.secret_backend.save_user_secret(os.environ['ldap_user'], gaus)
VALID_SECRET = gaus.otp.secret
VALID_SCRATCH_TOKENS = gaus.scratch_tokens
# hotp is using HOTP mode
gaus.set_hotp(0)
be.secret_backend.save_user_secret('hotp', gaus)
# switch back to totp for the rest
gaus.counter = -1
gaus.otp = pyotp.TOTP(VALID_SECRET)
# encrypted-secret user is same as valid, just encrypted
be.secret_backend.save_user_secret('encrypted', gaus, 'wakkawakka')
# invalid user (bad secret)
gaus = totpcgi.utils.generate_secret()
gaus.otp.secret = 'WAKKA"WAKKA'
be.secret_backend.save_user_secret('invalid', gaus)
# encrypted-bad (bad encryption)
gaus.otp.secret = 'aes256+hmac256$WAKKAWAKKA$WAKKAWAKKA'
be.secret_backend.save_user_secret('encrypted-bad', gaus)
try:
unittest.main()
finally:
test_users = ['valid', 'invalid', 'encrypted', 'encrypted-bad', 'hotp']
if 'ldap_user' in os.environ and os.environ['ldap_user'] not in test_users:
test_users.append(os.environ['ldap_user'])
for username in test_users:
be.state_backend.delete_user_state(username)
be.secret_backend.delete_user_secret(username)
pass
| gpl-2.0 | -1,990,789,207,096,743,700 | 36.579767 | 116 | 0.627459 | false |
ntt-sic/nova | nova/tests/virt/libvirt/test_libvirt.py | 1 | 303692 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import errno
import eventlet
import fixtures
import functools
import mox
import os
import re
import shutil
import tempfile
from eventlet import greenthread
from lxml import etree
import mock
from oslo.config import cfg
from xml.dom import minidom
from nova.api.ec2 import cloud
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.objects import instance as instance_obj
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_network
import nova.tests.image.fake
from nova.tests import matchers
from nova.tests.objects import test_pci_device
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova import utils
from nova import version
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
try:
import libvirt
except ImportError:
import nova.tests.virt.libvirt.fakelibvirt as libvirt
libvirt_driver.libvirt = libvirt
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
CONF.import_opt('instances_path', 'nova.compute.manager')
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
_ipv4_like = fake_network.ipv4_like
_fake_NodeDevXml = \
{"pci_0000_04_00_3": """
<device>
<name>pci_0000_04_00_3</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igb</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>3</function>
<product id='0x1521'>I350 Gigabit Network Connection</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
</capability>
</capability>
</device>""",
"pci_0000_04_10_7": """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>"""}
def _concurrency(signal, wait, done, target):
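    """Fake image-fetch function used by the cache concurrency tests: signal
    that it has started, block until released, then report completion."""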
signal.send()
wait.wait()
done.send()
class FakeVirDomainSnapshot(object):
def __init__(self, dom=None):
self.dom = dom
def delete(self, flags):
pass
class FakeVirtDomain(object):
def __init__(self, fake_xml=None, uuidstr=None):
self.uuidstr = uuidstr
if fake_xml:
self._fake_dom_xml = fake_xml
else:
self._fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
def name(self):
return "fake-domain %s" % self
def info(self):
return [power_state.RUNNING, None, None, None, None]
def create(self):
pass
def managedSave(self, *args):
pass
def createWithFlags(self, launch_flags):
pass
def XMLDesc(self, *args):
return self._fake_dom_xml
def UUIDString(self):
return self.uuidstr
def attachDeviceFlags(self, xml, flags):
pass
def detachDeviceFlags(self, xml, flags):
pass
def snapshotCreateXML(self, xml, flags):
pass
def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
pass
def blockRebase(self, disk, base, bandwidth=0, flags=0):
pass
def blockJobInfo(self, path, flags):
pass
class CacheConcurrencyTestCase(test.TestCase):
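    """Ensure cache fetches for the same file are serialized while fetches
    for different files may run concurrently."""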
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
# utils.synchronized() will create the lock_path for us if it
# doesn't already exist. It will also delete it when it's done,
# which can cause race conditions with the multiple threads we
# use for tests. So, create the path here so utils.synchronized()
# won't delete it out from under one of the threads.
self.lock_path = os.path.join(CONF.instances_path, 'locks')
fileutils.ensure_tree(self.lock_path)
def fake_exists(fname):
basedir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if fname == basedir or fname == self.lock_path:
return True
return False
def fake_execute(*args, **kwargs):
pass
def fake_extend(image, size, use_cow=False):
pass
self.stubs.Set(os.path, 'exists', fake_exists)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def test_same_fname_concurrency(self):
# Ensures that the same fname cache runs at a sequentially.
uuid = uuidutils.generate_uuid()
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
# Thread 1 should run before thread 2.
sig1.wait()
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname', None,
signal=sig2, wait=wait2, done=done2)
wait2.send()
eventlet.sleep(0)
try:
self.assertFalse(done2.ready())
finally:
wait1.send()
done1.wait()
eventlet.sleep(0)
self.assertTrue(done2.ready())
# Wait on greenthreads to assert they didn't raise exceptions
# during execution
thr1.wait()
thr2.wait()
def test_different_fname_concurrency(self):
# Ensures that two different fname caches are concurrent.
uuid = uuidutils.generate_uuid()
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname2', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
# Thread 1 should run before thread 2.
sig1.wait()
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname1', None,
signal=sig2, wait=wait2, done=done2)
eventlet.sleep(0)
# Wait for thread 2 to start.
sig2.wait()
wait2.send()
eventlet.sleep(0)
try:
self.assertTrue(done2.ready())
finally:
wait1.send()
eventlet.sleep(0)
# Wait on greenthreads to assert they didn't raise exceptions
# during execution
thr1.wait()
thr2.wait()
class FakeVolumeDriver(object):
def __init__(self, *args, **kwargs):
pass
def attach_volume(self, *args):
pass
def detach_volume(self, *args):
pass
def get_xml(self, *args):
return ""
class FakeConfigGuestDisk(object):
def __init__(self, *args, **kwargs):
self.source_type = None
self.driver_cache = None
class FakeConfigGuest(object):
def __init__(self, *args, **kwargs):
self.driver_cache = None
class FakeNodeDevice(object):
def __init__(self, fakexml):
self.xml = fakexml
def XMLDesc(self, *args):
return self.xml
class LibvirtConnTestCase(test.TestCase):
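    """Tests for the libvirt driver's connection handling, PCI device
    handling and guest config generation."""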
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(fake_call=True)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.get_admin_context()
temp_dir = self.useFixture(fixtures.TempDir()).path
self.flags(instances_path=temp_dir)
self.flags(libvirt_snapshots_directory=temp_dir)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
# Force libvirt to return a host UUID that matches the serial in
# nova.tests.fakelibvirt. This is necessary because the host UUID
# returned by libvirt becomes the serial whose value is checked for in
# test_xml_and_uri_* below.
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver.get_host_uuid',
lambda _: 'cef19ce0-0ca2-11df-855d-b19fbce37686'))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def fake_extend(image, size, use_cow=False):
pass
self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
class FakeConn():
def getCapabilities(self):
"""Ensure standard capabilities being returned."""
return """<capabilities>
<host><cpu><arch>x86_64</arch></cpu></host>
</capabilities>"""
def getVersion(self):
return 1005001
def getLibVersion(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
def registerCloseCallback(self, cb, opaque):
pass
def nwfilterDefineXML(self, *args, **kwargs):
pass
def nodeDeviceLookupByName(self, x):
pass
self.conn = FakeConn()
self.stubs.Set(libvirt_driver.LibvirtDriver, '_connect',
lambda *a, **k: self.conn)
instance_type = db.flavor_get(self.context, 5)
sys_meta = flavors.save_flavor_info({}, instance_type)
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.test_instance = {
'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
'instance_type_id': '5', # m1.small
'extra_specs': {},
'system_metadata': sys_meta,
"pci_devices": []}
def relpath(self, path):
return os.path.relpath(path, CONF.instances_path)
def tearDown(self):
nova.tests.image.fake.FakeImageService_reset()
super(LibvirtConnTestCase, self).tearDown()
def create_fake_libvirt_mock(self, **kwargs):
"""Defining mocks for LibvirtDriver(libvirt is not used)."""
# A fake libvirt.virConnect
class FakeLibvirtDriver(object):
def defineXML(self, xml):
return FakeVirtDomain()
# Creating mocks
volume_driver = ('iscsi=nova.tests.virt.libvirt.test_libvirt'
'.FakeVolumeDriver')
self.flags(libvirt_volume_drivers=[volume_driver])
fake = FakeLibvirtDriver()
# Customizing above fake if necessary
for key, val in kwargs.items():
fake.__setattr__(key, val)
self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn = fake
def fake_lookup(self, instance_name):
return FakeVirtDomain()
def fake_execute(self, *args, **kwargs):
open(args[-1], "a").close()
def create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0}
return db.service_create(context.get_admin_context(), service_ref)
def test_prepare_pci_device(self):
pci_devices = [dict(hypervisor_name='xxx')]
self.flags(libvirt_type='xen')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
class FakeDev():
def attach(self):
pass
def dettach(self):
pass
def reset(self):
pass
self.mox.StubOutWithMock(self.conn, 'nodeDeviceLookupByName')
self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
self.mox.ReplayAll()
conn._prepare_pci_devices_for_use(pci_devices)
def test_prepare_pci_device_exception(self):
pci_devices = [dict(hypervisor_name='xxx',
id='id1',
instance_uuid='uuid')]
self.flags(libvirt_type='xen')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
class FakeDev():
def attach(self):
pass
def dettach(self):
raise libvirt.libvirtError("xxxxx")
def reset(self):
pass
self.stubs.Set(self.conn, 'nodeDeviceLookupByName',
lambda x: FakeDev())
self.assertRaises(exception.PciDevicePrepareFailed,
conn._prepare_pci_devices_for_use, pci_devices)
def test_detach_pci_devices_exception(self):
pci_devices = [dict(hypervisor_name='xxx',
id='id1',
instance_uuid='uuid')]
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'has_min_version')
libvirt_driver.LibvirtDriver.has_min_version = lambda x, y: False
self.assertRaises(exception.PciDeviceDetachFailed,
conn._detach_pci_devices, None, pci_devices)
def test_detach_pci_devices(self):
fake_domXML1 =\
"""<domain> <devices>
<hostdev mode="subsystem" type="pci" managed="yes">
<source>
<address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
</source>
</hostdev></devices></domain>"""
pci_devices = [dict(hypervisor_name='xxx',
id='id1',
instance_uuid='uuid',
address="0001:04:10:1")]
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'has_min_version')
libvirt_driver.LibvirtDriver.has_min_version = lambda x, y: True
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'get_guest_pci_device')
class FakeDev():
def to_xml(self):
pass
libvirt_driver.LibvirtDriver.get_guest_pci_device =\
lambda x, y: FakeDev()
class FakeDomain():
def detachDeviceFlags(self, xml, flag):
pci_devices[0]['hypervisor_name'] = 'marked'
pass
def XMLDesc(self, flag):
return fake_domXML1
conn._detach_pci_devices(FakeDomain(), pci_devices)
self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked')
def test_detach_pci_devices_timeout(self):
fake_domXML1 =\
"""<domain>
<devices>
<hostdev mode="subsystem" type="pci" managed="yes">
<source>
<address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
</source>
</hostdev>
</devices>
</domain>"""
pci_devices = [dict(hypervisor_name='xxx',
id='id1',
instance_uuid='uuid',
address="0000:04:10:1")]
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'has_min_version')
libvirt_driver.LibvirtDriver.has_min_version = lambda x, y: True
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'get_guest_pci_device')
class FakeDev():
def to_xml(self):
pass
libvirt_driver.LibvirtDriver.get_guest_pci_device =\
lambda x, y: FakeDev()
class FakeDomain():
def detachDeviceFlags(self, xml, flag):
pass
def XMLDesc(self, flag):
return fake_domXML1
self.assertRaises(exception.PciDeviceDetachFailed,
conn._detach_pci_devices, FakeDomain(), pci_devices)
def test_get_connector(self):
initiator = 'fake.initiator.iqn'
ip = 'fakeip'
host = 'fakehost'
wwpns = ['100010604b019419']
wwnns = ['200010604b019419']
self.flags(my_ip=ip)
self.flags(host=host)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
expected = {
'ip': ip,
'initiator': initiator,
'host': host,
'wwpns': wwpns,
'wwnns': wwnns
}
volume = {
'id': 'fake'
}
result = conn.get_volume_connector(volume)
self.assertThat(expected, matchers.DictMatches(result))
def test_close_callback(self):
def get_lib_version_stub():
return (1 * 1000 * 1000) + (0 * 1000) + 1
self.close_callback = None
def set_close_callback(cb, opaque):
self.close_callback = cb
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.stubs.Set(self.conn, "getLibVersion", get_lib_version_stub)
self.mox.StubOutWithMock(conn, '_connect')
self.mox.StubOutWithMock(self.conn, 'registerCloseCallback')
conn._connect(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(self.conn)
self.conn.registerCloseCallback(
mox.IgnoreArg(), mox.IgnoreArg()).WithSideEffects(
set_close_callback)
conn._connect(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(self.conn)
self.conn.registerCloseCallback(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
# verify that the driver registers for the close callback
# and re-connects after receiving the callback
conn._get_connection()
self.assertTrue(self.close_callback)
self.close_callback(self.conn, 1, None)
conn._get_connection()
def test_get_guest_config(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(cfg.acpi, True)
self.assertEqual(cfg.apic, True)
self.assertEqual(cfg.memory, 1024 * 1024 * 2)
self.assertEqual(cfg.vcpus, 1)
self.assertEqual(cfg.os_type, vm_mode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertEqual(cfg.os_root, None)
self.assertEqual(len(cfg.devices), 7)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestInterface)
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestInput)
self.assertEqual(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual(type(cfg.clock),
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "utc")
self.assertEqual(len(cfg.clock.timers), 2)
self.assertEqual(type(cfg.clock.timers[0]),
vconfig.LibvirtConfigGuestTimer)
self.assertEqual(type(cfg.clock.timers[1]),
vconfig.LibvirtConfigGuestTimer)
self.assertEqual(cfg.clock.timers[0].name, "pit")
self.assertEqual(cfg.clock.timers[0].tickpolicy,
"delay")
self.assertEqual(cfg.clock.timers[1].name, "rtc")
self.assertEqual(cfg.clock.timers[1].tickpolicy,
"catchup")
def test_get_guest_config_windows(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
instance_ref['os_type'] = 'windows'
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(type(cfg.clock),
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "localtime")
def test_get_guest_config_with_two_nics(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 2),
None, disk_info)
self.assertEqual(cfg.acpi, True)
self.assertEqual(cfg.memory, 1024 * 1024 * 2)
self.assertEqual(cfg.vcpus, 1)
self.assertEqual(cfg.os_type, vm_mode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertEqual(cfg.os_root, None)
self.assertEqual(len(cfg.devices), 8)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestInterface)
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestInterface)
self.assertEqual(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestInput)
self.assertEqual(type(cfg.devices[7]),
vconfig.LibvirtConfigGuestGraphics)
def test_get_guest_config_bug_1118829(self):
self.flags(libvirt_type='uml')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = {'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {u'vda': {'bus': 'virtio',
'type': 'disk',
'dev': u'vda'},
'root': {'bus': 'virtio',
'type': 'disk',
'dev': 'vda'}}}
        # NOTE(jdg): Leave this blank for this specific test;
        # it still exercises the failure code path without
        # requiring fakes and stubs for the iscsi discovery
block_device_info = {}
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
block_device_info = {'root_device_name': '/dev/vdb'}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
block_device_info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
self.assertEqual(cfg.acpi, False)
self.assertEqual(cfg.memory, 1024 * 1024 * 2)
self.assertEqual(cfg.vcpus, 1)
self.assertEqual(cfg.os_type, "uml")
self.assertEqual(cfg.os_boot_dev, [])
self.assertEqual(cfg.os_root, '/dev/vdb')
self.assertEqual(len(cfg.devices), 3)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_with_block_device(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
info = {'block_device_mapping': [
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref, info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, info)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[2].target_dev, 'vdc')
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[3].target_dev, 'vdd')
def test_get_guest_config_with_configdrive(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
# make configdrive.required_by() return True
instance_ref['config_drive'] = True
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[2].target_dev, 'hdd')
def test_get_guest_config_with_vnc(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=False)
self.flags(enabled=False, group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEqual(len(cfg.devices), 5)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual(cfg.devices[4].type, "vnc")
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=True)
self.flags(enabled=False, group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEqual(len(cfg.devices), 6)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEqual(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_spice_and_tablet(self):
self.flags(libvirt_type='kvm',
vnc_enabled=False,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=False,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEqual(len(cfg.devices), 6)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEqual(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "spice")
def test_get_guest_config_with_spice_and_agent(self):
self.flags(libvirt_type='kvm',
vnc_enabled=False,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=True,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEqual(len(cfg.devices), 6)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestChannel)
self.assertEqual(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[5].type, "spice")
def test_get_guest_config_with_vnc_and_spice(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=True,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEqual(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestChannel)
self.assertEqual(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual(type(cfg.devices[7]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[6].type, "vnc")
self.assertEqual(cfg.devices[7].type, "spice")
def test_get_guest_config_with_qga_through_image_meta(self):
self.flags(libvirt_type='kvm')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
image_meta = {"properties": {"hw_qemu_guest_agent": "yes"}}
cfg = conn.get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 7)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEqual(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestChannel)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
self.assertEqual(cfg.devices[6].type, "unix")
self.assertEqual(cfg.devices[6].target_name, "org.qemu.guest_agent.0")
def test_get_guest_config_without_qga_through_image_meta(self):
self.flags(libvirt_type='kvm')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
image_meta = {"properties": {"hw_qemu_guest_agent": "no"}}
cfg = conn.get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 6)
self.assertEqual(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEqual(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEqual(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
def _create_fake_service_compute(self):
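        # register a fake service and compute node pair so the PCI
        # passthrough tests below have a compute_node row to attach
        # their devices to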
service_info = {
'host': 'fake',
'report_count': 0
}
service_ref = db.service_create(self.context, service_info)
compute_info = {
'vcpus': 2,
'memory_mb': 1024,
'local_gb': 2048,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'free_ram_mb': 1024,
'free_disk_gb': 2048,
'hypervisor_type': 'xen',
'hypervisor_version': 1,
'running_vms': 0,
'cpu_info': '',
'current_workload': 0,
'service_id': service_ref['id']
}
compute_ref = db.compute_node_create(self.context, compute_info)
return (service_ref, compute_ref)
def test_get_guest_config_with_pci_passthrough_kvm(self):
self.flags(libvirt_type='kvm')
service_ref, compute_ref = self._create_fake_service_compute()
instance_ref = db.instance_create(self.context, self.test_instance)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status='allocated',
address='0000:00:00.1',
compute_id=compute_ref['id'],
instance_uuid=instance_ref['uuid'],
extra_info=jsonutils.dumps({}))
db.pci_device_update(self.context, pci_device_info['compute_node_id'],
pci_device_info['address'], pci_device_info)
instance_ref = db.instance_get(self.context, instance_ref['id'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
had_pci = 0
        # we only care about the PCI hostdev devices here
for dev in cfg.devices:
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
had_pci += 1
self.assertEqual(dev.type, 'pci')
self.assertEqual(dev.managed, 'yes')
self.assertEqual(dev.mode, 'subsystem')
self.assertEqual(dev.domain, "0000")
self.assertEqual(dev.bus, "00")
self.assertEqual(dev.slot, "00")
self.assertEqual(dev.function, "1")
self.assertEqual(had_pci, 1)
def test_get_guest_config_with_pci_passthrough_xen(self):
self.flags(libvirt_type='xen')
service_ref, compute_ref = self._create_fake_service_compute()
instance_ref = db.instance_create(self.context, self.test_instance)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status='allocated',
address='0000:00:00.2',
compute_id=compute_ref['id'],
instance_uuid=instance_ref['uuid'],
extra_info=jsonutils.dumps({}))
db.pci_device_update(self.context, pci_device_info['compute_node_id'],
pci_device_info['address'], pci_device_info)
instance_ref = db.instance_get(self.context, instance_ref['id'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
had_pci = 0
        # we only care about the PCI hostdev devices here
for dev in cfg.devices:
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
had_pci += 1
self.assertEqual(dev.type, 'pci')
self.assertEqual(dev.managed, 'no')
self.assertEqual(dev.mode, 'subsystem')
self.assertEqual(dev.domain, "0000")
self.assertEqual(dev.bus, "00")
self.assertEqual(dev.slot, "00")
self.assertEqual(dev.function, "2")
self.assertEqual(had_pci, 1)
def test_get_guest_cpu_config_none(self):
self.flags(libvirt_cpu_mode="none")
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(conf.cpu, None)
def test_get_guest_cpu_config_default_kvm(self):
self.flags(libvirt_type="kvm",
libvirt_cpu_mode=None)
def get_lib_version_stub():
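            # advertise libvirt 0.9.11, new enough for the driver to use
            # the host-model CPU mode asserted below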
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertEqual(conf.cpu.model, None)
def test_get_guest_cpu_config_default_uml(self):
self.flags(libvirt_type="uml",
libvirt_cpu_mode=None)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(conf.cpu, None)
def test_get_guest_cpu_config_default_lxc(self):
self.flags(libvirt_type="lxc",
libvirt_cpu_mode=None)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(conf.cpu, None)
def test_get_guest_cpu_config_host_passthrough_new(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-passthrough")
self.assertEqual(conf.cpu.model, None)
def test_get_guest_cpu_config_host_model_new(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertEqual(conf.cpu.model, None)
def test_get_guest_cpu_config_custom_new(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "Penryn")
def test_get_guest_cpu_config_host_passthrough_old(self):
def get_lib_version_stub():
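            # advertise libvirt 0.9.7, which predates host-passthrough
            # support, so get_guest_config is expected to raise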
return (0 * 1000 * 1000) + (9 * 1000) + 7
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
self.assertRaises(exception.NovaException,
conn.get_guest_config,
instance_ref,
_fake_network_info(self.stubs, 1),
None,
disk_info)
def test_get_guest_cpu_config_host_model_old(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 7
# Ensure we have a predictable host CPU
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("tm2"))
cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("ht"))
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
self.stubs.Set(libvirt_driver.LibvirtDriver,
"get_host_capabilities",
get_host_capabilities_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, None)
self.assertEqual(conf.cpu.model, "Opteron_G4")
self.assertEqual(conf.cpu.vendor, "AMD")
self.assertEqual(len(conf.cpu.features), 2)
self.assertEqual(conf.cpu.features[0].name, "tm2")
self.assertEqual(conf.cpu.features[1].name, "ht")
def test_get_guest_cpu_config_custom_old(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 7
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEqual(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, None)
self.assertEqual(conf.cpu.model, "Penryn")
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.HVM})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=True)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.XEN})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=False,
xen_only=True)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel(self):
instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=False, rescue=instance_data)
def test_xml_uuid(self):
self._check_xml_and_uuid({"disk_format": "raw"})
def test_lxc_container_and_uri(self):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
def test_xml_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data)
def test_xml_user_specified_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data, 'sd')
def test_xml_disk_driver(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_driver(instance_data)
def test_xml_disk_bus_virtio(self):
self._check_xml_and_disk_bus({"disk_format": "raw"},
None,
(("disk", "virtio", "vda"),))
def test_xml_disk_bus_ide(self):
self._check_xml_and_disk_bus({"disk_format": "iso"},
None,
(("cdrom", "ide", "hda"),))
def test_xml_disk_bus_ide_and_virtio(self):
swap = {'device_name': '/dev/vdc',
'swap_size': 1}
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
block_device_info = {
'swap': swap,
'ephemerals': ephemerals}
self._check_xml_and_disk_bus({"disk_format": "iso"},
block_device_info,
(("cdrom", "ide", "hda"),
("disk", "virtio", "vdb"),
("disk", "virtio", "vdc")))
def test_list_instances(self):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# Only one should be listed, since domain with ID 0 must be skipped
self.assertEqual(len(instances), 1)
def test_list_instance_uuids(self):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instance_uuids()
# Only one should be listed, since domain with ID 0 must be skipped
self.assertEqual(len(instances), 1)
def test_list_defined_instances(self):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: [1]
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# Only one defined domain should be listed
self.assertEqual(len(instances), 1)
def test_list_instances_when_instance_deleted(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError("we deleted an instance!")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
libvirt.libvirtError.get_error_code().AndReturn(
libvirt.VIR_ERR_NO_DOMAIN)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# None should be listed, since we fake deleted the last one
self.assertEqual(len(instances), 0)
def test_list_instance_uuids_when_instance_deleted(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError("we deleted an instance!")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
libvirt.libvirtError.get_error_code().AndReturn(
libvirt.VIR_ERR_NO_DOMAIN)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instance_uuids()
# None should be listed, since we fake deleted the last one
self.assertEqual(len(instances), 0)
def test_list_instances_throws_nova_exception(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError("we deleted an instance!")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
libvirt.libvirtError.get_error_code().AndReturn(
libvirt.VIR_ERR_INTERNAL_ERROR)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.NovaException, conn.list_instances)
def test_list_instance_uuids_throws_nova_exception(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError("we deleted an instance!")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
libvirt.libvirtError.get_error_code().AndReturn(
libvirt.VIR_ERR_INTERNAL_ERROR)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.NovaException, conn.list_instance_uuids)
def test_get_all_block_devices(self):
xml = [
# NOTE(vish): id 0 is skipped
None,
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/3'/>
</disk>
</devices>
</domain>
""",
]
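        # only the <disk type='block'> sources should be reported back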
def fake_lookup(id):
return FakeVirtDomain(xml[id])
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 4
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_all_block_devices()
self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
def test_get_disks(self):
xml = [
# NOTE(vish): id 0 is skipped
None,
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/3'/>
<target dev='vdb' bus='virtio'/>
</disk>
</devices>
</domain>
""",
]
def fake_lookup(id):
return FakeVirtDomain(xml[id])
def fake_lookup_name(name):
return FakeVirtDomain(xml[1])
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 4
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_disks(conn.list_instances()[0])
self.assertEqual(devices, ['vda', 'vdb'])
def test_snapshot_in_ami_format(self):
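        # the snapshot call must step the instance task_state through
        # IMAGE_PENDING_UPLOAD and then IMAGE_UPLOADING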
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
        # Assign a different image_ref from nova/images/fakes to test the
        # ami format
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
        # Create a new image; it will be updated by the snapshot method.
        # The same image_service instance is needed so snapshot can find it.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['disk_format'], 'ami')
self.assertEqual(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_ami_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
        # Assign a different image_ref from nova/images/fakes to test the
        # ami format
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
        # Create a new image; it will be updated by the snapshot method.
        # The same image_service instance is needed so snapshot can find it.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['disk_format'], 'ami')
self.assertEqual(snapshot['name'], snapshot_name)
def test_snapshot_in_raw_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
        # Create a new image; it will be updated by the snapshot method.
        # The same image_service instance is needed so snapshot can find it.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
def convert_image(source, dest, out_format):
libvirt_driver.libvirt_utils.files[dest] = ''
self.stubs.Set(images, 'convert_image', convert_image)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['disk_format'], 'raw')
self.assertEqual(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_raw_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
        # Create a new image; it will be updated by the snapshot method.
        # The same image_service instance is needed so snapshot can find it.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
def convert_image(source, dest, out_format):
libvirt_driver.libvirt_utils.files[dest] = ''
self.stubs.Set(images, 'convert_image', convert_image)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['disk_format'], 'raw')
self.assertEqual(snapshot['name'], snapshot_name)
def test_snapshot_in_qcow2_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
        # Create a new image; it will be updated by the snapshot method.
        # The same image_service instance is needed so snapshot can find it.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['disk_format'], 'qcow2')
self.assertEqual(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_qcow2_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
        # Create a new image; it will be updated by the snapshot method.
        # The same image_service instance is needed so snapshot can find it.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['disk_format'], 'qcow2')
self.assertEqual(snapshot['name'], snapshot_name)
def test_snapshot_no_image_architecture(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
        # Assign a different image_ref from nova/images/fakes to test a
        # different base image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
        # Create a new image; it will be updated by the snapshot method.
        # The same image_service instance is needed so snapshot can find it.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_image_architecture(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
        # Assign a different image_ref from nova/images/fakes to test a
        # different base image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
        # Create a new image; it will be updated by the snapshot method.
        # The same image_service instance is needed so snapshot can find it.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['name'], snapshot_name)
def test_snapshot_no_original_image(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign a non-existent image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_original_image(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign a non-existent image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['name'], snapshot_name)
def test_snapshot_metadata_image(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
image_service = nova.tests.image.fake.FakeImageService()
# Assign an image with an architecture defined (x86_64)
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id),
'architecture': 'fake_arch',
'key_a': 'value_a',
'key_b': 'value_b'}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['properties']['architecture'], 'fake_arch')
self.assertEqual(snapshot['properties']['key_a'], 'value_a')
self.assertEqual(snapshot['properties']['key_b'], 'value_b')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['name'], snapshot_name)
def test_snapshot_with_os_type(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
image_service = nova.tests.image.fake.FakeImageService()
# Assign a non-existent image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
test_instance["os_type"] = 'linux'
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id),
'os_type': instance_ref['os_type']}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['properties']['os_type'],
instance_ref['os_type'])
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['name'], snapshot_name)
def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume, None,
{"driver_volume_type": "badtype"},
{"name": "fake-instance"},
"/dev/sda")
def test_attach_blockio_invalid_hypervisor(self):
self.flags(libvirt_type='fake_type')
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InvalidHypervisorType,
conn.attach_volume, None,
{"driver_volume_type": "fake",
"data": {"logical_block_size": "4096",
"physical_block_size": "4096"}
},
{"name": "fake-instance"},
"/dev/sda")
def test_attach_blockio_invalid_version(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 8
self.flags(libvirt_type='qemu')
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(self.conn, "getLibVersion", get_lib_version_stub)
self.assertRaises(exception.Invalid,
conn.attach_volume, None,
{"driver_volume_type": "fake",
"data": {"logical_block_size": "4096",
"physical_block_size": "4096"}
},
{"name": "fake-instance"},
"/dev/sda")
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _fake_network_info(self.stubs, 2)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, instance_data)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(self.context, instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
interfaces = tree.findall("./devices/interface")
self.assertEqual(len(interfaces), 2)
self.assertEqual(interfaces[0].get('type'), 'bridge')
def _check_xml_and_container(self, instance):
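        # Shared helper: LXC domains should use the 'exe' OS type and mount
        # the guest filesystem at '/'.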
user_context = context.RequestContext(self.user_id,
self.project_id)
instance_ref = db.instance_create(user_context, instance)
self.flags(libvirt_type='lxc')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(conn.uri(), 'lxc:///')
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(self.context, instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
        checks = [
            (lambda t: t.find('.').get('type'), 'lxc'),
            (lambda t: t.find('./os/type').text, 'exe'),
            (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
        for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))
target = tree.find('./devices/filesystem/source').get('dir')
        self.assertGreater(len(target), 0)
def _check_xml_and_disk_prefix(self, instance, prefix=None):
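        # Shared helper: the disk target dev must use the per-hypervisor
        # prefix (vda, sda, ubda) or the configured libvirt_disk_prefix.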
user_context = context.RequestContext(self.user_id,
self.project_id)
instance_ref = db.instance_create(user_context, instance)
def _get_prefix(p, default):
if p:
return p + 'a'
return default
type_disk_map = {
'qemu': [
(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'xen': [
(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'sda'))],
'kvm': [
(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'uml': [
(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'ubda'))]
}
for (libvirt_type, checks) in type_disk_map.iteritems():
self.flags(libvirt_type=libvirt_type)
if prefix:
self.flags(libvirt_disk_prefix=prefix)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(self.context, instance_ref,
network_info, disk_info)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed check %d' %
(check(tree), expected_result, i))
def _check_xml_and_disk_driver(self, image_meta):
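        # Shared helper: disk cache should be 'none' when O_DIRECT is
        # available and fall back to 'writethrough' when it is not.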
os_open = os.open
directio_supported = True
def os_open_stub(path, flags, *args, **kwargs):
if flags & os.O_DIRECT:
if not directio_supported:
raise OSError(errno.EINVAL,
'%s: %s' % (os.strerror(errno.EINVAL), path))
flags &= ~os.O_DIRECT
return os_open(path, flags, *args, **kwargs)
self.stubs.Set(os, 'open', os_open_stub)
def connection_supports_direct_io_stub(*args, **kwargs):
return directio_supported
self.stubs.Set(libvirt_driver.LibvirtDriver,
'_supports_direct_io', connection_supports_direct_io_stub)
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
self.assertEqual(disk.get("cache"), "none")
directio_supported = False
# The O_DIRECT availability is cached on first use in
# LibvirtDriver, hence we re-create it here
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
self.assertEqual(disk.get("cache"), "writethrough")
def _check_xml_and_disk_bus(self, image_meta,
block_device_info, wantConfig):
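        # Shared helper: compare each disk's device type, bus and dev name
        # against the expected configuration.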
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
block_device_info,
image_meta)
xml = drv.to_xml(self.context, instance_ref,
network_info, disk_info, image_meta,
block_device_info=block_device_info)
tree = etree.fromstring(xml)
got_disks = tree.findall('./devices/disk')
got_disk_targets = tree.findall('./devices/disk/target')
for i in range(len(wantConfig)):
want_device_type = wantConfig[i][0]
want_device_bus = wantConfig[i][1]
want_device_dev = wantConfig[i][2]
got_device_type = got_disks[i].get('device')
got_device_bus = got_disk_targets[i].get('bus')
got_device_dev = got_disk_targets[i].get('dev')
self.assertEqual(got_device_type, want_device_type)
self.assertEqual(got_device_bus, want_device_bus)
self.assertEqual(got_device_dev, want_device_dev)
def _check_xml_and_uuid(self, image_meta):
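        # Shared helper: the domain XML must carry the instance UUID.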
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
rescue=None, expect_xen_hvm=False, xen_only=False):
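        # Shared helper: verify the connection URI and generated XML for each
        # libvirt type, plus an explicitly configured libvirt_uri override.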
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, instance)
network_ref = db.project_get_networks(context.get_admin_context(),
self.project_id)[0]
type_uri_map = {'qemu': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./os/type').text,
vm_mode.HVM),
(lambda t: t.find('./devices/emulator'), None)]),
'kvm': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./os/type').text,
vm_mode.HVM),
(lambda t: t.find('./devices/emulator'), None)]),
'uml': ('uml:///system',
[(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./os/type').text,
vm_mode.UML)]),
'xen': ('xen:///',
[(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./os/type').text,
vm_mode.XEN)])}
if expect_xen_hvm or xen_only:
hypervisors_to_check = ['xen']
else:
hypervisors_to_check = ['qemu', 'kvm', 'xen']
if expect_xen_hvm:
type_uri_map = {}
type_uri_map['xen'] = ('xen:///',
[(lambda t: t.find('.').get('type'),
'xen'),
(lambda t: t.find('./os/type').text,
vm_mode.HVM)])
for hypervisor_type in hypervisors_to_check:
check_list = type_uri_map[hypervisor_type][1]
if rescue:
suffix = '.rescue'
else:
suffix = ''
if expect_kernel:
check = (lambda t: self.relpath(t.find('./os/kernel').text).
split('/')[1], 'kernel' + suffix)
else:
check = (lambda t: t.find('./os/kernel'), None)
check_list.append(check)
# Hypervisors that only support vm_mode.HVM should
# not produce configuration that results in kernel
# arguments
if not expect_kernel and hypervisor_type in ['qemu', 'kvm']:
check = (lambda t: t.find('./os/root'), None)
check_list.append(check)
check = (lambda t: t.find('./os/cmdline'), None)
check_list.append(check)
if expect_ramdisk:
check = (lambda t: self.relpath(t.find('./os/initrd').text).
split('/')[1], 'ramdisk' + suffix)
else:
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
if hypervisor_type in ['qemu', 'kvm']:
xpath = "./sysinfo/system/entry"
check = (lambda t: t.findall(xpath)[0].get("name"),
"manufacturer")
check_list.append(check)
check = (lambda t: t.findall(xpath)[0].text,
version.vendor_string())
check_list.append(check)
check = (lambda t: t.findall(xpath)[1].get("name"),
"product")
check_list.append(check)
check = (lambda t: t.findall(xpath)[1].text,
version.product_string())
check_list.append(check)
check = (lambda t: t.findall(xpath)[2].get("name"),
"version")
check_list.append(check)
# NOTE(sirp): empty strings don't roundtrip in lxml (they are
# converted to None), so we need an `or ''` to correct for that
check = (lambda t: t.findall(xpath)[2].text or '',
version.version_string_with_package())
check_list.append(check)
check = (lambda t: t.findall(xpath)[3].get("name"),
"serial")
check_list.append(check)
check = (lambda t: t.findall(xpath)[3].text,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
check_list.append(check)
check = (lambda t: t.findall(xpath)[4].get("name"),
"uuid")
check_list.append(check)
check = (lambda t: t.findall(xpath)[4].text,
instance['uuid'])
check_list.append(check)
if hypervisor_type in ['qemu', 'kvm']:
check = (lambda t: t.findall('./devices/serial')[0].get(
'type'), 'file')
check_list.append(check)
check = (lambda t: t.findall('./devices/serial')[1].get(
'type'), 'pty')
check_list.append(check)
check = (lambda t: self.relpath(t.findall(
'./devices/serial/source')[0].get('path')).
split('/')[1], 'console.log')
check_list.append(check)
else:
check = (lambda t: t.find('./devices/console').get(
'type'), 'pty')
check_list.append(check)
common_checks = [
(lambda t: t.find('.').tag, 'domain'),
(lambda t: t.find('./memory').text, '2097152')]
if rescue:
common_checks += [
(lambda t: self.relpath(t.findall('./devices/disk/source')[0].
get('file')).split('/')[1], 'disk.rescue'),
(lambda t: self.relpath(t.findall('./devices/disk/source')[1].
get('file')).split('/')[1], 'disk')]
else:
common_checks += [(lambda t: self.relpath(t.findall(
'./devices/disk/source')[0].get('file')).split('/')[1],
'disk')]
common_checks += [(lambda t: self.relpath(t.findall(
'./devices/disk/source')[1].get('file')).split('/')[1],
'disk.local')]
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(conn.uri(), expected_uri)
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
rescue=rescue)
xml = conn.to_xml(self.context, instance_ref,
network_info, disk_info, rescue=rescue)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed check %d' %
(check(tree), expected_result, i))
for i, (check, expected_result) in enumerate(common_checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed common check %d' %
(check(tree), expected_result, i))
filterref = './devices/interface/filterref'
vif = network_info[0]
nic_id = vif['address'].replace(':', '')
fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), conn)
instance_filter_name = fw._instance_filter_name(instance_ref,
nic_id)
self.assertEqual(tree.find(filterref).get('filter'),
instance_filter_name)
# This test is supposed to make sure we don't
# override a specifically set uri
#
# Deliberately not just assigning this string to CONF.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the CONF.
testuri = 'something completely different'
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(conn.uri(), testuri)
db.instance_destroy(user_context, instance_ref['uuid'])
def test_ensure_filtering_rules_for_instance_timeout(self):
        # ensure_filtering_rules_for_instance() finishes with timeout.
# Preparing mocks
def fake_none(self, *args):
return
def fake_raise(self):
raise libvirt.libvirtError('ERR')
class FakeTime(object):
def __init__(self):
self.counter = 0
def sleep(self, t):
self.counter += t
fake_timer = FakeTime()
# _fake_network_info must be called before create_fake_libvirt_mock(),
# as _fake_network_info calls importutils.import_class() and
# create_fake_libvirt_mock() mocks importutils.import_class().
network_info = _fake_network_info(self.stubs, 1)
self.create_fake_libvirt_mock()
instance_ref = db.instance_create(self.context, self.test_instance)
# Start test
self.mox.ReplayAll()
try:
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
self.stubs.Set(conn.firewall_driver,
'prepare_instance_filter',
fake_none)
self.stubs.Set(conn.firewall_driver,
'instance_filter_exists',
fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref,
network_info,
time_module=fake_timer)
except exception.NovaException as e:
msg = ('The firewall filter for %s does not exist' %
instance_ref['name'])
            self.assertIn(msg, str(e))
self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
"amount of time")
db.instance_destroy(self.context, instance_ref['uuid'])
def test_check_can_live_migrate_dest_all_pass_with_block_migration(self):
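        # A passing destination check with block migration should report the
        # shared storage test file and the available disk in MB.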
instance_ref = db.instance_create(self.context, self.test_instance)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
filename = "file"
self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
self.mox.StubOutWithMock(conn, '_compare_cpu')
# _check_cpu_match
conn._compare_cpu("asdf")
# mounted_on_same_shared_storage
conn._create_shared_storage_test_file().AndReturn(filename)
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, True)
self.assertThat({"filename": "file",
'disk_available_mb': 409600,
"disk_over_commit": False,
"block_migration": True},
matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
instance_ref = db.instance_create(self.context, self.test_instance)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
filename = "file"
self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
self.mox.StubOutWithMock(conn, '_compare_cpu')
# _check_cpu_match
conn._compare_cpu("asdf")
# mounted_on_same_shared_storage
conn._create_shared_storage_test_file().AndReturn(filename)
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, False)
self.assertThat({"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": None},
matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
self.mox.StubOutWithMock(conn, '_compare_cpu')
conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo(
reason='foo')
)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidCPUInfo,
conn.check_can_live_migrate_destination,
self.context, instance_ref,
compute_info, compute_info, False)
def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
conn._cleanup_shared_storage_test_file("file")
self.mox.ReplayAll()
conn.check_can_live_migrate_destination_cleanup(self.context,
dest_check_data)
def test_check_can_live_migrate_source_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref['name']).AndReturn('[]')
self.mox.StubOutWithMock(conn, "_assert_dest_node_has_enough_disk")
conn._assert_dest_node_has_enough_disk(
self.context, instance_ref, dest_check_data['disk_available_mb'],
False)
self.mox.ReplayAll()
conn.check_can_live_migrate_source(self.context, instance_ref,
dest_check_data)
def test_check_can_live_migrate_source_vol_backed_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": True}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref['name']).AndReturn('[]')
self.mox.ReplayAll()
ret = conn.check_can_live_migrate_source(self.context, instance_ref,
dest_check_data)
        self.assertIsInstance(ret, dict)
        self.assertIn('is_shared_storage', ret)
def test_check_can_live_migrate_source_vol_backed_w_disk_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": True}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref['name']).AndReturn(
'[{"fake_disk_attr": "fake_disk_val"}]')
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
conn.check_can_live_migrate_source, self.context,
instance_ref, dest_check_data)
def test_check_can_live_migrate_source_vol_backed_fails(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": False}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref['name']).AndReturn(
'[{"fake_disk_attr": "fake_disk_val"}]')
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
conn.check_can_live_migrate_source, self.context,
instance_ref, dest_check_data)
def test_check_can_live_migrate_dest_fail_shared_storage_with_blockm(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
'disk_available_mb': 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(True)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref['name']).AndReturn('[]')
self.mox.ReplayAll()
self.assertRaises(exception.InvalidLocalStorage,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_check_can_live_migrate_no_shared_storage_no_blck_mig_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
'disk_available_mb': 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref['name']).AndReturn('[]')
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
instance_ref = db.instance_create(self.context, self.test_instance)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref["name"]).AndReturn(
'[{"virt_disk_size":2}]')
conn.get_instance_disk_info(instance_ref["name"]).AndReturn(
'[{"virt_disk_size":2}]')
dest_check_data = {"filename": "file",
"disk_available_mb": 0,
"block_migration": True,
"disk_over_commit": False}
self.mox.ReplayAll()
self.assertRaises(exception.MigrationError,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_live_migration_raises_exception(self):
# Confirms recover method is called when exceptions are raised.
# Preparing data
self.compute = importutils.import_object(CONF.compute_manager)
instance_dict = {'host': 'fake',
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE}
instance_ref = db.instance_create(self.context, self.test_instance)
instance_ref = db.instance_update(self.context, instance_ref['uuid'],
instance_dict)
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI")
_bandwidth = CONF.live_migration_bandwidth
vdmock.migrateToURI(CONF.live_migration_uri % 'dest',
mox.IgnoreArg(),
None,
_bandwidth).AndRaise(libvirt.libvirtError('ERR'))
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
self.compute._rollback_live_migration(self.context, instance_ref,
'dest', False)
        # Start test
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', False,
self.compute._rollback_live_migration)
instance_ref = db.instance_get(self.context, instance_ref['id'])
        self.assertEqual(instance_ref['vm_state'], vm_states.ACTIVE)
        self.assertEqual(instance_ref['power_state'], power_state.RUNNING)
db.instance_destroy(self.context, instance_ref['uuid'])
def test_create_images_and_backing(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')
libvirt_driver.libvirt_utils.create_image(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
self.mox.ReplayAll()
self.stubs.Set(os.path, 'exists', lambda *args: False)
disk_info_json = jsonutils.dumps([{'path': 'foo', 'type': None,
'disk_size': 0,
'backing_file': None}])
conn._create_images_and_backing(self.context, self.test_instance,
"/fake/instance/dir", disk_info_json)
def test_create_images_and_backing_disk_info_none(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
self.mox.ReplayAll()
conn._create_images_and_backing(self.context, self.test_instance,
"/fake/instance/dir", None)
def test_pre_live_migration_works_correctly_mocked(self):
# Creating testdata
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
class FakeNetworkInfo():
def fixed_ips(self):
return ["test_ip_addr"]
def fake_none(*args, **kwargs):
return
self.stubs.Set(conn, '_create_images_and_backing', fake_none)
inst_ref = {'id': 'foo'}
c = context.get_admin_context()
nw_info = FakeNetworkInfo()
# Creating mocks
self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
driver.block_device_info_get_mapping(vol
).AndReturn(vol['block_device_mapping'])
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
disk_info = {
'bus': "scsi",
'dev': v['mount_device'].rpartition("/")[2],
'type': "disk"
}
conn.volume_driver_method('connect_volume',
v['connection_info'],
disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
self.mox.ReplayAll()
result = conn.pre_live_migration(c, inst_ref, vol, nw_info, None)
        self.assertIsNone(result)
def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
# Creating testdata, using temp dir.
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def fake_none(*args, **kwargs):
return
self.stubs.Set(conn, '_create_images_and_backing', fake_none)
class FakeNetworkInfo():
def fixed_ips(self):
return ["test_ip_addr"]
inst_ref = db.instance_create(self.context, self.test_instance)
c = context.get_admin_context()
nw_info = FakeNetworkInfo()
# Creating mocks
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
disk_info = {
'bus': "scsi",
'dev': v['mount_device'].rpartition("/")[2],
'type': "disk"
}
conn.volume_driver_method('connect_volume',
v['connection_info'],
disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
self.mox.ReplayAll()
migrate_data = {'is_shared_storage': False,
'is_volume_backed': True,
'block_migration': False,
'instance_relative_path': inst_ref['name']
}
ret = conn.pre_live_migration(c, inst_ref, vol, nw_info, None,
migrate_data)
            self.assertIsNone(ret)
self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
inst_ref['name'])))
db.instance_destroy(self.context, inst_ref['uuid'])
def test_pre_live_migration_plug_vifs_retry_fails(self):
self.flags(live_migration_retry_count=3)
instance = {'name': 'test', 'uuid': 'uuid'}
def fake_plug_vifs(instance, network_info):
raise processutils.ProcessExecutionError()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
self.assertRaises(processutils.ProcessExecutionError,
conn.pre_live_migration,
self.context, instance, block_device_info=None,
network_info=[], disk_info={})
def test_pre_live_migration_plug_vifs_retry_works(self):
self.flags(live_migration_retry_count=3)
called = {'count': 0}
instance = {'name': 'test', 'uuid': 'uuid'}
def fake_plug_vifs(instance, network_info):
called['count'] += 1
if called['count'] < CONF.live_migration_retry_count:
raise processutils.ProcessExecutionError()
else:
return
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
conn.pre_live_migration(self.context, instance, block_device_info=None,
network_info=[], disk_info={})
def test_get_instance_disk_info_works_correctly(self):
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
GB = 1024 * 1024 * 1024
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
self.mox.StubOutWithMock(os.path, "getsize")
os.path.getsize('/test/disk').AndReturn((10737418240))
os.path.getsize('/test/disk.local').AndReturn((3328599655))
ret = ("image: /test/disk\n"
"file format: raw\n"
"virtual size: 20G (21474836480 bytes)\n"
"disk size: 3.1G\n"
"cluster_size: 2097152\n"
"backing file: /test/dummy (actual path: /backing/file)\n")
self.mox.StubOutWithMock(os.path, "exists")
os.path.exists('/test/disk.local').AndReturn(True)
self.mox.StubOutWithMock(utils, "execute")
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/test/disk.local').AndReturn((ret, ''))
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = conn.get_instance_disk_info(instance_ref['name'])
info = jsonutils.loads(info)
self.assertEqual(info[0]['type'], 'raw')
self.assertEqual(info[0]['path'], '/test/disk')
self.assertEqual(info[0]['disk_size'], 10737418240)
self.assertEqual(info[0]['backing_file'], "")
self.assertEqual(info[0]['over_committed_disk_size'], 0)
self.assertEqual(info[1]['type'], 'qcow2')
self.assertEqual(info[1]['path'], '/test/disk.local')
self.assertEqual(info[1]['virt_disk_size'], 21474836480)
self.assertEqual(info[1]['backing_file'], "file")
self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
db.instance_destroy(self.context, instance_ref['uuid'])
def test_post_live_migration(self):
vol = {'block_device_mapping': [
{'connection_info': 'dummy1', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy2', 'mount_device': '/dev/sdb'}]}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
inst_ref = {'id': 'foo'}
cntx = context.get_admin_context()
# Set up the mock expectations
with contextlib.nested(
mock.patch.object(driver, 'block_device_info_get_mapping',
return_value=vol['block_device_mapping']),
mock.patch.object(conn, 'volume_driver_method')
) as (block_device_info_get_mapping, volume_driver_method):
conn.post_live_migration(cntx, inst_ref, vol)
block_device_info_get_mapping.assert_has_calls([
mock.call(vol)])
volume_driver_method.assert_has_calls([
mock.call('disconnect_volume',
v['connection_info'],
v['mount_device'].rpartition("/")[2])
for v in vol['block_device_mapping']])
def test_get_instance_disk_info_excludes_volumes(self):
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/fake/path/to/volume1'/>"
"<target dev='vdc' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/fake/path/to/volume2'/>"
"<target dev='vdd' bus='virtio'/></disk>"
"</devices></domain>")
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
GB = 1024 * 1024 * 1024
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
self.mox.StubOutWithMock(os.path, "getsize")
os.path.getsize('/test/disk').AndReturn((10737418240))
os.path.getsize('/test/disk.local').AndReturn((3328599655))
ret = ("image: /test/disk\n"
"file format: raw\n"
"virtual size: 20G (21474836480 bytes)\n"
"disk size: 3.1G\n"
"cluster_size: 2097152\n"
"backing file: /test/dummy (actual path: /backing/file)\n")
self.mox.StubOutWithMock(os.path, "exists")
os.path.exists('/test/disk.local').AndReturn(True)
self.mox.StubOutWithMock(utils, "execute")
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/test/disk.local').AndReturn((ret, ''))
self.mox.ReplayAll()
conn_info = {'driver_volume_type': 'fake'}
info = {'block_device_mapping': [
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = conn.get_instance_disk_info(instance_ref['name'],
block_device_info=info)
info = jsonutils.loads(info)
self.assertEqual(info[0]['type'], 'raw')
self.assertEqual(info[0]['path'], '/test/disk')
self.assertEqual(info[0]['disk_size'], 10737418240)
self.assertEqual(info[0]['backing_file'], "")
self.assertEqual(info[0]['over_committed_disk_size'], 0)
self.assertEqual(info[1]['type'], 'qcow2')
self.assertEqual(info[1]['path'], '/test/disk.local')
self.assertEqual(info[1]['virt_disk_size'], 21474836480)
self.assertEqual(info[1]['backing_file'], "file")
self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
db.instance_destroy(self.context, instance_ref['uuid'])
def test_spawn_with_network_info(self):
# Preparing mocks
def fake_none(*args, **kwargs):
return
def fake_getLibVersion():
return 9007
def fake_getCapabilities():
return """
<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='2' threads='1'/>
<feature name='xtpr'/>
</cpu>
</host>
</capabilities>
"""
# _fake_network_info must be called before create_fake_libvirt_mock(),
# as _fake_network_info calls importutils.import_class() and
# create_fake_libvirt_mock() mocks importutils.import_class().
network_info = _fake_network_info(self.stubs, 1)
self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
getCapabilities=fake_getCapabilities,
getVersion=lambda: 1005001)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
instance_type = db.flavor_get(self.context,
instance_ref['instance_type_id'])
sys_meta = flavors.save_flavor_info({}, instance_type)
instance_ref['system_metadata'] = sys_meta
instance = db.instance_create(self.context, instance_ref)
# Mock out the get_info method of the LibvirtDriver so that the polling
# in the spawn method of the LibvirtDriver returns immediately
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
libvirt_driver.LibvirtDriver.get_info(instance
).AndReturn({'state': power_state.RUNNING})
# Start test
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
self.stubs.Set(conn.firewall_driver,
'prepare_instance_filter',
fake_none)
self.stubs.Set(imagebackend.Image,
'cache',
fake_none)
conn.spawn(self.context, instance, None, [], 'herp',
network_info=network_info)
path = os.path.join(CONF.instances_path, instance['name'])
if os.path.isdir(path):
shutil.rmtree(path)
path = os.path.join(CONF.instances_path, CONF.base_dir_name)
if os.path.isdir(path):
shutil.rmtree(os.path.join(CONF.instances_path,
CONF.base_dir_name))
def test_spawn_without_image_meta(self):
self.create_image_called = False
def fake_none(*args, **kwargs):
return
def fake_create_image(*args, **kwargs):
self.create_image_called = True
def fake_get_info(instance):
return {'state': power_state.RUNNING}
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_image', fake_create_image)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
conn.spawn(self.context, instance, None, [], None)
self.assertTrue(self.create_image_called)
conn.spawn(self.context,
instance,
{'id': instance['image_ref']},
[],
None)
self.assertTrue(self.create_image_called)
def test_spawn_from_volume_calls_cache(self):
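        # The image cache should only be populated when the instance actually
        # boots from an image, not when it is volume-backed.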
self.cache_called_for_disk = False
def fake_none(*args, **kwargs):
return
def fake_cache(*args, **kwargs):
if kwargs.get('image_id') == 'my_fake_image':
self.cache_called_for_disk = True
def fake_get_info(instance):
return {'state': power_state.RUNNING}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
block_device_info = {'root_device_name': '/dev/vda',
'block_device_mapping': [
{'mount_device': 'vda',
'boot_index': 0}
]
}
# Volume-backed instance created without image
instance_ref = self.test_instance
instance_ref['image_ref'] = ''
instance_ref['root_device_name'] = '/dev/vda'
instance_ref['uuid'] = uuidutils.generate_uuid()
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
db.instance_destroy(self.context, instance['uuid'])
# Booted from volume but with placeholder image
instance_ref = self.test_instance
instance_ref['image_ref'] = 'my_fake_image'
instance_ref['root_device_name'] = '/dev/vda'
instance_ref['uuid'] = uuidutils.generate_uuid()
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
db.instance_destroy(self.context, instance['uuid'])
# Booted from an image
instance_ref['image_ref'] = 'my_fake_image'
instance_ref['uuid'] = uuidutils.generate_uuid()
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None)
self.assertTrue(self.cache_called_for_disk)
db.instance_destroy(self.context, instance['uuid'])
def test_spawn_with_pci_devices(self):
def fake_none(*args, **kwargs):
return None
def fake_get_info(instance):
return {'state': power_state.RUNNING}
class FakeLibvirtPciDevice():
def dettach(self):
return None
def reset(self):
return None
def fake_node_device_lookup_by_name(address):
pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
% dict(hex='[\da-f]', oct='[0-8]'))
pattern = re.compile(pattern)
if pattern.match(address) is None:
raise libvirt.libvirtError()
return FakeLibvirtPciDevice()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_image', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
conn._conn.nodeDeviceLookupByName = \
fake_node_device_lookup_by_name
instance_ref = self.test_instance
instance_ref['image_ref'] = 'my_fake_image'
instance = db.instance_create(self.context, instance_ref)
instance = dict(instance.iteritems())
instance['pci_devices'] = [{'address': '0000:00:00.0'}]
conn.spawn(self.context, instance, None, [], None)
def test_create_image_plain(self):
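        # _create_image should fetch the root disk and the default ephemeral disk.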
gotFiles = []
def fake_image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
pass
def cache(self, fetch_func, filename, size=None,
*args, **kwargs):
gotFiles.append({'filename': filename,
'size': size})
def snapshot(self, name):
pass
return FakeImage(instance, name)
def fake_none(*args, **kwargs):
return
def fake_get_info(instance):
return {'state': power_state.RUNNING}
        # Stop 'libvirt_driver._create_image' from touching the filesystem
self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
fake_image)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
image_meta = {'id': instance['image_ref']}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta)
conn._create_image(context, instance,
disk_info['mapping'])
xml = conn.to_xml(self.context, instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * 1024 * 1024 * 1024},
{'filename': 'ephemeral_20_default',
'size': 20 * 1024 * 1024 * 1024},
]
self.assertEqual(gotFiles, wantFiles)
def test_create_image_with_swap(self):
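        # With flavor swap configured, _create_image should also request a swap disk.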
gotFiles = []
def fake_image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
pass
def cache(self, fetch_func, filename, size=None,
*args, **kwargs):
gotFiles.append({'filename': filename,
'size': size})
def snapshot(self, name):
pass
return FakeImage(instance, name)
def fake_none(*args, **kwargs):
return
def fake_get_info(instance):
return {'state': power_state.RUNNING}
        # Stop 'libvirt_driver._create_image' from touching the filesystem
self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
fake_image)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
# Turn on some swap to exercise that codepath in _create_image
instance_ref['system_metadata']['instance_type_swap'] = 500
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
image_meta = {'id': instance['image_ref']}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta)
conn._create_image(context, instance,
disk_info['mapping'])
xml = conn.to_xml(self.context, instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * 1024 * 1024 * 1024},
{'filename': 'ephemeral_20_default',
'size': 20 * 1024 * 1024 * 1024},
{'filename': 'swap_500',
'size': 500 * 1024 * 1024},
]
self.assertEqual(gotFiles, wantFiles)
def test_get_console_output_file(self):
fake_libvirt_utils.files['console.log'] = '01234567890'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = db.instance_create(self.context, instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
console_log = '%s/console.log' % (console_dir)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='file'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % console_log
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = 5
output = conn.get_console_output(instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
self.assertEqual('67890', output)
def test_get_console_output_pty(self):
fake_libvirt_utils.files['pty'] = '01234567890'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = db.instance_create(self.context, instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
pty_file = '%s/fake_pty' % (console_dir)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='pty'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % pty_file
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
def _fake_flush(self, fake_pty):
return 'foo'
def _fake_append_to_file(self, data, fpath):
return 'pty'
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = 5
output = conn.get_console_output(instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
self.assertEqual('67890', output)
def test_get_host_ip_addr(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = conn.get_host_ip_addr()
self.assertEqual(ip, CONF.my_ip)
def test_broken_connection(self):
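        # System/internal errors from the remote or RPC layers should make
        # the connection test fail, marking the libvirt connection as broken.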
for (error, domain) in (
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC),
(libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_wrapped_conn")
self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion")
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain")
conn._wrapped_conn.getLibVersion().AndRaise(
libvirt.libvirtError("fake failure"))
libvirt.libvirtError.get_error_code().AndReturn(error)
libvirt.libvirtError.get_error_domain().AndReturn(domain)
self.mox.ReplayAll()
self.assertFalse(conn._test_connection(conn._wrapped_conn))
self.mox.UnsetStubs()
def test_immediate_delete(self):
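        # Destroying an instance that libvirt no longer knows about should not raise.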
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound(instance_id=instance_name)
def fake_delete_instance_files(instance):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = db.instance_create(self.context, self.test_instance)
conn.destroy(instance, {})
def test_destroy_removes_disk(self):
instance = {"name": "instancename", "id": "42",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
"cleaned": 0, 'info_cache': None, 'security_groups': []}
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_undefine_domain')
libvirt_driver.LibvirtDriver._undefine_domain(instance)
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
columns_to_join=['info_cache',
'security_groups']
).AndReturn(instance)
self.mox.StubOutWithMock(shutil, "rmtree")
shutil.rmtree(os.path.join(CONF.instances_path,
'instance-%08x' % int(instance['id'])))
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm')
libvirt_driver.LibvirtDriver._cleanup_lvm(instance)
# Start test
self.mox.ReplayAll()
def fake_destroy(instance):
pass
def fake_os_path_exists(path):
return True
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
def fake_obj_load_attr(self, attrname):
if not hasattr(self, attrname):
self[attrname] = {}
def fake_save(self, context):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_destroy', fake_destroy)
self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
self.stubs.Set(conn.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
self.stubs.Set(instance_obj.Instance, 'fields',
{'id': int, 'uuid': str, 'cleaned': int})
self.stubs.Set(instance_obj.Instance, 'obj_load_attr',
fake_obj_load_attr)
self.stubs.Set(instance_obj.Instance, 'save', fake_save)
conn.destroy(instance, [])
def test_destroy_not_removes_disk(self):
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_undefine_domain')
libvirt_driver.LibvirtDriver._undefine_domain(instance)
# Start test
self.mox.ReplayAll()
def fake_destroy(instance):
pass
def fake_os_path_exists(path):
return True
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_destroy', fake_destroy)
self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
self.stubs.Set(conn.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(instance, [], None, False)
def test_delete_instance_files(self):
instance = {"name": "instancename", "id": "42",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
"cleaned": 0, 'info_cache': None, 'security_groups': []}
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(shutil, "rmtree")
db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
columns_to_join=['info_cache',
'security_groups']
).AndReturn(instance)
os.path.exists(mox.IgnoreArg()).AndReturn(False)
os.path.exists(mox.IgnoreArg()).AndReturn(True)
shutil.rmtree(os.path.join(CONF.instances_path, instance['uuid']))
os.path.exists(mox.IgnoreArg()).AndReturn(True)
os.path.exists(mox.IgnoreArg()).AndReturn(False)
os.path.exists(mox.IgnoreArg()).AndReturn(True)
shutil.rmtree(os.path.join(CONF.instances_path, instance['uuid']))
os.path.exists(mox.IgnoreArg()).AndReturn(False)
self.mox.ReplayAll()
def fake_obj_load_attr(self, attrname):
if not hasattr(self, attrname):
self[attrname] = {}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(instance_obj.Instance, 'fields',
{'id': int, 'uuid': str, 'cleaned': int})
self.stubs.Set(instance_obj.Instance, 'obj_load_attr',
fake_obj_load_attr)
inst_obj = instance_obj.Instance.get_by_uuid(None, instance['uuid'])
self.assertFalse(conn.delete_instance_files(inst_obj))
self.assertTrue(conn.delete_instance_files(inst_obj))
def test_reboot_different_ids(self):
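        # A changed domain ID after shutdown means the soft reboot worked and
        # the guest is recreated instead of hard rebooted.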
class FakeLoopingCall:
def start(self, *a, **k):
return self
def wait(self):
return None
self.flags(libvirt_wait_soft_reboot_seconds=1)
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
self.reboot_create_called = False
# Mock domain
mock_domain = self.mox.CreateMock(libvirt.virDomain)
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
mock_domain.shutdown()
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
mock_domain.ID().AndReturn('some_other_fake_id')
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock_domain
def fake_create_domain(**kwargs):
self.reboot_create_called = True
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
"pci_devices": []}
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, '_create_domain', fake_create_domain)
self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
conn.reboot(None, instance, [])
self.assertTrue(self.reboot_create_called)
def test_reboot_same_ids(self):
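        # If the domain ID never changes, the soft reboot times out and a
        # hard reboot is performed instead.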
class FakeLoopingCall:
def start(self, *a, **k):
return self
def wait(self):
return None
self.flags(libvirt_wait_soft_reboot_seconds=1)
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
self.reboot_hard_reboot_called = False
# Mock domain
mock_domain = self.mox.CreateMock(libvirt.virDomain)
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
mock_domain.shutdown()
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock_domain
def fake_hard_reboot(*args, **kwargs):
self.reboot_hard_reboot_called = True
def fake_sleep(interval):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
"pci_devices": []}
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(greenthread, 'sleep', fake_sleep)
self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
conn.reboot(None, instance, [])
self.assertTrue(self.reboot_hard_reboot_called)
def test_soft_reboot_libvirt_exception(self):
# Tests that a hard reboot is performed when a soft reboot results
# in raising a libvirtError.
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
# setup mocks
mock_domain = self.mox.CreateMock(libvirt.virDomain)
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
mock_domain.shutdown().AndRaise(libvirt.libvirtError('Err'))
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
context = None
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
network_info = []
self.mox.StubOutWithMock(conn, '_lookup_by_name')
conn._lookup_by_name(instance['name']).AndReturn(mock_domain)
self.mox.StubOutWithMock(conn, '_hard_reboot')
conn._hard_reboot(context, instance, network_info, None)
self.mox.ReplayAll()
conn.reboot(context, instance, network_info)
def _test_resume_state_on_host_boot_with_state(self, state):
called = {'count': 0}
mock = self.mox.CreateMock(libvirt.virDomain)
mock.info().AndReturn([state, None, None, None, None])
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_hard_reboot(*args):
called['count'] += 1
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
network_info = _fake_network_info(self.stubs, 1)
conn.resume_state_on_host_boot(self.context, instance, network_info,
block_device_info=None)
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
self.assertEqual(called['count'], 0)
else:
self.assertEqual(called['count'], 1)
def test_resume_state_on_host_boot_with_running_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
def test_resume_state_on_host_boot_with_suspended_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
def test_resume_state_on_host_boot_with_paused_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
def test_resume_state_on_host_boot_with_nostate(self):
self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
def test_resume_state_on_host_boot_with_shutdown_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
def test_resume_state_on_host_boot_with_crashed_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
def test_resume_state_on_host_boot_with_instance_not_found_on_driver(self):
called = {'count': 0}
instance = {'name': 'test'}
def fake_instance_exists(name):
return False
def fake_hard_reboot(*args):
called['count'] += 1
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'instance_exists', fake_instance_exists)
self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
conn.resume_state_on_host_boot(self.context, instance, network_info=[],
block_device_info=None)
self.assertEqual(called['count'], 1)
def test_hard_reboot(self):
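        # A hard reboot should destroy the domain, regenerate the XML and
        # backing files, then recreate the domain and network.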
called = {'count': 0}
instance = db.instance_create(self.context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
block_device_info = None
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_destroy')
self.mox.StubOutWithMock(conn, 'get_instance_disk_info')
self.mox.StubOutWithMock(conn, 'to_xml')
self.mox.StubOutWithMock(conn, '_create_images_and_backing')
self.mox.StubOutWithMock(conn, '_create_domain_and_network')
def fake_get_info(instance_name):
called['count'] += 1
if called['count'] == 1:
state = power_state.SHUTDOWN
else:
state = power_state.RUNNING
return dict(state=state)
self.stubs.Set(conn, 'get_info', fake_get_info)
conn._destroy(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance, block_device_info)
conn.to_xml(self.context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True).AndReturn(dummyxml)
disk_info_json = '[{"virt_disk_size": 2}]'
conn.get_instance_disk_info(instance["name"], dummyxml,
block_device_info).AndReturn(disk_info_json)
conn._create_images_and_backing(self.context, instance,
libvirt_utils.get_instance_path(instance),
disk_info_json)
conn._create_domain_and_network(dummyxml, instance,
network_info, block_device_info,
context=self.context, reboot=True)
self.mox.ReplayAll()
conn._hard_reboot(self.context, instance, network_info,
block_device_info)
def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndReturn(1)
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
def fake_delete_instance_files(instance):
return None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
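# Only RBD volumes whose names start with the instance uuid should be
# removed from the pool; the 'wrong...' entry must be left alone.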
def test_cleanup_rbd(self):
mock = self.mox.CreateMock(libvirt.virDomain)
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
fake_volumes = ['875a8070-d0b9-4949-8b31-104d125c9a64.local',
'875a8070-d0b9-4949-8b31-104d125c9a64.swap',
'875a8070-d0b9-4949-8b31-104d125c9a64',
'wrong875a8070-d0b9-4949-8b31-104d125c9a64']
fake_pool = 'fake_pool'
fake_instance = {'name': 'fakeinstancename', 'id': 'instanceid',
'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.flags(libvirt_images_rbd_pool=fake_pool)
self.mox.StubOutWithMock(libvirt_driver.libvirt_utils,
'remove_rbd_volumes')
libvirt_driver.libvirt_utils.remove_rbd_volumes(fake_pool,
*fake_volumes[:3])
self.mox.ReplayAll()
conn._cleanup_rbd(fake_instance)
self.mox.VerifyAll()
def test_destroy_undefines_no_undefine_flags(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(libvirt.libvirtError('Err'))
mock.undefine()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
def fake_delete_instance_files(instance):
return None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_undefines_no_attribute_with_managed_save(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(AttributeError())
mock.hasManagedSaveImage(0).AndReturn(True)
mock.managedSaveRemove(0)
mock.undefine()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
def fake_delete_instance_files(instance):
return None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_undefines_no_attribute_no_managed_save(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(AttributeError())
mock.hasManagedSaveImage(0).AndRaise(AttributeError())
mock.undefine()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
def fake_delete_instance_files(instance):
return None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_timed_out(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy().AndRaise(libvirt.libvirtError("timed out"))
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_error_code(self):
return libvirt.VIR_ERR_OPERATION_TIMEOUT
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(libvirt.libvirtError, 'get_error_code',
fake_get_error_code)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.assertRaises(exception.InstancePowerOffFailure,
conn.destroy, instance, [])
def test_private_destroy_not_found(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
raise exception.InstanceNotFound(instance_id=instance_name)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
# NOTE(vish): verifies destroy doesn't raise if the instance disappears
conn._destroy(instance)
def test_undefine_domain_with_not_found_instance(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError("not found")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
libvirt.libvirtError.get_error_code().AndReturn(
libvirt.VIR_ERR_NO_DOMAIN)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = {'name': 'test'}
# NOTE(wenjianhn): verifies undefine doesn't raise if the
# instance disappears
conn._undefine_domain(instance)
def test_disk_over_committed_size_total(self):
# Ensure the over-committed disk size is summed across all instances.
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
return ['fake1', 'fake2']
self.stubs.Set(conn, 'list_instances', list_instances)
fake_disks = {'fake1': [{'type': 'qcow2', 'path': '/somepath/disk1',
'virt_disk_size': '10737418240',
'backing_file': '/somepath/disk1',
'disk_size': '83886080',
'over_committed_disk_size': '10653532160'}],
'fake2': [{'type': 'raw', 'path': '/somepath/disk2',
'virt_disk_size': '0',
'backing_file': '/somepath/disk2',
'disk_size': '10737418240',
'over_committed_disk_size': '0'}]}
def get_info(instance_name):
return jsonutils.dumps(fake_disks.get(instance_name))
self.stubs.Set(conn, 'get_instance_disk_info', get_info)
result = conn.get_disk_over_committed_size_total()
self.assertEqual(result, 10653532160)
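# get_cpu_info() should serialise the host CPU description (vendor, model,
# arch, features and topology) taken from the capabilities XML into JSON.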
def test_cpu_info(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.arch = "x86_64"
cpu.cores = 2
cpu.threads = 1
cpu.sockets = 4
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "x86_64"
guest.domtype = ["kvm"]
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "i686"
guest.domtype = ["kvm"]
caps.guests.append(guest)
return caps
self.stubs.Set(libvirt_driver.LibvirtDriver,
'get_host_capabilities',
get_host_capabilities_stub)
want = {"vendor": "AMD",
"features": ["extapic", "3dnow"],
"model": "Opteron_G4",
"arch": "x86_64",
"topology": {"cores": 2, "threads": 1, "sockets": 4}}
got = jsonutils.loads(conn.get_cpu_info())
self.assertEqual(want, got)
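# A physical function (type-PF) carries no phys_function entry, while a
# virtual function (type-VF) reports the address of its parent PF.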
def test_get_pcidev_info(self):
def fake_nodeDeviceLookupByName(name):
return FakeNodeDevice(_fake_NodeDevXml[name])
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
fake_nodeDeviceLookupByName
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actualvf = conn._get_pcidev_info("pci_0000_04_00_3")
expect_vf = {
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:00.3",
"product_id": '1521',
"vendor_id": '8086',
"label": 'label_8086_1521',
"dev_type": 'type-PF',
}
self.assertEqual(actualvf, expect_vf)
actualvf = conn._get_pcidev_info("pci_0000_04_10_7")
expect_vf = {
"dev_id": "pci_0000_04_10_7",
"address": "0000:04:10.7",
"product_id": '1520',
"vendor_id": '8086',
"label": 'label_8086_1520',
"dev_type": 'type-VF',
"phys_function": [('0x0000', '0x04', '0x00', '0x3')],
}
self.assertEqual(actualvf, expect_vf)
def test_pci_device_assignable(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: True)
fake_dev = {'dev_type': 'type-PF'}
self.assertFalse(conn._pci_device_assignable(fake_dev))
fake_dev = {'dev_type': 'type-VF'}
self.assertTrue(conn._pci_device_assignable(fake_dev))
fake_dev = {'dev_type': 'type-PCI'}
self.assertTrue(conn._pci_device_assignable(fake_dev))
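# get_pci_passthrough_devices() returns a JSON list of assignable devices;
# the first returned entry is checked field by field against the expected
# VF description.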
def test_get_pci_passthrough_devices(self):
def fakelistDevices(caps, fakeargs=0):
return ['pci_0000_04_00_3', 'pci_0000_04_10_7']
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices
def fake_nodeDeviceLookupByName(name):
return FakeNodeDevice(_fake_NodeDevXml[name])
libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
fake_nodeDeviceLookupByName
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: x)
actjson = conn.get_pci_passthrough_devices()
expectvfs = [
{
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:10.3",
"product_id": '1521',
"vendor_id": '8086',
"dev_type": 'type-PF',
"phys_function": None},
{
"dev_id": "pci_0000_04_10_7",
"domain": 0,
"address": "0000:04:10.7",
"product_id": '1520',
"vendor_id": '8086',
"dev_type": 'type-VF',
"phys_function": [('0x0000', '0x04', '0x00', '0x3')],
}
]
actualvfs = jsonutils.loads(actjson)
for key in actualvfs[0].keys():
if key not in ['phys_function', 'virt_functions', 'label']:
self.assertEqual(actualvfs[0][key], expectvfs[1][key])
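# The diagnostics tests below each make one libvirt stats call fail
# (vcpus, blockStats, interfaceStats or memoryStats) and verify that
# get_diagnostics() still returns the counters that could be collected.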
def test_diagnostic_vcpus_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
raise libvirt.libvirtError('vcpus missing')
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_diagnostic_blockstats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
raise libvirt.libvirtError('blockStats missing')
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_diagnostic_interfacestats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
raise libvirt.libvirtError('interfaceStat missing')
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
}
self.assertEqual(actual, expect)
def test_diagnostic_memorystats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
raise libvirt.libvirtError('memoryStats missing')
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_diagnostic_full(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_failing_vcpu_count(self):
"""Domain can fail to return the vcpu description in case it's
just starting up or shutting down. Make sure None is handled
gracefully.
"""
class DiagFakeDomain(object):
def __init__(self, vcpus):
self._vcpus = vcpus
def vcpus(self):
if self._vcpus is None:
return None
else:
return ([1] * self._vcpus, [True] * self._vcpus)
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conn = driver._conn
self.mox.StubOutWithMock(driver, 'list_instance_ids')
conn.lookupByID = self.mox.CreateMockAnything()
driver.list_instance_ids().AndReturn([1, 2])
conn.lookupByID(1).AndReturn(DiagFakeDomain(None))
conn.lookupByID(2).AndReturn(DiagFakeDomain(5))
self.mox.ReplayAll()
self.assertEqual(5, driver.get_vcpu_used())
def test_get_instance_capabilities(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
caps = vconfig.LibvirtConfigCaps()
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'x86_64'
guest.domtype = ['kvm', 'qemu']
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'i686'
guest.domtype = ['kvm']
caps.guests.append(guest)
return caps
self.stubs.Set(libvirt_driver.LibvirtDriver,
'get_host_capabilities',
get_host_capabilities_stub)
want = [('x86_64', 'kvm', 'hvm'),
('x86_64', 'qemu', 'hvm'),
('i686', 'kvm', 'hvm')]
got = conn.get_instance_capabilities()
self.assertEqual(want, got)
def test_event_dispatch(self):
# Validate that the libvirt self-pipe for forwarding
# events between threads is working sanely
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
got_events = []
def handler(event):
got_events.append(event)
conn.register_event_listener(handler)
conn._init_events_pipe()
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
event2 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_PAUSED)
conn._queue_event(event1)
conn._queue_event(event2)
conn._dispatch_events()
want_events = [event1, event2]
self.assertEqual(want_events, got_events)
event3 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_RESUMED)
event4 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STOPPED)
conn._queue_event(event3)
conn._queue_event(event4)
conn._dispatch_events()
want_events = [event1, event2, event3, event4]
self.assertEqual(want_events, got_events)
def test_event_lifecycle(self):
# Validate that libvirt events are correctly translated
# to Nova events
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
got_events = []
def handler(event):
got_events.append(event)
conn.register_event_listener(handler)
conn._init_events_pipe()
fake_dom_xml = """
<domain type='kvm'>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
dom = FakeVirtDomain(fake_dom_xml,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
conn._event_lifecycle_callback(conn._conn,
dom,
libvirt.VIR_DOMAIN_EVENT_STOPPED,
0,
conn)
conn._dispatch_events()
self.assertEqual(len(got_events), 1)
self.assertEqual(type(got_events[0]), virtevent.LifecycleEvent)
self.assertEqual(got_events[0].uuid,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
self.assertEqual(got_events[0].transition,
virtevent.EVENT_LIFECYCLE_STOPPED)
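# set_cache_mode() maps a disk source type to a cache mode via the
# disk_cachemodes flag: a valid mapping is applied, an unknown mode is
# ignored, and a config object without a source_type is left untouched.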
def test_set_cache_mode(self):
self.flags(disk_cachemodes=['file=directsync'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'directsync')
def test_set_cache_mode_invalid_mode(self):
self.flags(disk_cachemodes=['file=FAKE'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, None)
def test_set_cache_mode_invalid_object(self):
self.flags(disk_cachemodes=['file=directsync'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuest()
fake_conf.driver_cache = 'fake'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
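# Shared storage detection: a file is touched on the remote host over ssh;
# if it shows up locally the instance path is shared, otherwise it is
# removed remotely.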
def _test_shared_storage_detection(self, is_same):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(os, 'unlink')
conn.get_host_ip_addr().AndReturn('bar')
utils.execute('ssh', 'foo', 'touch', mox.IgnoreArg())
os.path.exists(mox.IgnoreArg()).AndReturn(is_same)
if is_same:
os.unlink(mox.IgnoreArg())
else:
utils.execute('ssh', 'foo', 'rm', mox.IgnoreArg())
self.mox.ReplayAll()
return conn._is_storage_shared_with('foo', '/path')
def test_shared_storage_detection_same_host(self):
self.assertTrue(self._test_shared_storage_detection(True))
def test_shared_storage_detection_different_host(self):
self.assertFalse(self._test_shared_storage_detection(False))
def test_shared_storage_detection_easy(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(os, 'unlink')
conn.get_host_ip_addr().AndReturn('foo')
self.mox.ReplayAll()
self.assertTrue(conn._is_storage_shared_with('foo', '/path'))
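# The three _create_domain failure tests below force an error at a
# different stage (defineXML, createWithFlags, _enable_hairpin) and check
# that the offending domain XML is included in the logged error message.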
def test_create_domain_define_xml_fails(self):
"""
Tests that the xml is logged when defining the domain fails.
"""
fake_xml = "<test>this is a test</test>"
def fake_defineXML(xml):
self.assertEqual(fake_xml, xml)
raise libvirt.libvirtError('virDomainDefineXML() failed')
self.log_error_called = False
def fake_error(msg):
self.log_error_called = True
self.assertTrue(fake_xml in msg)
self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
self.create_fake_libvirt_mock(defineXML=fake_defineXML)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(libvirt.libvirtError, conn._create_domain, fake_xml)
self.assertTrue(self.log_error_called)
def test_create_domain_with_flags_fails(self):
"""
Tests that the xml is logged when creating the domain with flags fails.
"""
fake_xml = "<test>this is a test</test>"
fake_domain = FakeVirtDomain(fake_xml)
def fake_createWithFlags(launch_flags):
raise libvirt.libvirtError('virDomainCreateWithFlags() failed')
self.log_error_called = False
def fake_error(msg):
self.log_error_called = True
self.assertTrue(fake_xml in msg)
self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
self.create_fake_libvirt_mock()
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(libvirt.libvirtError, conn._create_domain,
domain=fake_domain)
self.assertTrue(self.log_error_called)
def test_create_domain_enable_hairpin_fails(self):
"""
Tests that the xml is logged when enabling hairpin mode for the domain
fails.
"""
fake_xml = "<test>this is a test</test>"
fake_domain = FakeVirtDomain(fake_xml)
def fake_enable_hairpin(launch_flags):
raise processutils.ProcessExecutionError('error')
self.log_error_called = False
def fake_error(msg):
self.log_error_called = True
self.assertTrue(fake_xml in msg)
self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
self.create_fake_libvirt_mock()
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.stubs.Set(conn, '_enable_hairpin', fake_enable_hairpin)
self.assertRaises(processutils.ProcessExecutionError,
conn._create_domain,
domain=fake_domain,
power_on=False)
self.assertTrue(self.log_error_called)
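# The console tests parse the <graphics> element out of the domain XML;
# when the element is missing, ConsoleTypeUnavailable is raised.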
def test_get_vnc_console(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<graphics type='vnc' port='5900'/>"
"</devices></domain>")
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
vnc_dict = conn.get_vnc_console(instance_ref)
self.assertEqual(vnc_dict['port'], '5900')
def test_get_vnc_console_unavailable(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices></devices></domain>")
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ConsoleTypeUnavailable,
conn.get_vnc_console, instance_ref)
def test_get_spice_console(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<graphics type='spice' port='5950'/>"
"</devices></domain>")
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
spice_dict = conn.get_spice_console(instance_ref)
self.assertEqual(spice_dict['port'], '5950')
def test_get_spice_console_unavailable(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices></devices></domain>")
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ConsoleTypeUnavailable,
conn.get_spice_console, instance_ref)
def _test_attach_detach_interface_get_config(self, method_name):
"""
Tests that the get_config() method is properly called in
attach_interface() and detach_interface().
method_name: either \"attach_interface\" or \"detach_interface\"
depending on the method to test.
"""
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
test_instance = copy.deepcopy(self.test_instance)
test_instance['name'] = "test"
network_info = _fake_network_info(self.stubs, 1)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
if method_name == "attach_interface":
fake_image_meta = {'id': test_instance['image_ref']}
elif method_name == "detach_interface":
fake_image_meta = None
else:
raise ValueError("Unhandled method %" % method_name)
virtapi = fake.FakeVirtAPI()
fake_inst_type_id = test_instance['instance_type_id']
fake_inst_type = virtapi.instance_type_get(self.context,
fake_inst_type_id)
expected = conn.vif_driver.get_config(test_instance, network_info[0],
fake_image_meta, fake_inst_type)
self.mox.StubOutWithMock(conn.vif_driver, 'get_config')
conn.vif_driver.get_config(test_instance, network_info[0],
fake_image_meta, fake_inst_type).\
AndReturn(expected)
self.mox.ReplayAll()
if method_name == "attach_interface":
conn.attach_interface(test_instance, fake_image_meta,
network_info[0])
elif method_name == "detach_interface":
conn.detach_interface(test_instance, network_info[0])
else:
raise ValueError("Unhandled method %" % method_name)
def test_attach_interface_get_config(self):
"""
Tests that the get_config() method is properly called in
attach_interface().
"""
self._test_attach_detach_interface_get_config("attach_interface")
def test_detach_interface_get_config(self):
"""
Tests that the get_config() method is properly called in
detach_interface().
"""
self._test_attach_detach_interface_get_config("detach_interface")
def test_default_root_device_name(self):
instance = {'uuid': 'fake_instance'}
image_meta = {'id': 'fake'}
root_bdm = {'source_type': 'image',
'destination_type': 'volume',
'image_id': 'fake_id'}
self.flags(libvirt_type='fake_libvirt_type')
self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
self.mox.StubOutWithMock(blockinfo, 'get_root_info')
blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
image_meta,
'disk').InAnyOrder().\
AndReturn('virtio')
blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
image_meta,
'cdrom').InAnyOrder().\
AndReturn('ide')
blockinfo.get_root_info('fake_libvirt_type',
image_meta, root_bdm,
'virtio', 'ide').AndReturn({'dev': 'vda'})
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(conn.default_root_device_name(instance, image_meta,
root_bdm), '/dev/vda')
def test_default_device_names_for_instance(self):
instance = {'uuid': 'fake_instance'}
root_device_name = '/dev/vda'
ephemerals = [{'device_name': 'vdb'}]
swap = [{'device_name': 'vdc'}]
block_device_mapping = [{'device_name': 'vdc'}]
self.flags(libvirt_type='fake_libvirt_type')
self.mox.StubOutWithMock(blockinfo, 'default_device_names')
blockinfo.default_device_names('fake_libvirt_type', instance,
root_device_name, mox.IgnoreArg(),
ephemerals, swap, block_device_mapping)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.default_device_names_for_instance(instance, root_device_name,
ephemerals, swap,
block_device_mapping)
def test_hypervisor_hostname_caching(self):
# Make sure that the first hostname is always returned
class FakeConn(object):
def getHostname(self):
pass
def getLibVersion(self):
return 99999
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn._wrapped_conn = FakeConn()
self.mox.StubOutWithMock(conn._wrapped_conn, 'getHostname')
conn._conn.getHostname().AndReturn('foo')
conn._conn.getHostname().AndReturn('bar')
self.mox.ReplayAll()
self.assertEqual('foo', conn.get_hypervisor_hostname())
self.assertEqual('foo', conn.get_hypervisor_hostname())
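# HostStateTestCase feeds a FakeConnection into HostState and checks the
# stats dictionary assembled from the fake values.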
class HostStateTestCase(test.TestCase):
cpu_info = ('{"vendor": "Intel", "model": "pentium", "arch": "i686", '
'"features": ["ssse3", "monitor", "pni", "sse2", "sse", '
'"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", '
'"mtrr", "sep", "apic"], '
'"topology": {"cores": "1", "threads": "1", "sockets": "1"}}')
instance_caps = [("x86_64", "kvm", "hvm"), ("i686", "kvm", "hvm")]
pci_devices = [{
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:10.3",
"product_id": '1521',
"vendor_id": '8086',
"dev_type": 'type-PF',
"phys_function": None}]
class FakeConnection(object):
"""Fake connection object."""
def get_vcpu_total(self):
return 1
def get_vcpu_used(self):
return 0
def get_cpu_info(self):
return HostStateTestCase.cpu_info
def get_disk_over_committed_size_total(self):
return 0
def get_local_gb_info(self):
return {'total': 100, 'used': 20, 'free': 80}
def get_memory_mb_total(self):
return 497
def get_memory_mb_used(self):
return 88
def get_hypervisor_type(self):
return 'QEMU'
def get_hypervisor_version(self):
return 13091
def get_hypervisor_hostname(self):
return 'compute1'
def get_host_uptime(self):
return ('10:01:16 up 1:36, 6 users, '
'load average: 0.21, 0.16, 0.19')
def get_disk_available_least(self):
return 13091
def get_instance_capabilities(self):
return HostStateTestCase.instance_caps
def get_pci_passthrough_devices(self):
return jsonutils.dumps(HostStateTestCase.pci_devices)
def test_update_status(self):
hs = libvirt_driver.HostState(self.FakeConnection())
stats = hs._stats
self.assertEqual(stats["vcpus"], 1)
self.assertEqual(stats["memory_mb"], 497)
self.assertEqual(stats["local_gb"], 100)
self.assertEqual(stats["vcpus_used"], 0)
self.assertEqual(stats["memory_mb_used"], 88)
self.assertEqual(stats["local_gb_used"], 20)
self.assertEqual(stats["hypervisor_type"], 'QEMU')
self.assertEqual(stats["hypervisor_version"], 13091)
self.assertEqual(stats["hypervisor_hostname"], 'compute1')
self.assertEqual(jsonutils.loads(stats["cpu_info"]),
{"vendor": "Intel", "model": "pentium", "arch": "i686",
"features": ["ssse3", "monitor", "pni", "sse2", "sse",
"fxsr", "clflush", "pse36", "pat", "cmov",
"mca", "pge", "mtrr", "sep", "apic"],
"topology": {"cores": "1", "threads": "1", "sockets": "1"}
})
self.assertEqual(stats["disk_available_least"], 80)
self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
HostStateTestCase.pci_devices)
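# NWFilterFakes emulates the small slice of the libvirt nwfilter API the
# firewall drivers use: define a filter from XML, look it up by name and
# undefine it again.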
class NWFilterFakes:
def __init__(self):
self.filters = {}
def nwfilterLookupByName(self, name):
if name in self.filters:
return self.filters[name]
raise libvirt.libvirtError('Filter Not Found')
def filterDefineXMLMock(self, xml):
class FakeNWFilterInternal:
def __init__(self, parent, name, xml):
self.name = name
self.parent = parent
self.xml = xml
def undefine(self):
del self.parent.filters[self.name]
tree = etree.fromstring(xml)
name = tree.get('name')
if name not in self.filters:
self.filters[name] = FakeNWFilterInternal(self, name, xml)
return True
class IptablesFirewallTestCase(test.TestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
class FakeLibvirtDriver(object):
def nwfilterDefineXML(*args, **kwargs):
"""setup_basic_rules in nwfilter calls this."""
pass
self.fake_libvirt_connection = FakeLibvirtDriver()
self.fw = firewall.IptablesFirewallDriver(
fake.FakeVirtAPI(),
get_connection=lambda: self.fake_libvirt_connection)
in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Tue Dec 18 15:50:25 2012',
'# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 2012',
'*mangle',
':PREROUTING ACCEPT [241:39722]',
':INPUT ACCEPT [230:39282]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [266:26558]',
':POSTROUTING ACCEPT [267:26590]',
'-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
'--checksum-fill',
'COMMIT',
'# Completed on Tue Dec 18 15:50:25 2012',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
'instance_type_id': 1})
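# test_static_filters creates security groups and rules in the database,
# fakes the iptables-save/restore round trip and then checks that the
# instance chain, the security group chain and the expected ACCEPT rules
# all appear in the rules that would have been restored.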
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'name': 'testgroup',
'description': 'test group'})
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
# self.fw.add_instance(instance_ref)
def fake_iptables_execute(*cmd, **kwargs):
process_input = kwargs.get('process_input', None)
if cmd == ('ip6tables-save', '-c'):
return '\n'.join(self.in6_filter_rules), None
if cmd == ('iptables-save', '-c'):
return '\n'.join(self.in_rules), None
if cmd == ('iptables-restore', '-c'):
lines = process_input.split('\n')
if '*filter' in lines:
self.out_rules = lines
return '', ''
if cmd == ('ip6tables-restore', '-c',):
lines = process_input.split('\n')
if '*filter' in lines:
self.out6_rules = lines
return '', ''
network_model = _fake_network_info(self.stubs, 1)
from nova.network import linux_net
linux_net.iptables_manager.execute = fake_iptables_execute
from nova.compute import utils as compute_utils
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
self.fw.prepare_instance_filter(instance_ref, network_model)
self.fw.apply_instance_filter(instance_ref, network_model)
in_rules = filter(lambda l: not l.startswith('#'),
self.in_rules)
for rule in in_rules:
if 'nova' not in rule:
self.assertTrue(rule in self.out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
'-s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
'--icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
'--dports 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
'%s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"Protocol/port-less acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
'-m multiport --dports 80:81 -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = _fake_network_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = _fake_network_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 0)
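# With five networks and two IPv4 addresses per network, one filter rule
# per address is expected, plus the two extra rules for the DHCP request.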
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
network_info = _fake_network_info(self.stubs, networks_count,
ipv4_addr_per_network)
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
self.assertEqual(ipv4_network_rules, rules)
self.assertEqual(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
instance_ref = self._create_instance_ref()
self.mox.StubOutWithMock(self.fw,
'instance_rules')
self.mox.StubOutWithMock(self.fw,
'add_filters_for_instance',
use_mock_anything=True)
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
mox.IgnoreArg())
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
self.fw.instances[instance_ref['id']] = instance_ref
self.fw.do_refresh_security_group_rules("fake")
def test_unfilter_instance_undefines_nwfilter(self):
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
_xml_mock = fakefilter.filterDefineXMLMock
self.fw.nwfilter._conn.nwfilterDefineXML = _xml_mock
_lookup_name = fakefilter.nwfilterLookupByName
self.fw.nwfilter._conn.nwfilterLookupByName = _lookup_name
instance_ref = self._create_instance_ref()
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
original_filter_count = len(fakefilter.filters)
self.fw.unfilter_instance(instance_ref, network_info)
# should undefine just the instance filter
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
# create a firewall via setup_basic_filtering like libvirt_conn.spawn
# should have a chain with 0 rules
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class NWFilterTestCase(test.TestCase):
def setUp(self):
super(NWFilterTestCase, self).setUp()
class Mock(object):
pass
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.fake_libvirt_connection = Mock()
self.fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(),
lambda: self.fake_libvirt_connection)
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')
security_group = db.security_group_get_by_name(self.context,
'fake',
'testgroup')
self.teardown_security_group()
def teardown_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.delete_security_group(self.context, 'testgroup')
def setup_and_return_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')
return db.security_group_get_by_name(self.context, 'fake', 'testgroup')
def _create_instance(self):
return db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
'instance_type_id': 1})
def _create_instance_type(self, params=None):
"""Create a test instance."""
if not params:
params = {}
context = self.context.elevated()
inst = {}
inst['name'] = 'm1.small'
inst['memory_mb'] = '1024'
inst['vcpus'] = '1'
inst['root_gb'] = '10'
inst['ephemeral_gb'] = '20'
inst['flavorid'] = '1'
inst['swap'] = '2048'
inst['rxtx_factor'] = 1
inst.update(params)
return db.flavor_create(context, inst)['id']
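# The fake nwfilterDefineXML below records which filters each new filter
# references, so the test can assert that every instance filter pulls in
# the spoofing filters and only references allow-dhcp-server when a DHCP
# server is configured on the subnet.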
def test_creates_base_rule_first(self):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing',
'allow-dhcp-server']
self.recursive_depends = {}
for f in self.defined_filters:
self.recursive_depends[f] = []
def _filterDefineXMLMock(xml):
dom = minidom.parseString(xml)
name = dom.firstChild.getAttribute('name')
self.recursive_depends[name] = []
for f in dom.getElementsByTagName('filterref'):
ref = f.getAttribute('filter')
self.assertTrue(ref in self.defined_filters,
('%s referenced filter that does ' +
'not yet exist: %s') % (name, ref))
dependencies = [ref] + self.recursive_depends[ref]
self.recursive_depends[name] += dependencies
self.defined_filters.append(name)
return True
self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
instance_ref = self._create_instance()
inst_id = instance_ref['id']
inst_uuid = instance_ref['uuid']
def _ensure_all_called(mac, allow_dhcp):
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
mac.translate(None, ':'))
requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
'no-mac-spoofing']
required_not_list = []
if allow_dhcp:
requiredlist.append('allow-dhcp-server')
else:
required_not_list.append('allow-dhcp-server')
for required in requiredlist:
self.assertTrue(required in
self.recursive_depends[instance_filter],
"Instance's filter does not include %s" %
required)
for required_not in required_not_list:
self.assertFalse(required_not in
self.recursive_depends[instance_filter],
"Instance filter includes %s" % required_not)
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
# since there is one (network_info) there is one vif
# pass this vif's mac to _ensure_all_called()
# to set the instance_filter properly
mac = network_info[0]['address']
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
self.fw.setup_basic_filtering(instance, network_info)
allow_dhcp = True
_ensure_all_called(mac, allow_dhcp)
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = None
self.fw.setup_basic_filtering(instance, network_info)
allow_dhcp = False
_ensure_all_called(mac, allow_dhcp)
db.instance_remove_security_group(self.context, inst_uuid,
self.security_group['id'])
self.teardown_security_group()
db.instance_destroy(context.get_admin_context(), instance_ref['uuid'])
def test_unfilter_instance_undefines_nwfilters(self):
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
instance_ref = self._create_instance()
inst_id = instance_ref['id']
inst_uuid = instance_ref['uuid']
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance, network_info)
original_filter_count = len(fakefilter.filters)
self.fw.unfilter_instance(instance, network_info)
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
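# test_nwfilter_parameters inspects the <parameter> values injected into
# the instance filter XML: IP, DHCPSERVER, RASERVER and the project
# network/mask values for both IPv4 and IPv6.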
def test_nwfilter_parameters(self):
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
instance_ref = self._create_instance()
inst_id = instance_ref['id']
inst_uuid = instance_ref['uuid']
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance, network_info)
vif = network_info[0]
nic_id = vif['address'].replace(':', '')
instance_filter_name = self.fw._instance_filter_name(instance, nic_id)
f = fakefilter.nwfilterLookupByName(instance_filter_name)
tree = etree.fromstring(f.xml)
for fref in tree.findall('filterref'):
parameters = fref.findall('./parameter')
for parameter in parameters:
subnet_v4, subnet_v6 = vif['network']['subnets']
if parameter.get('name') == 'IP':
self.assertTrue(_ipv4_like(parameter.get('value'),
'192.168'))
elif parameter.get('name') == 'DHCPSERVER':
dhcp_server = subnet_v4.get('dhcp_server')
self.assertEqual(parameter.get('value'), dhcp_server)
elif parameter.get('name') == 'RASERVER':
ra_server = subnet_v6['gateway']['address'] + "/128"
self.assertEqual(parameter.get('value'), ra_server)
elif parameter.get('name') == 'PROJNET':
ipv4_cidr = subnet_v4['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
self.assertEqual(parameter.get('value'), net)
elif parameter.get('name') == 'PROJMASK':
ipv4_cidr = subnet_v4['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
self.assertEqual(parameter.get('value'), mask)
elif parameter.get('name') == 'PROJNET6':
ipv6_cidr = subnet_v6['cidr']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
self.assertEqual(parameter.get('value'), net)
elif parameter.get('name') == 'PROJMASK6':
ipv6_cidr = subnet_v6['cidr']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
self.assertEqual(parameter.get('value'), prefix)
else:
raise exception.InvalidParameterValue('unknown parameter '
'in filter')
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
class LibvirtUtilsTestCase(test.TestCase):
def test_get_iscsi_initiator(self):
self.mox.StubOutWithMock(utils, 'execute')
initiator = 'fake.initiator.iqn'
rval = ("junk\nInitiatorName=%s\njunk\n" % initiator, None)
utils.execute('cat', '/etc/iscsi/initiatorname.iscsi',
run_as_root=True).AndReturn(rval)
# Start test
self.mox.ReplayAll()
result = libvirt_utils.get_iscsi_initiator()
self.assertEqual(initiator, result)
def test_get_missing_iscsi_initiator(self):
self.mox.StubOutWithMock(utils, 'execute')
file_path = '/etc/iscsi/initiatorname.iscsi'
utils.execute('cat', file_path, run_as_root=True).AndRaise(
exception.FileNotFound(file_path=file_path)
)
# Start test
self.mox.ReplayAll()
result = libvirt_utils.get_iscsi_initiator()
self.assertIsNone(result)
def test_create_image(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('qemu-img', 'create', '-f', 'raw',
'/some/path', '10G')
utils.execute('qemu-img', 'create', '-f', 'qcow2',
'/some/stuff', '1234567891234')
# Start test
self.mox.ReplayAll()
libvirt_utils.create_image('raw', '/some/path', '10G')
libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
def test_create_cow_image(self):
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
rval = ('', '')
os.path.exists('/some/path').AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', '/some/path').AndReturn(rval)
utils.execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'backing_file=/some/path',
'/the/new/cow')
# Start test
self.mox.ReplayAll()
libvirt_utils.create_cow_image('/some/path', '/the/new/cow')
def test_pick_disk_driver_name(self):
type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'xen': ([True, 'phy'], [False, 'tap2'], [None, 'tap2']),
'uml': ([True, None], [False, None], [None, None]),
'lxc': ([True, None], [False, None], [None, None])}
for (libvirt_type, checks) in type_map.iteritems():
if libvirt_type == "xen":
version = 4001000
else:
version = 1005001
self.flags(libvirt_type=libvirt_type)
for (is_block_dev, expected_result) in checks:
result = libvirt_utils.pick_disk_driver_name(version,
is_block_dev)
self.assertEqual(result, expected_result)
def test_pick_disk_driver_name_xen_4_0_0(self):
self.flags(libvirt_type="xen")
result = libvirt_utils.pick_disk_driver_name(4000000, False)
self.assertEqual(result, "tap")
def test_get_disk_size(self):
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists('/some/path').AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/some/path').AndReturn(('''image: 00000001
file format: raw
virtual size: 4.4M (4592640 bytes)
disk size: 4.4M''', ''))
# Start test
self.mox.ReplayAll()
self.assertEqual(disk.get_disk_size('/some/path'), 4592640)
def test_copy_image(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
src_fd, src_path = tempfile.mkstemp()
try:
with os.fdopen(src_fd, 'w') as fp:
fp.write('canary')
libvirt_utils.copy_image(src_path, dst_path)
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'canary')
finally:
os.unlink(src_path)
finally:
os.unlink(dst_path)
def test_write_to_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
libvirt_utils.write_to_file(dst_path, 'hello')
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
finally:
os.unlink(dst_path)
def test_write_to_file_with_umask(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
os.unlink(dst_path)
libvirt_utils.write_to_file(dst_path, 'hello', umask=0o277)
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
mode = os.stat(dst_path).st_mode
self.assertEqual(mode & 0o277, 0)
finally:
os.unlink(dst_path)
def test_chown(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('chown', 'soren', '/some/path', run_as_root=True)
self.mox.ReplayAll()
libvirt_utils.chown('/some/path', 'soren')
def _do_test_extract_snapshot(self, dest_format='raw', out_format='raw'):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', out_format,
'-s', 'snap1', '/path/to/disk/image', '/extracted/snap')
# Start test
self.mox.ReplayAll()
libvirt_utils.extract_snapshot('/path/to/disk/image', 'qcow2',
'snap1', '/extracted/snap', dest_format)
def test_extract_snapshot_raw(self):
self._do_test_extract_snapshot()
def test_extract_snapshot_iso(self):
self._do_test_extract_snapshot(dest_format='iso')
def test_extract_snapshot_qcow2(self):
self._do_test_extract_snapshot(dest_format='qcow2', out_format='qcow2')
def test_load_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
self.assertEqual(libvirt_utils.load_file(dst_path), 'hello')
finally:
os.unlink(dst_path)
def test_file_open(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
with libvirt_utils.file_open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
finally:
os.unlink(dst_path)
def test_get_fs_info(self):
class FakeStatResult(object):
def __init__(self):
self.f_bsize = 4096
self.f_frsize = 4096
self.f_blocks = 2000
self.f_bfree = 1000
self.f_bavail = 900
self.f_files = 2000
self.f_ffree = 1000
self.f_favail = 900
self.f_flag = 4096
self.f_namemax = 255
self.path = None
def fake_statvfs(path):
self.path = path
return FakeStatResult()
self.stubs.Set(os, 'statvfs', fake_statvfs)
fs_info = libvirt_utils.get_fs_info('/some/file/path')
self.assertEqual('/some/file/path', self.path)
self.assertEqual(8192000, fs_info['total'])
self.assertEqual(3686400, fs_info['free'])
self.assertEqual(4096000, fs_info['used'])
def test_fetch_image(self):
self.mox.StubOutWithMock(images, 'fetch_to_raw')
context = 'opaque context'
target = '/tmp/targetfile'
image_id = '4'
user_id = 'fake'
project_id = 'fake'
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.mox.ReplayAll()
libvirt_utils.fetch_image(context, target, image_id,
user_id, project_id)
def test_fetch_raw_image(self):
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
def fake_rename(old, new):
self.executes.append(('mv', old, new))
def fake_unlink(path):
self.executes.append(('rm', path))
def fake_rm_on_error(path, remove=None):
self.executes.append(('rm', '-f', path))
def fake_qemu_img_info(path):
class FakeImgInfo(object):
pass
file_format = path.split('.')[-1]
if file_format == 'part':
file_format = path.split('.')[-2]
elif file_format == 'converted':
file_format = 'raw'
if 'backing' in path:
backing_file = 'backing'
else:
backing_file = None
FakeImgInfo.file_format = file_format
FakeImgInfo.backing_file = backing_file
return FakeImgInfo()
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os, 'rename', fake_rename)
self.stubs.Set(os, 'unlink', fake_unlink)
self.stubs.Set(images, 'fetch', lambda *_: None)
self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
self.stubs.Set(fileutils, 'delete_if_exists', fake_rm_on_error)
# Since the remove param of fileutils.remove_path_on_error()
# is initialized at load time, we must provide a wrapper
# that explicitly resets it to our fake delete_if_exists()
old_rm_path_on_error = fileutils.remove_path_on_error
f = functools.partial(old_rm_path_on_error, remove=fake_rm_on_error)
self.stubs.Set(fileutils, 'remove_path_on_error', f)
context = 'opaque context'
image_id = '4'
user_id = 'fake'
project_id = 'fake'
target = 't.qcow2'
self.executes = []
expected_commands = [('qemu-img', 'convert', '-O', 'raw',
't.qcow2.part', 't.qcow2.converted'),
('rm', 't.qcow2.part'),
('mv', 't.qcow2.converted', 't.qcow2')]
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
target = 't.raw'
self.executes = []
expected_commands = [('mv', 't.raw.part', 't.raw')]
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
target = 'backing.qcow2'
self.executes = []
expected_commands = [('rm', '-f', 'backing.qcow2.part')]
self.assertRaises(exception.ImageUnacceptable,
images.fetch_to_raw,
context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
del self.executes
def test_get_disk_backing_file(self):
with_actual_path = False
def fake_execute(*args, **kwargs):
if with_actual_path:
return ("some: output\n"
"backing file: /foo/bar/baz (actual path: /a/b/c)\n"
"...: ...\n"), ''
else:
return ("some: output\n"
"backing file: /foo/bar/baz\n"
"...: ...\n"), ''
def return_true(*args, **kwargs):
return True
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os.path, 'exists', return_true)
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'baz')
with_actual_path = True
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'c')
class LibvirtDriverTestCase(test.TestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):
super(LibvirtDriverTestCase, self).setUp()
self.libvirtconnection = libvirt_driver.LibvirtDriver(
fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
"""Create a test instance."""
if not params:
params = {}
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor_by_name('m1.tiny'))
inst = {}
inst['image_ref'] = '1'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['host'] = 'host1'
inst['root_gb'] = 10
inst['ephemeral_gb'] = 20
inst['config_drive'] = True
inst['kernel_id'] = 2
inst['ramdisk_id'] = 3
inst['key_data'] = 'ABCDEFG'
inst['system_metadata'] = sys_meta
inst.update(params)
return db.instance_create(context.get_admin_context(), inst)
def test_migrate_disk_and_power_off_exception(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off.
"""
self.counter = 0
self.checked_shared_storage = False
def fake_get_instance_disk_info(instance, xml=None,
block_device_info=None):
return '[]'
def fake_destroy(instance):
pass
def fake_get_host_ip_addr():
return '10.0.0.1'
def fake_execute(*args, **kwargs):
self.counter += 1
if self.counter == 1:
assert False, "intentional failure"
def fake_os_path_exists(path):
return True
def fake_is_storage_shared(dest, inst_base):
self.checked_shared_storage = True
return False
self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
fake_get_host_ip_addr)
self.stubs.Set(self.libvirtconnection, '_is_storage_shared_with',
fake_is_storage_shared)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
ins_ref = self._create_instance()
self.assertRaises(AssertionError,
self.libvirtconnection.migrate_disk_and_power_off,
None, ins_ref, '10.0.0.2', None, None)
def test_migrate_disk_and_power_off(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off.
"""
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
'virt_disk_size': '10737418240',
'backing_file': '/base/disk',
'disk_size': '83886080'},
{'type': 'raw', 'path': '/test/disk.local',
'virt_disk_size': '10737418240',
'backing_file': '/base/disk.local',
'disk_size': '83886080'}]
disk_info_text = jsonutils.dumps(disk_info)
def fake_get_instance_disk_info(instance, xml=None,
block_device_info=None):
return disk_info_text
def fake_destroy(instance):
pass
def fake_get_host_ip_addr():
return '10.0.0.1'
def fake_execute(*args, **kwargs):
pass
self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
fake_get_host_ip_addr)
self.stubs.Set(utils, 'execute', fake_execute)
ins_ref = self._create_instance()
# dest is different host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.2', None, None)
self.assertEqual(out, disk_info_text)
# dest is same host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.1', None, None)
self.assertEqual(out, disk_info_text)
def test_wait_for_running(self):
def fake_get_info(instance):
if instance['name'] == "not_found":
raise exception.InstanceNotFound(instance_id=instance['uuid'])
elif instance['name'] == "running":
return {'state': power_state.RUNNING}
else:
return {'state': power_state.SHUTDOWN}
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
# instance not found case
self.assertRaises(exception.InstanceNotFound,
self.libvirtconnection._wait_for_running,
{'name': 'not_found',
'uuid': 'not_found_uuid'})
# instance is running case
self.assertRaises(loopingcall.LoopingCallDone,
self.libvirtconnection._wait_for_running,
{'name': 'running',
'uuid': 'running_uuid'})
# else case
self.libvirtconnection._wait_for_running({'name': 'else',
'uuid': 'other_uuid'})
def _test_finish_migration(self, power_on):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_migration.
"""
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
'local_gb': 10, 'backing_file': '/base/disk'},
{'type': 'raw', 'path': '/test/disk.local',
'local_gb': 10, 'backing_file': '/base/disk.local'}]
disk_info_text = jsonutils.dumps(disk_info)
powered_on = power_on
self.fake_create_domain_called = False
def fake_can_resize_image(path, size):
return False
def fake_extend(path, size, use_cow=False):
pass
def fake_to_xml(context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
return ""
def fake_plug_vifs(instance, network_info):
pass
def fake_create_image(context, inst,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, inject_files=True):
self.assertFalse(inject_files)
def fake_create_domain(xml, instance=None, power_on=True):
self.fake_create_domain_called = True
self.assertEqual(powered_on, power_on)
return None
def fake_enable_hairpin(instance):
pass
def fake_execute(*args, **kwargs):
pass
def fake_get_info(instance):
if powered_on:
return {'state': power_state.RUNNING}
else:
return {'state': power_state.SHUTDOWN}
self.flags(use_cow_images=True)
self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
self.stubs.Set(libvirt_driver.disk, 'can_resize_image',
fake_can_resize_image)
self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(self.libvirtconnection, '_create_image',
fake_create_image)
self.stubs.Set(self.libvirtconnection, '_create_domain',
fake_create_domain)
self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
fake_enable_hairpin)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
ins_ref = self._create_instance()
self.libvirtconnection.finish_migration(
context.get_admin_context(), None, ins_ref,
disk_info_text, None, None, None, None, power_on)
self.assertTrue(self.fake_create_domain_called)
def test_finish_migration_power_on(self):
self._test_finish_migration(True)
def test_finish_migration_power_off(self):
self._test_finish_migration(False)
def _test_finish_revert_migration(self, power_on):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_revert_migration.
"""
powered_on = power_on
self.fake_create_domain_called = False
def fake_execute(*args, **kwargs):
pass
def fake_plug_vifs(instance, network_info):
pass
def fake_create_domain(xml, instance=None, power_on=True):
self.fake_create_domain_called = True
self.assertEqual(powered_on, power_on)
return None
def fake_enable_hairpin(instance):
pass
def fake_get_info(instance):
if powered_on:
return {'state': power_state.RUNNING}
else:
return {'state': power_state.SHUTDOWN}
def fake_to_xml(context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None):
return ""
self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
self.stubs.Set(self.libvirtconnection, '_create_domain',
fake_create_domain)
self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
fake_enable_hairpin)
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
ins_ref = self._create_instance()
os.mkdir(os.path.join(tmpdir, ins_ref['name']))
libvirt_xml_path = os.path.join(tmpdir,
ins_ref['name'],
'libvirt.xml')
f = open(libvirt_xml_path, 'w')
f.close()
self.libvirtconnection.finish_revert_migration(ins_ref, None,
None, power_on)
self.assertTrue(self.fake_create_domain_called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(False)
def _test_finish_revert_migration_after_crash(self, backup_made=True,
del_inst_failed=False):
class FakeLoopingCall:
def start(self, *a, **k):
return self
def wait(self):
return None
self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(shutil, 'rmtree')
self.mox.StubOutWithMock(utils, 'execute')
self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
self.stubs.Set(self.libvirtconnection, 'to_xml', lambda *a, **k: None)
self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
lambda *a: None)
self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
libvirt_utils.get_instance_path({}).AndReturn('/fake/foo')
os.path.exists('/fake/foo_resize').AndReturn(backup_made)
if backup_made:
if del_inst_failed:
os_error = OSError(errno.ENOENT, 'No such file or directory')
shutil.rmtree('/fake/foo').AndRaise(os_error)
else:
shutil.rmtree('/fake/foo')
utils.execute('mv', '/fake/foo_resize', '/fake/foo')
self.mox.ReplayAll()
self.libvirtconnection.finish_revert_migration({}, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(backup_made=True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(backup_made=True)
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(backup_made=False)
def test_finish_revert_migration_after_crash_delete_failed(self):
self._test_finish_revert_migration_after_crash(backup_made=True,
del_inst_failed=True)
def test_cleanup_failed_migration(self):
self.mox.StubOutWithMock(shutil, 'rmtree')
shutil.rmtree('/fake/inst')
self.mox.ReplayAll()
self.libvirtconnection._cleanup_failed_migration('/fake/inst')
def test_confirm_migration(self):
ins_ref = self._create_instance()
self.mox.StubOutWithMock(self.libvirtconnection, "_cleanup_resize")
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
self.mox.ReplayAll()
self.libvirtconnection.confirm_migration("migration_ref", ins_ref,
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_same_host(self):
ins_ref = self._create_instance({'host': CONF.host})
def fake_os_path_exists(path):
return True
def fake_shutil_rmtree(target):
pass
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
self.stubs.Set(shutil, 'rmtree', fake_shutil_rmtree)
self.mox.ReplayAll()
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_not_same_host(self):
host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host})
def fake_os_path_exists(path):
return True
def fake_shutil_rmtree(target):
pass
def fake_undefine_domain(instance):
pass
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
self.stubs.Set(shutil, 'rmtree', fake_shutil_rmtree)
self.stubs.Set(self.libvirtconnection, '_undefine_domain',
fake_undefine_domain)
self.stubs.Set(self.libvirtconnection, 'unplug_vifs',
fake_unplug_vifs)
self.stubs.Set(self.libvirtconnection.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.mox.ReplayAll()
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
def test_get_instance_disk_info_exception(self):
instance_name = "fake-instance-name"
class FakeExceptionDomain(FakeVirtDomain):
def __init__(self):
super(FakeExceptionDomain, self).__init__()
def XMLDesc(self, *args):
raise libvirt.libvirtError("Libvirt error")
def fake_lookup_by_name(instance_name):
return FakeExceptionDomain()
self.stubs.Set(self.libvirtconnection, '_lookup_by_name',
fake_lookup_by_name)
self.assertRaises(exception.InstanceNotFound,
self.libvirtconnection.get_instance_disk_info,
instance_name)
def test_get_cpuset_ids(self):
# correct syntax
self.flags(vcpu_pin_set="1")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1], cpuset_ids)
self.flags(vcpu_pin_set="1,2")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 2], cpuset_ids)
self.flags(vcpu_pin_set=", , 1 , ,, 2, ,")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 2], cpuset_ids)
self.flags(vcpu_pin_set="1-1")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1], cpuset_ids)
self.flags(vcpu_pin_set=" 1 - 1, 1 - 2 , 1 -3")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 2, 3], cpuset_ids)
self.flags(vcpu_pin_set="1,^2")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1], cpuset_ids)
self.flags(vcpu_pin_set="1-2, ^1")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([2], cpuset_ids)
self.flags(vcpu_pin_set="1-3,5,^2")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 3, 5], cpuset_ids)
self.flags(vcpu_pin_set=" 1 - 3 , ^2, 5")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 3, 5], cpuset_ids)
# invalid syntax
self.flags(vcpu_pin_set=" -1-3,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3-,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="-3,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3,5,^2^")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3,5,^2-")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="--13,^^5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="a-3,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-a,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3,b,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3,5,^c")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="3 - 1, 5 , ^ 2 ")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set=" 1,1, ^1")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set=" 1,^1,^1,2, ^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
class LibvirtVolumeUsageTestCase(test.TestCase):
"""Test for LibvirtDriver.get_all_volume_usage."""
def setUp(self):
super(LibvirtVolumeUsageTestCase, self).setUp()
self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.c = context.get_admin_context()
# creating instance
inst = {}
inst['uuid'] = '875a8070-d0b9-4949-8b31-104d125c9a64'
self.ins_ref = db.instance_create(self.c, inst)
# verify bootable volume device path also
self.bdms = [{'volume_id': 1,
'device_name': '/dev/vde'},
{'volume_id': 2,
'device_name': 'vda'}]
def test_get_all_volume_usage(self):
def fake_block_stats(instance_name, disk):
return (169L, 688640L, 0L, 0L, -1L)
self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
vol_usage = self.conn.get_all_volume_usage(self.c,
[dict(instance=self.ins_ref, instance_bdms=self.bdms)])
expected_usage = [{'volume': 1,
'instance': self.ins_ref,
'rd_bytes': 688640L, 'wr_req': 0L,
'flush_operations': -1L, 'rd_req': 169L,
'wr_bytes': 0L},
{'volume': 2,
'instance': self.ins_ref,
'rd_bytes': 688640L, 'wr_req': 0L,
'flush_operations': -1L, 'rd_req': 169L,
'wr_bytes': 0L}]
self.assertEqual(vol_usage, expected_usage)
def test_get_all_volume_usage_device_not_found(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError('invalid path')
self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
vol_usage = self.conn.get_all_volume_usage(self.c,
[dict(instance=self.ins_ref, instance_bdms=self.bdms)])
self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.TestCase):
"""Test libvirt_nonblocking option."""
def setUp(self):
super(LibvirtNonblockingTestCase, self).setUp()
self.flags(libvirt_nonblocking=True, libvirt_uri="test:///default")
def test_connection_to_primitive(self):
# Test bug 962840.
import nova.virt.libvirt.driver as libvirt_driver
connection = libvirt_driver.LibvirtDriver('')
jsonutils.to_primitive(connection._conn, convert_instances=True)
class LibvirtVolumeSnapshotTestCase(test.TestCase):
"""Tests for libvirtDriver.volume_snapshot_create/delete."""
def setUp(self):
super(LibvirtVolumeSnapshotTestCase, self).setUp()
self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.c = context.get_admin_context()
self.flags(instance_name_template='instance-%s')
# creating instance
self.inst = {}
self.inst['uuid'] = uuidutils.generate_uuid()
self.inst['id'] = '1'
# create domain info
self.dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio' serial='1234'/>
</disk>
</devices>
</domain>"""
self.create_info = {'type': 'qcow2',
'snapshot_id': '1234-5678',
'new_file': 'new-file'}
self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'
self.delete_info_1 = {'type': 'qcow2',
'file_to_merge': 'snap.img',
'merge_target_file': None}
self.delete_info_2 = {'type': 'qcow2',
'file_to_merge': 'snap.img',
'merge_target_file': 'other-snap.img'}
self.delete_info_invalid_type = {'type': 'made_up_type',
'file_to_merge': 'some_file',
'merge_target_file':
'some_other_file'}
def tearDown(self):
super(LibvirtVolumeSnapshotTestCase, self).tearDown()
def test_volume_snapshot_create(self, quiesce=True):
CONF.instance_name_template = 'instance-%s'
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, '_volume_api')
instance = db.instance_create(self.c, self.inst)
snapshot_id = 'snap-asdf-qwert'
new_file = 'new-file'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
domain.XMLDesc(0).AndReturn(self.dom_xml)
snap_xml_src = (
'<domainsnapshot>\n'
' <disks>\n'
' <disk name="disk1_file" snapshot="external" type="file">\n'
' <source file="new-file"/>\n'
' </disk>\n'
' <disk name="/path/to/dev/1" snapshot="no"/>\n'
' </disks>\n'
'</domainsnapshot>\n')
# Older versions of libvirt may be missing these.
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
snap_flags_q = snap_flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
if quiesce:
domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
else:
domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
AndRaise(libvirt.libvirtError('quiescing failed, no qemu-ga'))
domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
self.mox.ReplayAll()
self.conn._volume_snapshot_create(self.c, instance, domain,
self.volume_uuid, snapshot_id,
new_file)
self.mox.VerifyAll()
def test_volume_snapshot_create_noquiesce(self):
self.test_volume_snapshot_create(quiesce=False)
def test_volume_snapshot_create_outer_success(self):
instance = db.instance_create(self.c, self.inst)
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, '_volume_api')
self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')
self.conn._lookup_by_name('instance-1').AndReturn(domain)
self.conn._volume_snapshot_create(self.c,
instance,
domain,
self.volume_uuid,
self.create_info['snapshot_id'],
self.create_info['new_file'])
self.conn._volume_api.update_snapshot_status(
self.c, self.create_info['snapshot_id'], 'creating')
self.mox.ReplayAll()
self.conn.volume_snapshot_create(self.c, instance, self.volume_uuid,
self.create_info)
def test_volume_snapshot_create_outer_failure(self):
instance = db.instance_create(self.c, self.inst)
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, '_volume_api')
self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')
self.conn._lookup_by_name('instance-1').AndReturn(domain)
self.conn._volume_snapshot_create(self.c,
instance,
domain,
self.volume_uuid,
self.create_info['snapshot_id'],
self.create_info['new_file']).\
AndRaise(exception.NovaException('oops'))
self.conn._volume_api.update_snapshot_status(
self.c, self.create_info['snapshot_id'], 'error')
self.mox.ReplayAll()
self.assertRaises(exception.NovaException,
self.conn.volume_snapshot_create,
self.c,
instance,
self.volume_uuid,
self.create_info)
def test_volume_snapshot_delete_1(self):
"""Deleting newest snapshot -- blockRebase."""
instance = db.instance_create(self.c, self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.conn._lookup_by_name('instance-%s' % instance['id']).\
AndReturn(domain)
self.conn.has_min_version(mox.IgnoreArg()).AndReturn(True)
domain.blockRebase('vda', 'snap.img', 0, 0)
self.mox.ReplayAll()
self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
def test_volume_snapshot_delete_2(self):
"""Deleting older snapshot -- blockCommit."""
instance = db.instance_create(self.c, self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.conn._lookup_by_name('instance-%s' % instance['id']).\
AndReturn(domain)
self.conn.has_min_version(mox.IgnoreArg()).AndReturn(True)
domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0, 0)
self.mox.ReplayAll()
self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_2)
self.mox.VerifyAll()
def test_volume_snapshot_delete_outer_success(self):
instance = db.instance_create(self.c, self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, '_volume_api')
self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')
self.conn._volume_snapshot_delete(self.c,
instance,
self.volume_uuid,
snapshot_id,
delete_info=self.delete_info_1)
self.conn._volume_api.update_snapshot_status(
self.c, snapshot_id, 'deleting')
self.mox.ReplayAll()
self.conn.volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id,
self.delete_info_1)
self.mox.VerifyAll()
def test_volume_snapshot_delete_outer_failure(self):
instance = db.instance_create(self.c, self.inst)
snapshot_id = '1234-9876'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, '_volume_api')
self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')
self.conn._volume_snapshot_delete(self.c,
instance,
self.volume_uuid,
snapshot_id,
delete_info=self.delete_info_1).\
AndRaise(exception.NovaException('oops'))
self.conn._volume_api.update_snapshot_status(
self.c, snapshot_id, 'error_deleting')
self.mox.ReplayAll()
self.assertRaises(exception.NovaException,
self.conn.volume_snapshot_delete,
self.c,
instance,
self.volume_uuid,
snapshot_id,
self.delete_info_1)
self.mox.VerifyAll()
def test_volume_snapshot_delete_invalid_type(self):
instance = db.instance_create(self.c, self.inst)
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, '_volume_api')
self.mox.StubOutWithMock(self.conn, 'has_min_version')
self.conn.has_min_version(mox.IgnoreArg()).AndReturn(True)
self.conn._volume_api.update_snapshot_status(
self.c, self.snapshot_id, 'error_deleting')
self.mox.ReplayAll()
self.assertRaises(exception.NovaException,
self.conn.volume_snapshot_delete,
self.c,
instance,
self.volume_uuid,
self.snapshot_id,
self.delete_info_invalid_type)
| apache-2.0 | -9,089,820,123,202,550,000 | 40.487978 | 79 | 0.54878 | false |
IBMDecisionOptimization/docplex-examples | examples/cp/visu/rcpsp_multi_mode_json.py | 1 | 5287 | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
This example is the same as the one implemented in rcpsp_multi_mode.py, except that
the input data files are represented in JSON format, which is simpler to read and modify.
The MMRCPSP (Multi-Mode Resource-Constrained Project Scheduling Problem) is a
generalization of the Resource-Constrained Project Scheduling problem
(see rcpsp.py).
In the MMRCPSP, each activity can be performed in one out of several modes.
Each mode of an activity represents an alternative way of combining different levels
of resource requirements with a related duration.
Renewable and non-renewable resources are distinguished.
While renewable resources have a limited instantaneous availability, such as
manpower and machines, non-renewable resources are limited for the entire project,
which makes it possible to model, e.g., a budget for the project.
The objective is to find a mode and a start time for each activity such that the
schedule has minimal makespan and is feasible with regard to the precedence
and resource constraints.
Please refer to the documentation for the appropriate setup of the solving configuration.
"""
from docplex.cp.model import *
import os
import json
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# Load input data from json file
filename = os.path.dirname(os.path.abspath(__file__)) + '/data/rcpspmm_default.json'
with open(filename, 'r') as f:
jstr = f.read()
JSON_DATA = json.loads(jstr)
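# For reference, the loaded document is expected to have roughly the structure
# sketched below. This is only an illustration inferred from the accesses made
# further down in this script; the numeric values are made up and are not taken
# from rcpspmm_default.json.
#
# {
#     "capacityRenewable": [10, 8],
#     "capacityNonRenewable": [50, 60],
#     "tasks": [
#         {"id": 1,
#          "successors": [2, 3],
#          "modes": [
#              {"duration": 4,
#               "demandRenewable": [2, 0],
#               "demandNonRenewable": [5, 0]}
#          ]}
#     ]
# }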
#-----------------------------------------------------------------------------
# Prepare the data for modeling
#-----------------------------------------------------------------------------
# Get renewable capacities
CAPACITIES_RENEWABLE = JSON_DATA['capacityRenewable']
NB_RENEWABLE = len(CAPACITIES_RENEWABLE)
# Get non-renewable capacities
CAPACITIES_NON_RENEWABLE = JSON_DATA['capacityNonRenewable']
NB_NON_RENEWABLE = len(CAPACITIES_NON_RENEWABLE)
# Get list of tasks
TASKS = JSON_DATA['tasks']
NB_TASKS = len(TASKS)
# Create a unique id for each mode (to retrieve results)
MODES = []
for t in TASKS:
for i, m in enumerate(t['modes']):
m['id'] = 'T{}-M{}'.format(t['id'], i + 1)
MODES.append(m)
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# Create one interval variable per task
tasks = {t['id']: interval_var(name='T{}'.format(t['id'])) for t in TASKS}
# Add precedence constraints
mdl.add(end_before_start(tasks[t['id']], tasks[s]) for t in TASKS for s in t['successors'])
# Create one optional interval variable per task mode
modes = { m['id']: interval_var(name=m['id'], optional=True, size=m['duration']) for t in TASKS for m in t['modes'] }
# Add alternative constraints for tasks
mdl.add(alternative(tasks[t['id']], [ modes[m['id']] for m in t['modes'] ]) for t in TASKS)
# Initialize cumul functions for renewable and non renewable resources
renewables = [ sum(pulse(modes[m['id']], m['demandRenewable'][j]) for m in MODES if m['demandRenewable'][j] > 0)
for j in range(NB_RENEWABLE)]
non_renewables = [ sum(m['demandNonRenewable'][j]*presence_of(modes[m['id']]) for m in MODES if m['demandNonRenewable'][j] > 0 )
for j in range(NB_NON_RENEWABLE)]
# Constrain renewable resources capacity
mdl.add(renewables[j] <= CAPACITIES_RENEWABLE[j] for j in range(NB_RENEWABLE))
# Constrain non-renewable resources capacity
mdl.add(non_renewables[j] <= CAPACITIES_NON_RENEWABLE[j] for j in range(NB_NON_RENEWABLE))
# Minimize overall schedule end date
mdl.add(minimize(max([end_of(t) for t in tasks.values()])))
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
# Solve model
print('Solving model...')
res = mdl.solve(FailLimit=30000, TimeLimit=10)
print('Solution: ')
res.print_solution()
import docplex.cp.utils_visu as visu
if res and visu.is_visu_enabled():
load = [CpoStepFunction() for j in range(NB_RENEWABLE)]
for m in MODES:
itv = res.get_var_solution(modes[m['id']])
if itv.is_present():
for j in range(NB_RENEWABLE):
dem = m['demandRenewable'][j]
if dem > 0:
load[j].add_value(itv.get_start(), itv.get_end(), dem)
visu.timeline('Solution for RCPSPMM ' + filename)
visu.panel('Tasks')
for t in TASKS:
tid = t['id']
visu.interval(res.get_var_solution(tasks[tid]), tid, str(tid))
for j in range(NB_RENEWABLE):
visu.panel('R' + str(j + 1))
visu.function(segments=[(INTERVAL_MIN, INTERVAL_MAX, CAPACITIES_RENEWABLE[j])], style='area', color='lightgrey')
visu.function(segments=load[j], style='area', color=j)
visu.show()
| apache-2.0 | 4,069,867,353,027,879,000 | 38.162963 | 128 | 0.598449 | false |
MCLConsortium/mcl-site | support/upgradeMCL.py | 1 | 1894 | #!/usr/bin/env python
# encoding: utf-8
# Copyright 2020 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
import sys, logging, transaction
from Products.CMFPlone.utils import get_installer
_products = [
# 'edrnsite.portlets',
# 'edrn.theme',
# 'eke.knowledge',
# 'edrnsite.policy',
# 'collective.js.jqueryui', # This wants to be upgraded even though it says its profile version is the same
# 'eea.facetednavigation', # 11.7→13.8
]
logging.basicConfig(level=logging.INFO, format='%(levelname)-8s %(message)s')
app = globals().get('app', None) # ``app`` comes from ``instance run`` magic.
def _setupLogging():
channel = logging.StreamHandler()
channel.setFormatter(logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s'))
logger = logging.getLogger('jpl')
logger.setLevel(logging.DEBUG)
logger.addHandler(channel)
def upgradeMCL(portal):
# OK, so what needs to be done here?
# Probably the equivalent of hitting the upgrade button on jpl.mcl.site.policy
# on the add/remove programs panel in Plone, maybe followed by a full ingest
qi = get_installer(portal)
for productID in _products:
logging.info(u'=== UPGRADING %s', productID)
qi.upgrade_product(productID)
def _main(app):
# Run: ``bin/zope-debug -O mcl run $PWD/support/upgradeMCL.py``
portal = app['mcl']
upgradeMCL(portal)
transaction.commit()
return True
def main(argv):
_setupLogging()
try:
global app
_main(app)
except Exception as ex:
logging.exception(u'This is most unfortunate: %s', unicode(ex))
return False
return True
if __name__ == '__main__':
# The [2:] works around plone.recipe.zope2instance-4.2.6's lame bin/interpreter script issue
sys.exit(0 if main(sys.argv[2:]) is True else -1)
| apache-2.0 | -4,641,201,772,737,198,000 | 29.516129 | 114 | 0.670719 | false |
lukacu/django-gallery | gallery/admin.py | 1 | 3677 | #!/usr/bin/python
# -*- Mode: python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from django.conf import settings
from django.contrib.admin.util import unquote
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.contrib import admin
from django.forms import ModelForm
from mptt.admin import MPTTModelAdmin
from gallery.models import Album, Image
from imagekit.admin import AdminThumbnail
from mptt.forms import TreeNodeChoiceField
class AlbumAdminForm(ModelForm):
class Meta:
model = Album
def __init__(self, *args, **kwargs):
super(AlbumAdminForm, self).__init__(*args, **kwargs)
q = self.instance.get_descendants(include_self=True).filter(is_public=True).values("id")
self.fields['cover'].queryset = Image.objects.filter(album__in=q, is_public=True).order_by("-date_added")
class AlbumAdmin(MPTTModelAdmin):
list_display = ('title', 'album_cover', 'is_public', 'order', 'move_up_down_links')
list_filter = ['is_public']
mptt_level_indent = 40
form = AlbumAdminForm
def get_urls(self):
from django.conf.urls.defaults import patterns, url
info = self.model._meta.app_label, self.model._meta.module_name
return patterns('',
url(r'^(.+)/move-(up)/$', self.admin_site.admin_view(self.move_album), name='%s_%s_move_up' % info),
url(r'^(.+)/move-(down)/$', self.admin_site.admin_view(self.move_album), name='%s_%s_move_down' % info),
) + super(AlbumAdmin, self).get_urls()
def move_album(self, request, object_id, direction):
obj = get_object_or_404(self.model, pk=unquote(object_id))
if direction == 'up':
relative = obj.get_previous_sibling()
if relative:
obj.move_to(relative, 'left')
else:
relative = obj.get_next_sibling()
if relative:
obj.move_to(relative, 'right')
return HttpResponseRedirect('../../')
def move_up_down_links(self, obj):
var = {'app_label': self.model._meta.app_label, 'module_name': self.model._meta.module_name, 'object_id': obj.id, 'ADMIN_MEDIA_PREFIX': settings.ADMIN_MEDIA_PREFIX }
if obj.get_previous_sibling():
up = '<a href="../../%(app_label)s/%(module_name)s/%(object_id)s/move-up/"><img src="%(ADMIN_MEDIA_PREFIX)simg/admin/arrow-up.gif" alt="Move up" /></a>' % var
else:
up = ''
if obj.get_next_sibling():
down = '<a href="../../%(app_label)s/%(module_name)s/%(object_id)s/move-down/"><img src="%(ADMIN_MEDIA_PREFIX)simg/admin/arrow-down.gif" alt="Move up" /></a>' % var
else:
down = ''
return "%s %s" % (up, down)
move_up_down_links.allow_tags = True
move_up_down_links.short_description = 'Move'
def album_cover(self, obj):
cover = obj.cover_image()
if not cover:
return "<em>Not defined</em>"
return '<img src="%s" alt="%s" style="width: 42px;" />' % (cover.cover_image.url, cover.title)
album_cover.allow_tags = True
album_cover.short_description = 'Cover'
class ImageAdmin(admin.ModelAdmin):
list_display = ('admin_thumbnail', 'title', 'album', 'date_added', 'is_public')
list_display_links = ['title']
list_filter = ['date_added', 'album', 'is_public']
search_fields = ['title', 'title_slug', 'text']
list_per_page = 20
admin_thumbnail = AdminThumbnail(image_field='thumbnail_image', template="gallery/admin/thumbnail.html")
admin.site.register(Album, AlbumAdmin)
admin.site.register(Image, ImageAdmin)
| bsd-3-clause | 6,451,354,872,952,156,000 | 40.264368 | 172 | 0.632853 | false |
erigones/erigones-sddc-api | esdc_api/client.py | 1 | 5663 | # -*- coding: utf-8 -*-
"""
esdc_api.client
~~~~~~~~~~~~~~~
This module contains the Danube Cloud API :class:`Client` class used to access the Danube Cloud HTTP API.
"""
import json
import requests
from . import __version__
from .response import Response
__all__ = (
'Client',
)
class Client(object):
"""
Danube Cloud API HTTP client.
:param str api_url: Danube Cloud API base URL.
:param str api_key: Optional API key used to perform authenticated requests.
:param tuple auth: Optional auth tuple to enable Basic/Digest/Custom HTTP authentication.
:param float timeout: How long to wait for the server to send data before giving up (default: `None`).
:param bool ssl_verify: If `True`, the SSL cert will be verified (default: `True`).
"""
def __init__(self, api_url='https://danube.cloud/api', api_key=None, auth=None, timeout=None, ssl_verify=True):
"""Initialize Danube Cloud API object."""
assert not api_url.endswith('/'), 'trailing slash in api_url is not allowed'
self.api_url = api_url
self.auth = auth
self.timeout = timeout
self.ssl_verify = ssl_verify
self.headers = {
'User-Agent': 'esdc-api/python-client/%s' % __version__,
'Accept': 'application/json; indent=4',
'Content-Type': 'application/json; indent=4',
'ES-STREAM': 'es',
}
if api_key:
self.headers['ES-API-KEY'] = api_key
def __repr__(self):
return '<Danube Cloud API :: %s [%s]>' % (self.__class__.__name__, self.api_url)
def _get_request_url(self, resource):
"""Return complete URL send to the server."""
assert resource.startswith('/'), 'resource should begin with a slash'
url = self.api_url + resource
if not url.endswith('/'): # Every URL must have a trailing slash
url += '/'
return url
def request(self, method, resource, timeout=None, stream=True, **params):
"""Perform request to server and return :class:`.Response` or
raise an :class:`.ESAPIException`. This method is used by all public request methods in this class.
:param str method: HTTP method.
:param str resource: Danube Cloud API resource beginning with a slash (e.g. `/vm/<hostname>`).
:param int timeout: Optional timeout for the request (default `None`).
:param bool stream: Whether to wait for asynchronous API calls to finish (default `True`).
:param dict params: Request parameters internally translated into POST/PUT/DELETE JSON encoded data or
GET query string.
:return: Response object.
:rtype: :class:`.Response`
:raise: :class:`.ESAPIException`
"""
url = self._get_request_url(resource)
if timeout is None:
timeout = self.timeout
if stream:
headers = self.headers
else:
headers = self.headers.copy()
del headers['ES-STREAM']
if method.upper() == 'GET':
data = None
else:
data = json.dumps(params)
params = None
return Response(requests.request(method, url, params=params, data=data, headers=headers,
auth=self.auth, timeout=timeout, allow_redirects=False,
stream=stream, verify=self.ssl_verify))
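# Illustration of how the keyword parameters are translated by request() (the
# resource and parameter names below are placeholders, not a documented part of
# the API):
#
#   client.get('/vm', full=True)        # -> GET    {api_url}/vm/?full=True
#   client.put('/vm/web01', vcpus=2)    # -> PUT    {api_url}/vm/web01/ with body '{"vcpus": 2}'
#   client.delete('/vm/web01')          # -> DELETE {api_url}/vm/web01/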
def get(self, resource, **kwargs):
"""Perform GET :func:`request <request>` to Danube Cloud API."""
return self.request('GET', resource, **kwargs)
def post(self, resource, **kwargs):
"""Perform POST :func:`request <request>` to Danube Cloud API."""
return self.request('POST', resource, **kwargs)
def create(self, resource, **kwargs):
"""Alias for :func:`post`."""
return self.post(resource, **kwargs)
def put(self, resource, **kwargs):
"""Perform PUT :func:`request <request>` to Danube Cloud API."""
return self.request('PUT', resource, **kwargs)
def set(self, resource, **kwargs):
"""Alias for :func:`put`."""
return self.put(resource, **kwargs)
def delete(self, resource, **kwargs):
"""Perform DELETE :func:`request <request>` to Danube Cloud API."""
return self.request('DELETE', resource, **kwargs)
def options(self, resource, **kwargs):
"""Perform OPTIONS :func:`request <request>` to Danube Cloud API."""
return self.request('OPTIONS', resource, **kwargs)
def logout(self):
"""Logout from Danube Cloud API (:func:`GET <get>` /accounts/logout)."""
response = self.get('/accounts/logout')
if response.ok:
self.headers.pop('Authorization', None)
return response
def login(self, username, password):
"""Login to Danube Cloud API (:func:`POST <post>` /accounts/login) using username and password.
:param str username: Danube Cloud username.
:param str password: Danube Cloud password.
"""
self.headers.pop('Authorization', None)
response = self.post('/accounts/login', username=username, password=password)
if response.ok:
self.headers['Authorization'] = 'Token %s' % response.content.result['token']
return response
def is_authenticated(self):
"""Return `True` if api_key is set or authorization token was saved by the :func:`login` method."""
return 'ES-API-KEY' in self.headers or 'Authorization' in self.headers
def ping(self):
""":func:`GET <get>` /ping"""
return self.get('/ping').content.result
| bsd-3-clause | 3,342,749,459,229,568,000 | 35.535484 | 115 | 0.603744 | false |
kennedyshead/home-assistant | homeassistant/components/auth/login_flow.py | 1 | 8731 | """HTTP views handle login flow.
# GET /auth/providers
Return a list of auth providers. Example:
[
{
"name": "Local",
"id": null,
"type": "local_provider",
}
]
# POST /auth/login_flow
Create a login flow. Will return the first step of the flow.
Pass in parameters 'client_id' and 'redirect_uri', which are validated by indieauth.
Pass in parameter 'handler' to specify the auth provider to use. Auth providers
are identified by type and id.
The optional parameter 'type' has to be set to 'link_user' if the login flow is used
to link a credential to an existing user. The default 'type' is 'authorize'.
{
"client_id": "https://hassbian.local:8123/",
"handler": ["local_provider", null],
"redirect_url": "https://hassbian.local:8123/",
"type': "authorize"
}
Return value will be a step in a data entry flow. See the docs for data entry
flow for details.
{
"data_schema": [
{"name": "username", "type": "string"},
{"name": "password", "type": "string"}
],
"errors": {},
"flow_id": "8f7e42faab604bcab7ac43c44ca34d58",
"handler": ["insecure_example", null],
"step_id": "init",
"type": "form"
}
# POST /auth/login_flow/{flow_id}
Progress the flow. Most flows will be one page, but can optionally add extra
login challenges, such as TFA. Once the flow has finished, the returned step will
have type RESULT_TYPE_CREATE_ENTRY and its "result" key will contain an authorization code.
The authorization code is associated with an authorized user by default; it will be
associated with a credential instead if "type" was set to "link_user" in
"/auth/login_flow".
{
"flow_id": "8f7e42faab604bcab7ac43c44ca34d58",
"handler": ["insecure_example", null],
"result": "411ee2f916e648d691e937ae9344681e",
"title": "Example",
"type": "create_entry",
"version": 1
}
"""
from ipaddress import ip_address
from aiohttp import web
import voluptuous as vol
import voluptuous_serialize
from homeassistant import data_entry_flow
from homeassistant.components.http.ban import (
log_invalid_auth,
process_success_login,
process_wrong_login,
)
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.const import (
HTTP_BAD_REQUEST,
HTTP_METHOD_NOT_ALLOWED,
HTTP_NOT_FOUND,
)
from . import indieauth
async def async_setup(hass, store_result):
"""Component to allow users to login."""
hass.http.register_view(AuthProvidersView)
hass.http.register_view(LoginFlowIndexView(hass.auth.login_flow, store_result))
hass.http.register_view(LoginFlowResourceView(hass.auth.login_flow, store_result))
class AuthProvidersView(HomeAssistantView):
"""View to get available auth providers."""
url = "/auth/providers"
name = "api:auth:providers"
requires_auth = False
async def get(self, request):
"""Get available auth providers."""
hass = request.app["hass"]
if not hass.components.onboarding.async_is_user_onboarded():
return self.json_message(
message="Onboarding not finished",
status_code=HTTP_BAD_REQUEST,
message_code="onboarding_required",
)
return self.json(
[
{"name": provider.name, "id": provider.id, "type": provider.type}
for provider in hass.auth.auth_providers
]
)
def _prepare_result_json(result):
"""Convert result to JSON."""
if result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
data = result.copy()
data.pop("result")
data.pop("data")
return data
if result["type"] != data_entry_flow.RESULT_TYPE_FORM:
return result
data = result.copy()
schema = data["data_schema"]
if schema is None:
data["data_schema"] = []
else:
data["data_schema"] = voluptuous_serialize.convert(schema)
return data
class LoginFlowIndexView(HomeAssistantView):
"""View to create a config flow."""
url = "/auth/login_flow"
name = "api:auth:login_flow"
requires_auth = False
def __init__(self, flow_mgr, store_result):
"""Initialize the flow manager index view."""
self._flow_mgr = flow_mgr
self._store_result = store_result
async def get(self, request):
"""Do not allow index of flows in progress."""
return web.Response(status=HTTP_METHOD_NOT_ALLOWED)
@RequestDataValidator(
vol.Schema(
{
vol.Required("client_id"): str,
vol.Required("handler"): vol.Any(str, list),
vol.Required("redirect_uri"): str,
vol.Optional("type", default="authorize"): str,
}
)
)
@log_invalid_auth
async def post(self, request, data):
"""Create a new login flow."""
if not await indieauth.verify_redirect_uri(
request.app["hass"], data["client_id"], data["redirect_uri"]
):
return self.json_message(
"invalid client id or redirect uri", HTTP_BAD_REQUEST
)
if isinstance(data["handler"], list):
handler = tuple(data["handler"])
else:
handler = data["handler"]
try:
result = await self._flow_mgr.async_init(
handler,
context={
"ip_address": ip_address(request.remote),
"credential_only": data.get("type") == "link_user",
},
)
except data_entry_flow.UnknownHandler:
return self.json_message("Invalid handler specified", HTTP_NOT_FOUND)
except data_entry_flow.UnknownStep:
return self.json_message("Handler does not support init", HTTP_BAD_REQUEST)
if result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
await process_success_login(request)
result.pop("data")
result["result"] = self._store_result(data["client_id"], result["result"])
return self.json(result)
return self.json(_prepare_result_json(result))
class LoginFlowResourceView(HomeAssistantView):
"""View to interact with the flow manager."""
url = "/auth/login_flow/{flow_id}"
name = "api:auth:login_flow:resource"
requires_auth = False
def __init__(self, flow_mgr, store_result):
"""Initialize the login flow resource view."""
self._flow_mgr = flow_mgr
self._store_result = store_result
async def get(self, request):
"""Do not allow getting status of a flow in progress."""
return self.json_message("Invalid flow specified", HTTP_NOT_FOUND)
@RequestDataValidator(vol.Schema({"client_id": str}, extra=vol.ALLOW_EXTRA))
@log_invalid_auth
async def post(self, request, flow_id, data):
"""Handle progressing a login flow request."""
client_id = data.pop("client_id")
if not indieauth.verify_client_id(client_id):
return self.json_message("Invalid client id", HTTP_BAD_REQUEST)
try:
# do not allow change ip during login flow
for flow in self._flow_mgr.async_progress():
if flow["flow_id"] == flow_id and flow["context"][
"ip_address"
] != ip_address(request.remote):
return self.json_message("IP address changed", HTTP_BAD_REQUEST)
result = await self._flow_mgr.async_configure(flow_id, data)
except data_entry_flow.UnknownFlow:
return self.json_message("Invalid flow specified", HTTP_NOT_FOUND)
except vol.Invalid:
return self.json_message("User input malformed", HTTP_BAD_REQUEST)
if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
            # @log_invalid_auth does not work here since this handler returns
            # HTTP 200, so we need to log failed login attempts manually.
if result.get("errors") is not None and result["errors"].get("base") in [
"invalid_auth",
"invalid_code",
]:
await process_wrong_login(request)
return self.json(_prepare_result_json(result))
result.pop("data")
result["result"] = self._store_result(client_id, result["result"])
return self.json(result)
async def delete(self, request, flow_id):
"""Cancel a flow in progress."""
try:
self._flow_mgr.async_abort(flow_id)
except data_entry_flow.UnknownFlow:
return self.json_message("Invalid flow specified", HTTP_NOT_FOUND)
return self.json_message("Flow aborted")
| apache-2.0 | -943,019,571,495,028,600 | 31.337037 | 87 | 0.617455 | false |
aschn/goodtechgigs | goodtechgigs/gigs/migrations/0001_initial.py | 1 | 1492 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import model_utils.fields
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('profiles', '0004_auto_20150305_2200'),
('tags', '0002_auto_20150305_2106'),
]
operations = [
migrations.CreateModel(
name='Gig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', model_utils.fields.StatusField(default=b'draft', max_length=100, verbose_name='status', no_check_for_status=True, choices=[(b'draft', b'draft'), (b'published', b'published'), (b'expired', b'expired'), (b'removed', b'removed')])),
('status_changed', model_utils.fields.MonitorField(default=django.utils.timezone.now, verbose_name='status changed', monitor='status')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('poster', models.ForeignKey(to='profiles.GigPoster')),
('skills_desired', taggit.managers.TaggableManager(to='tags.SkillTag', through='tags.SkillTaggedItem', help_text='A comma-separated list of tags.', verbose_name=b'desired skills')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| apache-2.0 | -3,499,465,569,666,910,000 | 42.882353 | 256 | 0.605898 | false |
rec/echomesh | code/python/echomesh/base/Path.py | 1 | 4125 | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.base import MakeEmptyProject
from echomesh.base import Platform
import getpass
import os
import os.path
import sys
# If ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES is True, echomesh prefers its
# own bundled external packages over any versions installed in your system
# path.
ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES = True
_PYTHON_PATH = os.path.abspath(sys.path[0])
ECHOMESH_PATH = os.path.dirname(os.path.dirname(_PYTHON_PATH))
_ASSET_PATH = None
_DATA_PATH = None
_PROJECT_PATH = None
_PLATFORM_CPP_PATHS = {
'ubuntu': 'Builds/Linux/build'
}
_EXTERNAL_CODE_PATH = os.path.join(_PYTHON_PATH, 'external')
_PLATFORM_EXTERNAL_CODE_PATH = os.path.join(
_EXTERNAL_CODE_PATH, 'platform', Platform.PLATFORM)
LIBRARY_PATH = os.path.join(ECHOMESH_PATH, 'lib', Platform.PLATFORM)
_CPP_BUILD_PATH = os.path.join(
ECHOMESH_PATH, 'code', 'cpp',
_PLATFORM_CPP_PATHS.get(Platform.PLATFORM, '')
)
_COMPATIBILITY_PATH = os.path.join(_PYTHON_PATH, 'compatibility')
PATHS = (_CPP_BUILD_PATH, _PLATFORM_EXTERNAL_CODE_PATH, _EXTERNAL_CODE_PATH,
LIBRARY_PATH, _COMPATIBILITY_PATH)
_REQUIRED_DIRECTORIES = 'asset', 'cache', 'data', 'log'
def data_path(**kwds):
_set_project_path(**kwds)
return _DATA_PATH
def echomesh_path(**kwds):
_set_project_path(**kwds)
return ECHOMESH_PATH
def project_path(**kwds):
_set_project_path(**kwds)
return _PROJECT_PATH
def python_path(**kwds):
_set_project_path(**kwds)
return _PYTHON_PATH
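# A directory counts as an echomesh project root only if it contains every
# directory named in _REQUIRED_DIRECTORIES ('asset', 'cache', 'data', 'log');
# set_project_path() below walks upward from the starting path until it finds
# such a directory, falling back to ECHOMESH_PATH.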
def _possible_project(path):
for d in _REQUIRED_DIRECTORIES:
if not os.path.exists(os.path.join(path, d)):
return False
return True
def set_project_path(project_path=None, show_error=True, prompt=True):
assert not prompt
original_path = os.path.abspath(
os.path.expanduser(project_path or os.curdir))
path = original_path
global _PROJECT_PATH, _DATA_PATH, _ASSET_PATH
while not _possible_project(path):
p = os.path.dirname(path)
if p != path:
path = p
continue
if prompt:
if MakeEmptyProject.ask_to_make_empty_project(original_path):
path = original_path
break
else:
_PROJECT_PATH = None
return False
if show_error:
print(
"\nYour path %s isn't in an echomesh project." % original_path)
print("Defaulting to the echomesh path %s." % ECHOMESH_PATH)
path = ECHOMESH_PATH
break
_PROJECT_PATH = path
_DATA_PATH = os.path.join(path, 'data')
_ASSET_PATH = os.path.join(path, 'asset')
os.chdir(path)
return True
def _set_project_path(**kwds):
if not _PROJECT_PATH:
set_project_path(**kwds)
def info():
_set_project_path()
return {
'Asset path': _ASSET_PATH,
'Code path': _PYTHON_PATH,
'Compatibility path': _COMPATIBILITY_PATH,
'C++ build path': _CPP_BUILD_PATH,
'Data path': _DATA_PATH,
'External code path': _EXTERNAL_CODE_PATH,
'Platform external code path': _PLATFORM_EXTERNAL_CODE_PATH,
'Project path': _PROJECT_PATH,
'Python path': ':'.join(PATHS),
'echomesh path': ECHOMESH_PATH,
}
def fix_sys_path(**kwds):
_set_project_path(**kwds)
for path in reversed(PATHS):
if ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES:
sys.path.insert(1, path)
else:
sys.path.append(path)
_HOME_VARIABLE_FIXED = False
# HACK!
def fix_home_directory_environment_variable():
if Platform.PLATFORM == Platform.RASPBERRY_PI:
global _HOME_VARIABLE_FIXED
if not _HOME_VARIABLE_FIXED:
# If running as root, export user pi's home directory as $HOME.
if getpass.getuser() == 'root':
os.environ['HOME'] = '/home/pi'
_HOME_VARIABLE_FIXED = True
# TODO: why is this never called?
def fix_paths():
_set_project_path()
fix_home_directory_environment_variable()
fix_sys_path()
| mit | 2,194,033,821,893,237,000 | 28.464286 | 82 | 0.638788 | false |
gmork2/django-soap-connector | soap_connector/tests/api/test_root.py | 1 | 1369 | from urllib.parse import urlparse, ParseResult
from django.test import TestCase
from django.urls import resolve, ResolverMatch
from rest_framework.reverse import reverse
from soap_connector.api.base import URL_NAMES
class RootViewTestCase(TestCase):
"""
"""
def setUp(self):
"""
:return:
"""
self.url = reverse("soap_connector:root")
self.response = self.client.get(self.url)
def test_simple(self):
"""
:return:
"""
self.assertEqual(200, self.response.status_code)
def test_url_names(self):
"""
:return:
"""
self.assertSetEqual(
set(self.response.data.keys()),
set(URL_NAMES)
)
def test_endpoint(self):
"""
Endpoint url path must correspond to a view
function.
:return:
"""
for url in self.response.data.values():
link = ParseResult('', '', *urlparse(url)[2:]).geturl()
self.assertIsInstance(resolve(link), ResolverMatch)
class RegistryViewTestCase(TestCase):
"""
"""
def setUp(self):
"""
:return:
"""
self.url = reverse("soap_connector:registry_list")
self.response = self.client.get(self.url)
def test_registry_view(self):
"""
"""
pass
| gpl-3.0 | 111,986,330,788,116,370 | 19.132353 | 67 | 0.553689 | false |
andela-jngatia/Just-Do-It | bucketlist/migrations/0002_auto_20160310_0740.py | 1 | 3876 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-10 07:40
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
('bucketlist', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='account',
options={'verbose_name': 'user', 'verbose_name_plural': 'users'},
),
migrations.AlterModelManagers(
name='account',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.RemoveField(
model_name='account',
name='is_admin',
),
migrations.AddField(
model_name='account',
name='date_joined',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined'),
),
migrations.AddField(
model_name='account',
name='first_name',
field=models.CharField(blank=True, max_length=30, verbose_name='first name'),
),
migrations.AddField(
model_name='account',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='account',
name='is_active',
field=models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active'),
),
migrations.AddField(
model_name='account',
name='is_staff',
field=models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status'),
),
migrations.AddField(
model_name='account',
name='is_superuser',
field=models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status'),
),
migrations.AddField(
model_name='account',
name='last_name',
field=models.CharField(blank=True, max_length=30, verbose_name='last name'),
),
migrations.AddField(
model_name='account',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
migrations.AlterField(
model_name='account',
name='email',
field=models.EmailField(blank=True, max_length=254, verbose_name='email address'),
),
migrations.AlterField(
model_name='account',
name='tagline',
field=models.TextField(max_length=250),
),
migrations.AlterField(
model_name='account',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
| mit | -4,156,431,365,914,840,600 | 43.045455 | 409 | 0.605263 | false |
hadronproject/lpms | lpms/shelltools.py | 1 | 11180 | # Copyright 2009 - 2011 Burak Sezer <[email protected]>
#
# This file is part of lpms
#
# lpms is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# lpms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with lpms. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import glob
import shutil
import time
import subprocess
import lpms
from lpms import out
from lpms import conf
from lpms import exceptions
from lpms import constants as cst
def binary_isexists(binary):
path = os.environ['PATH'].split(':')
for directory in path:
if os.path.exists(os.path.join(directory, binary)):
return True
return False
def makedirs(target):
try:
if not os.access(target, os.F_OK):
os.makedirs(target)
except OSError as err:
raise exceptions.BuiltinError("[makedirs] an error occured: %s" % target)
def is_link(source):
return os.path.islink(source)
def is_file(source):
return os.path.isfile(source)
def is_exists(source):
return os.path.exists(source)
def is_dir(source):
return os.path.isdir(source)
def real_path(path):
return os.path.realpath(path)
def is_empty(path):
return os.path.getsize(path) == 0
def basename(path):
return os.path.basename(path)
def dirname(path):
return os.path.dirname(path)
def echo(content, target):
mode = "a"
if not os.path.isfile(target):
mode = "w"
try:
with open(target, mode) as _file:
_file.write('%s\n' % content)
except IOError as err:
raise exceptions.BuiltinError("[echo] given content was not written to %s" % target)
def listdir(source):
if os.path.isdir(source):
return os.listdir(source)
else:
return glob.glob(source)
# FIXME: exception?
def cd(target=None):
current = os.getcwd()
def change(trgt):
try:
os.chdir(trgt)
except OSError as err:
raise exceptions.BuiltinError("[cd] directory was not changed: %s" % trgt)
if target is None:
change(os.path.dirname(current))
else:
change(target)
def touch(path):
if os.path.isfile(path):
out.warn("%s is already exist" % path)
return
open(path, 'w').close()
def system(cmd, show=False, stage=None, sandbox=None):
cfg = conf.LPMSConfig()
if sandbox is None:
sandbox = True if cfg.sandbox else False
    # override 'sandbox' variable if the user wants to modify it from the cli
if lpms.getopt('--enable-sandbox'):
sandbox = True
elif lpms.getopt('--disable-sandbox'):
sandbox = False
if lpms.getopt("--verbose"):
ret, output, err = run_cmd(cmd, True)
elif (not cfg.print_output or lpms.getopt("--quiet")) \
and not show:
ret, output, err = run_cmd(cmd, show=False, enable_sandbox=sandbox)
else:
ret, output, err = run_cmd(cmd, show=True, enable_sandbox=sandbox)
if ret != 0:
if not conf.LPMSConfig().print_output or lpms.getopt("--quiet"):
out.brightred("\n>> error messages:\n")
out.write(err)
out.warn("command failed: %s" % out.color(cmd, "red"))
if stage and output and err:
return False, output+err
return False
return True
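# run_cmd executes the command in a shell, optionally wrapped by the sandbox
# helper configured in constants, and returns (returncode, stdout, stderr).
# When show=True the output streams straight to the terminal, so the captured
# stdout/stderr returned by communicate() are None.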
def run_cmd(cmd, show=True, enable_sandbox=True):
stdout = None; stderr = None
if enable_sandbox:
# FIXME: getopt should not do this.
# the verbosity of messages, defaults to 1
# 1 - error
# 2 - warning
# 3 - normal
# 4 - verbose
# 5 - debug
# 6 - crazy debug
log_level = lpms.getopt("--sandbox-log-level", like=True)
if log_level is None:
log_level = "1"
if not log_level in ('1', '2', '3', '4', '5', '6'):
out.warn("%s is an invalid sandbox log level." % log_level)
cmd = "%s --config=%s --log-level=%s --log-file=%s -- %s" % (cst.sandbox_app, cst.sandbox_config, \
log_level, cst.sandbox_log, cmd)
if not show:
stdout = subprocess.PIPE; stderr=subprocess.PIPE
result = subprocess.Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
output, err = result.communicate()
return result.returncode, output, err
def copytree(source, target, sym=True):
if is_dir(source):
if os.path.exists(target):
if is_dir(target):
copytree(source, os.path.join(target, os.path.basename(source.strip('/'))))
return
else:
copytree(source, os.path.join(target, os.path.basename(source)))
return
try:
shutil.copytree(source, target, sym)
except OSError as err:
raise exceptions.BuiltinError("[copytree] an error occured while copying: %s -> %s" % (source, target))
else:
raise exceptions.BuiltinError("[copytree] %s does not exists" % source, stage=1)
def move(source, target):
src = glob.glob(source)
if len(src) == 0:
raise exceptions.BuiltinError("[move] %s is empty" % source)
if len(target.split("/")) > 1 and not os.path.isdir(os.path.dirname(target)):
makedirs(os.path.dirname(target))
for path in src:
if is_file(path) or is_link(path) or is_dir(path):
try:
shutil.move(path, target)
except OSError as err:
raise exceptions.BuiltinError("[move] an error occured while moving: %s -> %s" % (source, target))
else:
raise exceptions.BuiltinError("[move] file %s doesn\'t exists." % path)
def copy(source, target, sym = True):
src= glob.glob(source)
if len(src) == 0:
raise exceptions.BuiltinError("[copy] no file matched pattern %s." % source)
if len(target.split("/")) > 1 and not os.path.exists(os.path.dirname(target)):
makedirs(os.path.dirname(target))
for path in src:
if is_file(path) and not is_link(path):
try:
shutil.copy2(path, target)
except IOError as err:
raise exceptions.BuiltinError("[copy] an error occured while copying: %s -> %s" % (source, target))
elif is_link(path) and sym:
if is_dir(target):
os.symlink(os.readlink(path), os.path.join(target, os.path.basename(path)))
else:
if is_file(target):
os.remove(target)
os.symlink(os.readlink(path), target)
elif is_link(path) and not sym:
if is_dir(path):
copytree(path, target)
else:
shutil.copy2(path, target)
elif is_dir(path):
copytree(path, target, sym)
else:
            raise exceptions.BuiltinError('[copy] file %s does not exist.' % path)
def insinto(source, target, install_dir=None, target_file = '', sym = True):
if install_dir is not None:
target = os.path.join(install_dir, target)
makedirs(target)
if not target_file:
src = glob.glob(source)
if len(src) == 0:
raise exceptions.BuiltinError("[instinto] no file matched pattern %s." % source)
for path in src:
if os.access(path, os.F_OK):
copy(path, os.path.join(target, os.path.basename(path)), sym)
else:
copy(source, os.path.join(target, target_file), sym)
def make_symlink(source, target):
try:
os.symlink(source, target)
except OSError as err:
raise exceptions.BuiltinError("[make_symlink] symlink not created: %s -> %s" % (target, source))
def remove_file(pattern):
src = glob.glob(pattern)
if len(src) == 0:
out.error("[remove_file] no file matched pattern: %s." % pattern)
return False
for path in src:
if is_link(path):
try:
os.unlink(path)
except OSError as err:
raise exceptions.BuiltinError("[remove_file] an error occured: %s" % path)
elif is_file(path):
try:
os.remove(path)
except OSError as err:
raise exceptions.BuiltinError("[remove_file] an error occured: %s" % path)
elif not is_dir(path):
out.error("[remove_file] file %s doesn\'t exists." % path)
return False
def remove_dir(source_dir):
if is_link(source_dir):
os.unlink(source_dir)
return
if is_dir(source_dir):
try:
# rmtree gets string
shutil.rmtree(str(source_dir))
except OSError as err:
raise exceptions.BuiltinError("[remove_dir] an error occured while removing: %s" % source_dir)
elif is_file(source_dir):
pass
else:
out.error("[remove_dir] directory %s doesn\'t exists." % source_dir)
return False
def rename(source, target):
try:
os.rename(source, target)
except OSError as err:
raise exceptions.BuiltinError("an error occured while renaming: %s -> %s" % (source, target))
def install_executable(sources, target):
if not os.path.isdir(os.path.dirname(target)):
makedirs(os.path.dirname(target))
for source in sources:
srcs = glob.glob(source)
if len(srcs) == 0:
raise exceptions.BuiltinError("[install_executable] file not found: %s" % source)
for src in srcs:
if not system('install -m0755 -o root -g root %s %s' % (src, target)):
out.error("[install_executable] %s could not installed to %s" % (src, target))
return False
def install_readable(sources, target):
    # FIXME: Should this function create the target directory?
    # What if the target value is a file (insfile)?
for source in sources:
srcs = glob.glob(source)
if not srcs:
out.error("[install_readable] file not found, passing: %s" % source)
continue
for src in srcs:
if not system('install -m0644 "%s" %s' % (src, target)):
out.error("[install_readable] %s could not installed to %s." % (src, target))
return False
def install_library(source, target, permission = 0644):
if not os.path.isdir(os.path.dirname(target)):
makedirs(os.path.dirname(target))
if os.path.islink(source):
os.symlink(os.path.realpath(source), os.path.join(target, source))
else:
if not system('install -m0%o %s %s' % (permission, source, target)):
out.error("[install_library] %s could not installed to %s." % (source, target))
return False
def set_id(path, uid, gid):
os.chown(path, uid, gid)
def set_mod(path, mod):
os.chmod(path, mod)
| gpl-3.0 | -1,432,450,333,180,091,600 | 32.573574 | 115 | 0.598748 | false |
MediffRobotics/DeepRobotics | DeepBrain/SockCommun_Brain_Rpi.py | 1 | 2527 | import math
class BrainCommuViaSock:
def __init__(self):
self.Uparm_OldShiftingSteps=0;
self.Elbow_OldShiftingSteps=0;
def Elbow_Angles2Steps(self,Angle_Deg=0):
#Init
MovingSide_InitLength=231.03; # connect to motor
ShortSideLength=50;#mm
LongSideLength=235.63;#mm
pi=3.14159;
alpha=pi/2;
beta=0.19861847;
#Converting
Angle_Rad=Angle_Deg*(pi/180);# deg to rad
AngleTheta=pi-(alpha+beta+Angle_Rad);
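		# Law of cosines: the new length of the motor-driven side follows from the
		# two fixed link lengths and the included angle AngleTheta.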
MovingSide_NewLength=math.sqrt(ShortSideLength*ShortSideLength+
LongSideLength*LongSideLength-2*LongSideLength*
ShortSideLength*math.cos(AngleTheta));
ShiftingLength=MovingSide_InitLength-MovingSide_NewLength; # in mm
#Motor 1 revolution=6400 steps
#1 revolution = 2mm, so 0.5R/m
ShiftingSteps=0.5*ShiftingLength*6400;
UpdateShiftingSteps=ShiftingSteps-self.Elbow_OldShiftingSteps;
self.Elbow_OldShiftingSteps=ShiftingSteps;
UpdateShiftingSteps=int(UpdateShiftingSteps)
		print('Elbow_UpdateShiftingSteps: %d' % UpdateShiftingSteps)
return UpdateShiftingSteps
def UpArm_Angles2Steps(self,Angle_Deg=0):
MovingSide_InitLength=274.61;#connect to motor
ShortSideLength=65;#mm
LongSideLength=280.85;
pi=3.14159;
alpha=pi/2;
beta=0.2122;
Angle_Rad=Angle_Deg*(pi/180);#deg to rad
AngleTheta=pi-(alpha+beta+Angle_Rad);
MovingSide_NewLength=math.sqrt(ShortSideLength*ShortSideLength
+LongSideLength*LongSideLength-2*LongSideLength
*ShortSideLength*math.cos(AngleTheta));
ShiftingLength=MovingSide_InitLength-MovingSide_NewLength; # in mm
#Motor 1 revolution=1600 steps
#1 revolution = 2mm, so 0.5R/m
ShiftingSteps=0.5*ShiftingLength*1600;
UpdateShiftingSteps=ShiftingSteps-self.Uparm_OldShiftingSteps;
self.Uparm_OldShiftingSteps=ShiftingSteps;
UpdateShiftingSteps=int(UpdateShiftingSteps)
		print('Up_arm UpdateShiftingSteps: %d' % UpdateShiftingSteps)
return UpdateShiftingSteps
if __name__=='__main__':
objBrainSockCommu=BrainCommuViaSock()
while True:
objBrainSockCommu.UpArm_Angles2Steps()
objBrainSockCommu.Elbow_Angles2Steps()
cmd=raw_input("Please input cmd:")
| gpl-3.0 | 3,892,539,015,571,696,600 | 31.818182 | 77 | 0.633558 | false |
marcharper/stationary | stationary/utils/matrix_checks.py | 1 | 1986 | from .math_helpers import kl_divergence_dict
from .graph import Graph
from .edges import edges_to_edge_dict
from nose.tools import assert_almost_equal
def check_detailed_balance(edges, s, places=7):
"""
Check if the detailed balance condition is satisfied.
Parameters
----------
edges: list of tuples
transitions of the Markov process
s: dict
the stationary distribution
places: int
Decimal places of precision to require
"""
edge_dict = edges_to_edge_dict(edges)
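    # Detailed balance requires s[x] * T(x -> y) == s[y] * T(y -> x) for every
    # pair of states, so each difference below should vanish (up to `places`).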
for s1, s2 in edge_dict.keys():
diff = s[s1] * edge_dict[(s1, s2)] - s[s2] * edge_dict[(s2, s1)]
assert_almost_equal(diff, 0, places=places)
def check_global_balance(edges, stationary, places=7):
"""
Checks that the stationary distribution satisfies the global balance
condition. https://en.wikipedia.org/wiki/Balance_equation
Parameters
----------
edges: list of tuples
transitions of the Markov process
stationary: dict
the stationary distribution
places: int
Decimal places of precision to require
"""
g = Graph(edges)
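    # Global balance: for each state, the probability flow out (lhs) must equal
    # the probability flow in (rhs), i.e. s[x] * sum_y T(x -> y) == sum_y s[y] * T(y -> x).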
for s1 in g.vertices():
lhs = 0.
rhs = 0.
for s2, v in g.out_dict(s1).items():
if s1 == s2:
continue
lhs += stationary[s1] * v
for s2, v in g.in_dict(s1).items():
if s1 == s2:
continue
rhs += stationary[s2] * v
assert_almost_equal(lhs, rhs, places=places)
def check_eigenvalue(edges, s, places=3):
"""
Check that the stationary distribution satisfies the eigenvalue condition.
Parameters
----------
edges: list of tuples
transitions of the Markov process
s: dict
the stationary distribution
places: int
Decimal places of precision to require
"""
g = Graph(edges)
t = g.left_multiply(s)
assert_almost_equal(kl_divergence_dict(s, t), 0, places=places)
| mit | 7,332,367,794,311,983,000 | 25.48 | 78 | 0.608761 | false |
tensorflow/cloud | src/python/tensorflow_cloud/tuner/vizier_client.py | 1 | 19569 | # Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A thin client for the Cloud AI Platform Vizier Service."""
import datetime
import http
import json
import time
from typing import Any, Dict, List, Mapping, Optional, Text, Union
from googleapiclient import discovery
from googleapiclient import errors
import tensorflow as tf
from tensorflow_cloud.tuner import vizier_client_interface
from tensorflow_cloud.tuner import constants
from tensorflow_cloud.utils import google_api_client
class SuggestionInactiveError(Exception):
"""Indicates that GetSuggestion was called on an inactive study."""
class _VizierClient(vizier_client_interface.VizierClientInterface):
"""A wrapper class that allows for easy interaction with a Study."""
def __init__(self,
service_client: discovery.Resource,
project_id: Text,
region: Text,
study_id: Optional[Text] = None):
"""Create an VizierClient object.
Use this constructor when you know the study_id, and when the Study
already exists. Otherwise, you'll probably want to use
create_or_load_study() instead of constructing the
VizierClient class directly.
Args:
service_client: An API client of Vizier service.
project_id: A GCP project id.
region: A GCP region. e.g. 'us-central1'.
study_id: An identifier of the study. The full study name will be
`projects/{project_id}/locations/{region}/studies/{study_id}`.
The full trial name will be `{study name}/trials/{trial_id}`.
"""
self.service_client = service_client
self.project_id = project_id
self.region = region
if not study_id:
raise ValueError(
"Use create_or_load_study() instead of constructing the"
"VizierClient class directly"
)
self.study_id = study_id
def get_suggestions(
self,
client_id: Text,
suggestion_count: int = constants.SUGGESTION_COUNT_PER_REQUEST
) -> List[Dict[Text, Any]]:
"""Gets a list of suggested Trials.
Args:
client_id: An ID that identifies the `Tuner` requesting a `Trial`.
`Tuners` that should run the same trial (for instance, when
running a multi-worker model) should have the same ID. If
multiple suggestTrialsRequests have the same tuner_id, the
service will return the identical suggested trial if the trial
is PENDING, and provide a new trial if the last suggest trial
was completed.
suggestion_count: The number of suggestions to request.
Returns:
A list of Trials (represented by JSON dicts). This may be an empty
list if:
1. A finite search space has been exhausted.
2. If max_num_trials = 1000 has been reached.
3. Or if there are no longer any trials that match a supplied Context.
Raises:
SuggestionInactiveError: Indicates that a suggestion was requested
from an inactive study. Note that this is NOT raised when a
finite Study runs out of suggestions. In such a case, an empty
list is returned.
"""
# Requests a trial.
try:
resp = (
self.service_client.projects()
.locations()
.studies()
.trials()
.suggest(
parent=self._make_study_name(),
body={
"client_id": client_id,
"suggestion_count": suggestion_count,
},
)
.execute()
)
except errors.HttpError as e:
if e.resp.status == 429:
                # Status 429 'RESOURCE_EXHAUSTED' is raised when more trials are
                # requested than the Vizier service's per-study limit (1000), or
                # when a finite search space has been exhausted.
                # For distributed tuning, one tuner worker may request the 1001st
                # trial while another worker has not yet finished training the
                # 1000th trial, which triggers this error.
tf.get_logger().info("Reached max number of trials.")
return []
else:
tf.get_logger().info("SuggestTrial failed.")
raise e
# Polls the suggestion of long-running operations.
tf.get_logger().info("CreateTrial: polls the suggestions.")
operation = self._obtain_long_running_operation(resp)
suggestions = operation["response"]
if "trials" not in suggestions:
if operation["response"]["studyState"] == "INACTIVE":
raise SuggestionInactiveError(
"The study is stopped due to an internal error."
)
return suggestions["trials"]
def report_intermediate_objective_value(
self,
step: int,
elapsed_secs: float,
metric_list: List[Mapping[Text, Union[int, float]]],
trial_id: Text,
) -> None:
"""Calls AddMeasurementToTrial with the provided objective_value.
Args:
step: The number of steps the model has trained for.
elapsed_secs: The number of seconds since Trial execution began.
metric_list: A list of dictionary from metric names (strings) to
values (doubles) for additional metrics to record.
trial_id: trial_id.
"""
measurement = {
"stepCount": step,
"elapsedTime": {"seconds": int(elapsed_secs)},
"metrics": metric_list,
}
try:
self.service_client.projects().locations().studies().trials(
).addMeasurement(
name=self._make_trial_name(trial_id),
body={"measurement": measurement}).execute()
except errors.HttpError as e:
tf.get_logger().info("AddMeasurement failed.")
raise e
def should_trial_stop(self, trial_id: Text) -> bool:
"""Returns whether trial should stop early.
Args:
trial_id: trial_id.
Returns:
Whether it is recommended to stop the trial early.
"""
trial_name = self._make_trial_name(trial_id)
try:
resp = (
self.service_client.projects()
.locations()
.studies()
.trials()
.checkEarlyStoppingState(name=trial_name)
.execute()
)
except errors.HttpError as e:
tf.get_logger().info("CheckEarlyStoppingState failed.")
raise e
# Polls the stop decision of long-running operations.
operation = self._obtain_long_running_operation(resp)
tf.get_logger().info("CheckEarlyStoppingStateResponse")
if operation["response"].get("shouldStop"):
# Stops a trial.
try:
tf.get_logger().info("Stop the Trial.")
self.service_client.projects().locations().studies().trials(
).stop(name=trial_name).execute()
except errors.HttpError as e:
tf.get_logger().info("StopTrial failed.")
raise e
return True
return False
def complete_trial(
self,
trial_id: Text,
trial_infeasible: bool,
infeasibility_reason: Optional[Text] = None) -> Dict[Text, Any]:
"""Marks the trial as COMPLETED and sets the final measurement.
Args:
trial_id: trial_id.
trial_infeasible: If True, the parameter setting is not feasible.
infeasibility_reason: The reason the Trial was infeasible. Should
only be non-empty if trial_infeasible==True.
Returns:
The Completed Vizier trial, represented as a JSON Dictionary.
"""
try:
vizier_trial = (
self.service_client.projects()
.locations()
.studies()
.trials()
.complete(
name=self._make_trial_name(trial_id),
body={
"trial_infeasible": trial_infeasible,
"infeasible_reason": infeasibility_reason,
},
)
.execute()
)
except errors.HttpError as e:
tf.get_logger().info("CompleteTrial failed.")
raise e
return vizier_trial
def get_trial(self, trial_id: Text) -> Dict[Text, Any]:
"""Return the Vizier trial for the given trial_id."""
try:
trial = (
self.service_client.projects()
.locations()
.studies()
.trials()
.get(name=self._make_trial_name(trial_id))
.execute()
)
except errors.HttpError:
tf.get_logger().info("GetTrial failed.")
raise
return trial
def list_trials(self) -> List[Dict[Text, Any]]:
"""List trials."""
study_name = self._make_study_name()
try:
resp = (
self.service_client.projects()
.locations()
.studies()
.trials()
.list(parent=study_name)
.execute()
)
except errors.HttpError as e:
tf.get_logger().info("ListTrials failed.")
raise e
return resp.get("trials", [])
def list_studies(self) -> List[Dict[Text, Any]]:
"""List all studies under the current project and region.
Returns:
The list of studies.
"""
parent_name = self._make_parent_name()
try:
resp = self.service_client.projects().locations().studies().list(
parent=parent_name).execute()
except errors.HttpError:
tf.get_logger().info("ListStudies failed.")
raise
return resp.get("studies", [])
def delete_study(self, study_name: Optional[Text] = None) -> None:
"""Deletes the study.
Args:
study_name: Name of the study.
Raises:
ValueError: Indicates that the study_name does not exist.
HttpError: Indicates a HTTP error from calling the discovery API.
"""
if study_name is None:
study_name = self._make_study_name()
try:
self.service_client.projects().locations().studies().delete(
name=study_name).execute()
except errors.HttpError as e:
if e.resp.status == http.HTTPStatus.NOT_FOUND.value:
raise ValueError(
"DeleteStudy failed. Study not found: {}."
.format(study_name))
tf.get_logger().info("DeleteStudy failed.")
raise
tf.get_logger().info("Study deleted: {}.".format(study_name))
def _obtain_long_running_operation(self, resp):
"""Obtain the long-running operation."""
op_id = resp["name"].split("/")[-1]
operation_name = "projects/{}/locations/{}/operations/{}".format(
self.project_id, self.region, op_id
)
try:
get_op = (
self.service_client.projects()
.locations()
.operations()
.get(name=operation_name)
)
operation = get_op.execute()
except errors.HttpError as e:
tf.get_logger().info("GetLongRunningOperations failed.")
raise e
polling_secs = 1
num_attempts = 0
while not operation.get("done"):
sleep_time = self._polling_delay(num_attempts, polling_secs)
num_attempts += 1
tf.get_logger().info(
"Waiting for operation; attempt {}; "
"sleeping for {} seconds".format(
num_attempts, sleep_time
)
)
time.sleep(sleep_time.total_seconds())
if num_attempts > 30: # about 10 minutes
raise RuntimeError("GetLongRunningOperations timeout.")
operation = get_op.execute()
return operation
def _polling_delay(self, num_attempts, time_scale):
"""Computes a delay to the next attempt to poll the Vizier service.
This does bounded exponential backoff, starting with $time_scale.
If $time_scale == 0, it starts with a small time interval, less than
1 second.
Args:
num_attempts: The number of times have we polled and found that the
desired result was not yet available.
time_scale: The shortest polling interval, in seconds, or zero.
Zero is treated as a small interval, less than 1 second.
Returns:
A recommended delay interval, in seconds.
"""
small_interval = 0.3 # Seconds
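        # Bounded exponential backoff: the delay grows by a factor of 1.41 per
        # attempt and stops growing after 9 attempts (roughly 22x the base
        # interval).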
interval = max(
time_scale, small_interval) * 1.41 ** min(num_attempts, 9)
return datetime.timedelta(seconds=interval)
def _make_study_name(self):
return "projects/{}/locations/{}/studies/{}".format(
self.project_id, self.region, self.study_id
)
def _make_trial_name(self, trial_id):
return "projects/{}/locations/{}/studies/{}/trials/{}".format(
self.project_id, self.region, self.study_id, trial_id
)
def _make_parent_name(self):
return "projects/{}/locations/{}".format(self.project_id, self.region)
def create_or_load_study(
project_id: Text,
region: Text,
study_id: Text,
study_config: Optional[Dict[Text, Any]] = None,
) -> _VizierClient:
"""Factory method for creating or loading a Vizier client.
    Given a Vizier study_config, this will either create or open the
specified study. It will create it if it doesn't already exist, and open
it if someone has already created it.
Note that once a study is created, you CANNOT modify it with this function.
This function is designed for use in a distributed system, where many jobs
call create_or_load_study() nearly simultaneously with the same
`study_config`. In that situation, all clients will end up pointing nicely
to the same study.
Args:
project_id: A GCP project id.
region: A GCP region. e.g. 'us-central1'.
study_id: An identifier of the study. If not supplied, system-determined
unique ID is given. The full study name will be
projects/{project_id}/locations/{region}/studies/{study_id}.
And the full trial name will be {study name}/trials/{trial_id}.
study_config: Study configuration for Vizier service. If not
supplied, it will be assumed that the study with the given study_id
already exists, and will try to retrieve that study.
Returns:
An _VizierClient object with the specified study created or loaded.
Raises:
RuntimeError: Indicates that study_config is supplied but CreateStudy
failed and GetStudy did not succeed after
constants.MAX_NUM_TRIES_FOR_STUDIES tries.
ValueError: Indicates that study_config is not supplied and the study
with the given study_id does not exist.
"""
# Build the API client
# Note that Vizier service is exposed as a regional endpoint. As such,
# an API client needs to be created separately from the default.
with open(constants.OPTIMIZER_API_DOCUMENT_FILE) as f:
service_client = discovery.build_from_document(
service=json.load(f),
requestBuilder=google_api_client.TFCloudHttpRequest,
)
# Creates or loads a study.
study_parent = "projects/{}/locations/{}".format(project_id, region)
if study_config is None:
# If study config is unspecified, assume that the study already exists.
_get_study(
service_client=service_client,
study_parent=study_parent,
study_id=study_id,
study_should_exist=True,
)
else:
request = (
service_client.projects()
.locations()
.studies()
.create(
body={"study_config": study_config},
parent=study_parent,
studyId=study_id,
)
)
try:
tf.get_logger().info(request.execute())
except errors.HttpError as e:
if e.resp.status != 409: # 409 implies study exists, handled below
raise
_get_study(
service_client=service_client,
study_parent=study_parent,
study_id=study_id,
)
return _VizierClient(service_client, project_id, region, study_id)
def _get_study(
service_client: discovery.Resource,
study_parent: Text,
study_id: Text,
study_should_exist: bool = False,
):
"""Method for loading a study.
Given the study_parent and the study_id, this method will load the specified
study, up to constants.MAX_NUM_TRIES_FOR_STUDIES tries.
Args:
service_client: An API client of Vizier service.
study_parent: Prefix of the study name. The full study name will be
{study_parent}/studies/{study_id}.
study_id: An identifier of the study.
study_should_exist: Indicates whether it should be assumed that the
study with the given study_id exists.
"""
study_name = "{}/studies/{}".format(study_parent, study_id)
tf.get_logger().info(
"Study already exists: {}.\nLoad existing study...".format(study_name))
num_tries = 0
while True:
try:
service_client.projects().locations().studies().get(
name=study_name
).execute()
except errors.HttpError as err:
num_tries += 1
if num_tries >= constants.MAX_NUM_TRIES_FOR_STUDIES:
if (
study_should_exist
and err.resp.status == http.HTTPStatus.NOT_FOUND.value
):
raise ValueError(
"GetStudy failed. Study not found: {}.".format(study_id)
)
else:
raise RuntimeError(
"GetStudy failed. Max retries reached: {0!s}".format(
err
)
)
time.sleep(1) # wait 1 second before trying to get the study again
else:
break
| apache-2.0 | -2,417,612,249,861,651,000 | 36.632692 | 80 | 0.568297 | false |
georgemarshall/django-rest-auth | rest_allauth/app_settings.py | 1 | 1793 | from django.conf import settings
from .account.serializers import (
UserDetailsSerializer as DefaultUserDetailsSerializer,
LoginSerializer as DefaultLoginSerializer,
ChangePasswordSerializer as DefaultChangePasswordSerializer,
SetPasswordSerializer as DefaultSetPasswordSerializer,
EmailSerializer as DefaultEmailSerializer,
ConfirmEmailSerializer as DefaultConfirmEmailSerializer,
ResetPasswordSerializer as DefaultResetPasswordSerializer,
ResetPasswordKeySerializer as DefaultPasswordResetConfirmSerializer,
)
from .serializers import (
TokenSerializer as DefaultTokenSerializer,
)
from .utils import import_callable
serializers = getattr(settings, 'REST_AUTH_SERIALIZERS', {})
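# Each serializer below can be overridden from Django settings. Illustrative
# example (the dotted path is a placeholder and assumes import_callable accepts
# a dotted import path, as in upstream django-rest-auth):
#
#   REST_AUTH_SERIALIZERS = {
#       "LOGIN_SERIALIZER": "myapp.serializers.CustomLoginSerializer",
#   }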
UserDetailsSerializer = import_callable(
serializers.get('USER_DETAILS_SERIALIZER', DefaultUserDetailsSerializer)
)
LoginSerializer = import_callable(
serializers.get('LOGIN_SERIALIZER', DefaultLoginSerializer)
)
ChangePasswordSerializer = import_callable(
serializers.get('PASSWORD_CHANGE_SERIALIZER', DefaultChangePasswordSerializer)
)
SetPasswordSerializer = import_callable(
serializers.get('PASSWORD_SET_SERIALIZER', DefaultSetPasswordSerializer)
)
EmailSerializer = import_callable(
serializers.get('EMAIL_SERIALIZER', DefaultEmailSerializer)
)
ConfirmEmailSerializer = import_callable(
serializers.get('CONFIRM_EMAIL_SERIALIZER', DefaultConfirmEmailSerializer)
)
ResetPasswordSerializer = import_callable(
serializers.get('PASSWORD_RESET_SERIALIZER', DefaultResetPasswordSerializer)
)
ResetPasswordKeySerializer = import_callable(
serializers.get('PASSWORD_RESET_CONFIRM_SERIALIZER', DefaultPasswordResetConfirmSerializer)
)
TokenSerializer = import_callable(
serializers.get('TOKEN_SERIALIZER', DefaultTokenSerializer)
)
| mit | 1,956,809,140,446,849,000 | 31.6 | 95 | 0.819297 | false |
ismk/Python-Examples | temp3.py | 1 | 1914 | import sys
import sqlite3 as lite
import time
con = lite.connect("passwords.db")
cur = con.cursor()
cur.execute("CREATE TABLE if not EXISTS passwords(site VARCHAR(50), username VARCHAR(20), password VARCHAR(20));")
print ("\n***** Welcome to Passwords.py. *****\n\n")
choice = None
while choice == None:
print ("What would you like to do?")
print ("1.) Add new password.")
print ("2.) Find existing password.")
print ("3.) Update existing password.")
print ("4.) View all passwords.")
print ("5.) Quit.")
choice = input("> ")
if choice == "1":
s = input("Which website to add? ")
name = input("Username to add? ")
passwd = input("Password to add? ")
cur.execute("INSERT INTO passwords VALUES (?,?,?)", (s,name,passwd,))
time.sleep(1)
print ("\nUpdated.\n")
choice = None
elif choice == "2":
s = input("Find info for which website? ")
cur.execute("SELECT * FROM passwords WHERE site=?", (s,))
while True:
row = cur.fetchone()
if row == None:
break
print ("Website: " + row[0], "\nUsername: " + row[1], "\nPassword: " + row[2] + "\n")
choice = None
elif choice == "3":
s = input("Update info for which website? ")
name = input("New username? ")
passwd = input("New password? ")
cur.execute("UPDATE passwords SET username=?, password=? WHERE site=?", (name, passwd, s,))
time.sleep(1)
print ("\nUpdated.\n")
choice = None
elif choice == "4":
cur.execute("SELECT * FROM passwords")
while True:
row = cur.fetchone()
if row == None:
break
print ("Website: " + row[0], "\nUsername: " + row[1], "\nPassword: " + row[2] + "\n")
choice = None
elif choice == "5":
print ("\nBye bye\n")
break
else:
print ("Enter 1, 2, 3, or 4.")
choice = None
### cleaning up.
if con:
con.commit()
con.close() | mit | 4,125,055,926,198,433,000 | 19.288889 | 114 | 0.568443 | false |
AnalogJ/lexicon | lexicon/providers/cloudns.py | 1 | 8480 | """Provider module for CloudNS"""
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["cloudns.net"]
def provider_parser(subparser):
"""Configure provider parser for CloudNS"""
identity_group = subparser.add_mutually_exclusive_group()
identity_group.add_argument("--auth-id", help="specify user id for authentication")
identity_group.add_argument(
"--auth-subid", help="specify subuser id for authentication"
)
identity_group.add_argument(
"--auth-subuser", help="specify subuser name for authentication"
)
subparser.add_argument(
"--auth-password", help="specify password for authentication"
)
subparser.add_argument("--weight", help="specify the SRV record weight")
subparser.add_argument("--port", help="specify the SRV record port")
class Provider(BaseProvider):
"""Provider class for CloudNS"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self.api_endpoint = "https://api.cloudns.net"
def _authenticate(self):
payload = self._get("/dns/get-zone-info.json", {"domain-name": self.domain})
self.domain_id = payload["name"]
LOGGER.debug("authenticate: %s", payload)
def _create_record(self, rtype, name, content):
# Skip execution if such a record already exists
existing_records = self._list_records(rtype, name, content)
if existing_records:
return True
# Build parameters for adding a new record
params = {
"domain-name": self.domain_id,
"record-type": rtype,
"host": self._relative_name(name),
"record": content,
}
if self._get_lexicon_option("ttl"):
params["ttl"] = self._get_lexicon_option("ttl")
if self._get_lexicon_option("priority"):
params["priority"] = self._get_lexicon_option("priority")
if self._get_provider_option("weight"):
params["weight"] = self._get_lexicon_option("weight")
if self._get_provider_option("port"):
params["port"] = self._get_lexicon_option("port")
# Add new record by calling the ClouDNS API
payload = self._post("/dns/add-record.json", params)
LOGGER.debug("create_record: %s", payload)
# Error handling is already covered by self._request
return True
def _list_records(self, rtype=None, name=None, content=None):
# Build parameters to make use of the built-in API filtering
params = {"domain-name": self.domain_id}
if rtype:
params["type"] = rtype
if name:
params["host"] = self._relative_name(name)
# Fetch and parse all records for the given zone
payload = self._get("/dns/records.json", params)
payload = payload if not isinstance(payload, list) else {}
records = []
for record in payload.values():
records.append(
{
"type": record["type"],
"name": self._full_name(record["host"]),
"ttl": record["ttl"],
"content": record["record"],
"id": record["id"],
}
)
# Filter by content manually as API does not support that
if content:
records = [record for record in records if record["content"] == content]
# Print records as debug output and return them
LOGGER.debug("list_records: %s", records)
return records
def _update_record(self, identifier, rtype=None, name=None, content=None):
# Try to find record if no identifier was specified
if not identifier:
identifier = self._find_record_identifier(rtype, name, None)
# Build parameters for updating an existing record
params = {"domain-name": self.domain_id, "record-id": identifier}
if name:
params["host"] = self._relative_name(name)
if content:
params["record"] = content
if self._get_lexicon_option("ttl"):
params["ttl"] = self._get_lexicon_option("ttl")
if self._get_lexicon_option("priority"):
params["priority"] = self._get_lexicon_option("priority")
if self._get_provider_option("weight"):
params["weight"] = self._get_provider_option("weight")
if self._get_provider_option("port"):
params["port"] = self._get_provider_option("port")
# Update existing record by calling the ClouDNS API
payload = self._post("/dns/mod-record.json", params)
LOGGER.debug("update_record: %s", payload)
# Error handling is already covered by self._request
return True
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
# Try to find record if no identifier was specified
delete_record_id = []
if not identifier:
records = self._list_records(rtype, name, content)
delete_record_id = [record["id"] for record in records]
else:
delete_record_id.append(identifier)
LOGGER.debug("delete_records: %s", delete_record_id)
for record_id in delete_record_id:
# Delete existing record by calling the ClouDNS API
self._post(
"/dns/delete-record.json",
{"domain-name": self.domain_id, "record-id": record_id},
)
LOGGER.debug("delete_record: %s", True)
# Error handling is already covered by self._request
return True
def _build_authentication_data(self):
if not self._get_provider_option("auth_password"):
raise Exception(
"No valid authentication data passed, expected: auth-password"
)
if self._get_provider_option("auth_id"):
return {
"auth-id": self._get_provider_option("auth_id"),
"auth-password": self._get_provider_option("auth_password"),
}
if self._get_provider_option("auth_subid"):
return {
"sub-auth-id": self._get_provider_option("auth_subid"),
"auth-password": self._get_provider_option("auth_password"),
}
if self._get_provider_option("auth_subuser"):
return {
"sub-auth-user": self._get_provider_option("auth_subuser"),
"auth-password": self._get_provider_option("auth_password"),
}
if (
self._get_provider_option("auth_id")
or self._get_provider_option("auth_subid")
or self._get_provider_option("auth_subuser")
):
# All the options were passed with a fallback value, return an empty dictionary.
return {}
raise Exception(
"No valid authentication data passed, expected: auth-id, auth-subid, auth-subuser"
)
def _find_record_identifier(self, rtype, name, content):
records = self._list_records(rtype, name, content)
LOGGER.debug("records: %s", records)
if len(records) == 1:
return records[0]["id"]
raise Exception("Record identifier could not be found.")
def _request(self, action="GET", url="/", data=None, query_params=None):
# Set default values for missing arguments
data = data if data else {}
query_params = query_params if query_params else {}
# Merge authentication data into request
if action == "GET":
query_params.update(self._build_authentication_data())
else:
data.update(self._build_authentication_data())
# Fire request against ClouDNS API and parse result as JSON
response = requests.request(
action, self.api_endpoint + url, params=query_params, data=data
)
response.raise_for_status()
payload = response.json()
# Check ClouDNS specific status code and description
if (
"status" in payload
and "statusDescription" in payload
and payload["status"] != "Success"
):
raise Exception(
"ClouDNS API request has failed: " + payload["statusDescription"]
)
# Return payload
return payload
| mit | -3,725,754,271,118,477,000 | 37.371041 | 94 | 0.58691 | false |
spectralDNS/shenfun | docs/paper/CG/CGpaper_dirichlet.py | 1 | 8842 | """
This script has been used to compute the Dirichlet results of the paper
Efficient spectral-Galerkin methods for second-order equations using different Chebyshev bases
The results have been computed using Python 3.9 and Shenfun 3.1.1.
The generalized Chebyshev-Tau results are computed with dedalus,
and are as such not part of this script.
"""
import sympy as sp
import numpy as np
import scipy.sparse.linalg as lin
import array_to_latex as a2l
from time import time
x = sp.Symbol('x', real=True)
fe = {}
rnd = {}
func = {}
def matvec(u_hat, f_hat, A, B, alpha, method):
"""Compute matrix vector product
Parameters
----------
u_hat : Function
The solution array
f_hat : Function
The right hand side array
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import chebyshev, la
if method == 1:
if alpha == 0:
A.scale *= -1
f_hat = A.matvec(u_hat, f_hat)
A.scale *= -1
else:
sol = chebyshev.la.Helmholtz(A, B, -1, alpha)
f_hat = sol.matvec(u_hat, f_hat)
else:
if alpha == 0:
A.scale *= -1
f_hat = A.matvec(u_hat, f_hat)
A.scale *= -1
else:
M = alpha*B - A
f_hat = M.matvec(u_hat, f_hat)
return f_hat
def get_solver(A, B, alpha, method):
"""Return optimal solver for given method
Parameters
----------
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import chebyshev, la
if method == 2:
if alpha == 0:
sol = la.TDMA(A*(-1))
else:
sol = la.PDMA(alpha*B - A)
elif method == 1:
if alpha == 0:
A.scale = -1
sol = chebyshev.la.ADD_Solve(A)
else:
sol = chebyshev.la.Helmholtz(A, B, -1, alpha)
elif method in (0, 3, 4):
if alpha == 0:
sol = chebyshev.la.TwoDMA(A*(-1))
else:
sol = chebyshev.la.FDMA(alpha*B-A)
elif method == 5:
if alpha == 0:
AA = A*(-1)
sol = AA.solve
else:
sol = la.TDMA(alpha*B-A)
else:
raise NotImplementedError
return sol
def solve(f_hat, u_hat, A, B, alpha, method):
"""Solve (alpha*B-A)u_hat = f_hat
Parameters
----------
f_hat : Function
The right hand side array
u_hat : Function
The solution array
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import extract_bc_matrices, Function
if isinstance(B, list):
u_hat.set_boundary_dofs()
bc_mat = extract_bc_matrices([B])
B = B[0]
w0 = Function(u_hat.function_space())
f_hat -= alpha*bc_mat[0].matvec(u_hat, w0)
sol = get_solver(A, B, alpha, method)
if method == 1 and alpha != 0:
u_hat = sol(u_hat, f_hat)
else:
u_hat = sol(f_hat, u_hat)
return u_hat
def main(N, method=0, alpha=0, returntype=0):
from shenfun import FunctionSpace, TrialFunction, TestFunction, \
inner, div, grad, chebyshev, SparseMatrix, Function, Array
global fe
basis = {0: ('ShenDirichlet', 'Heinrichs'),
1: ('ShenDirichlet', 'ShenDirichlet'),
2: ('Heinrichs', 'Heinrichs'),
3: ('DirichletU', 'ShenDirichlet'),
4: ('Orthogonal', 'ShenDirichlet'), # Quasi-Galerkin
5: ('ShenDirichlet', 'ShenDirichlet'), # Legendre
}
test, trial = basis[method]
if returntype == 2:
ue = sp.sin(100*sp.pi*x)
family = 'C' if method < 5 else 'L'
kw = {}
scaled = True if method in (0, 5) else False
if scaled:
kw['scaled'] = True
ST = FunctionSpace(N, family, basis=test, **kw)
TS = FunctionSpace(N, family, basis=trial, **kw)
wt = {0: 1, 1: 1, 2: 1, 3: 1-x**2, 4: 1, 5: 1}[method]
u = TrialFunction(TS)
v = TestFunction(ST)
A = inner(v*wt, div(grad(u)))
B = inner(v*wt, u)
if method == 4:
# Quasi
Q2 = chebyshev.quasi.QIGmat(N)
A = Q2*A
B = Q2*B
if method == 3:
k = np.arange(N-2)
K = SparseMatrix({0: 1/((k+1)*(k+2)*2)}, (N-2, N-2))
A[0] *= K[0]
A[2] *= K[0][:-2]
B[-2] *= K[0][2:]
B[0] *= K[0]
B[2] *= K[0][:-2]
B[4] *= K[0][:-4]
if returntype == 0:
M = alpha*B.diags()-A.diags()
con = np.linalg.cond(M.toarray())
elif returntype == 1:
# Use rnd to get the same random numbers for all methods
buf = rnd.get(N, np.random.random(N))
if not N in rnd:
rnd[N] = buf
v = Function(TS, buffer=buf)
v[-2:] = 0
u_hat = Function(TS)
f_hat = Function(TS)
f_hat = matvec(v, f_hat, A, B, alpha, method)
u_hat = solve(f_hat, u_hat, A, B, alpha, method)
con = np.abs(u_hat-v).max()
elif returntype == 2:
fe = alpha*ue - ue.diff(x, 2)
f_hat = Function(ST)
fj = Array(ST, buffer=fe)
if wt != 1:
fj *= np.sin((np.arange(N)+0.5)*np.pi/N)**2
f_hat = ST.scalar_product(fj, f_hat, fast_transform=True)
if method == 4:
f_hat[:-2] = Q2.diags('csc')*f_hat
if method == 3:
f_hat[:-2] *= K[0]
sol = get_solver(A, B, alpha, method)
u_hat = Function(TS)
u_hat = solve(f_hat, u_hat, A, B, alpha, method)
uj = Array(TS)
uj = TS.backward(u_hat, uj, fast_transform=True)
ua = Array(TS, buffer=ue)
con = np.sqrt(inner(1, (uj-ua)**2))
return con
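# Example (illustrative): condition number of the method-0 system (Chebyshev,
# scaled ShenDirichlet/Heinrichs bases) for N=64 and alpha=0:
#
#   kappa = main(64, method=0, alpha=0, returntype=0)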
if __name__ == '__main__':
import matplotlib.pyplot as plt
import argparse
import os
import sys
parser = argparse.ArgumentParser(description='Solve the Helmholtz problem with Dirichlet boundary conditions')
parser.add_argument('--return_type', action='store', type=int, required=True)
parser.add_argument('--include_legendre', action='store_true')
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--plot', action='store_true')
parser.add_argument('--numba', action='store_true')
args = parser.parse_args()
if args.numba:
try:
import numba
os.environ['SHENFUN_OPTIMIZATION'] = 'NUMBA'
except ModuleNotFoundError:
            print('Numba not found - using Cython')
cond = []
if args.return_type == 2:
N = (2**4,2**6, 2**8, 2**12, 2**16, 2**20)
elif args.return_type == 1:
N = (2**4, 2**12, 2**20)
else:
N = (32, 64, 128, 256, 512, 1024, 2048)
M = 6 if args.include_legendre else 5
alphas = (0, 1000)
if args.return_type in (0, 2):
for alpha in alphas:
cond.append([])
if args.verbose > 0:
print('alpha =', alpha)
for basis in range(M): # To include Legendre use --include_legendre (takes hours for N=2**20)
if args.verbose > 1:
print('Method =', basis)
cond[-1].append([])
for n in N:
if args.verbose > 2:
print('N =', n)
cond[-1][-1].append(main(n, basis, alpha, args.return_type))
linestyle = {0: 'solid', 1: 'dashed', 2: 'dotted'}
for i in range(len(cond)):
plt.loglog(N, cond[i][0], 'b',
N, cond[i][1], 'r',
N, cond[i][2], 'k',
N, cond[i][3], 'm',
N, cond[i][4], 'y',
linestyle=linestyle[i])
if args.include_legendre:
plt.loglog(N, cond[i][5], 'y', linestyle=linestyle[i])
a2l.to_ltx(np.array(cond)[i], frmt='{:6.2e}', print_out=True, mathform=False)
else:
for basis in range(M):
cond.append([])
if args.verbose > 1:
print('Method =', basis)
for alpha in alphas:
if args.verbose > 0:
print('alpha =', alpha)
for n in N:
if args.verbose > 2:
print('N =', n)
cond[-1].append(main(n, basis, alpha, args.return_type))
a2l.to_ltx(np.array(cond), frmt='{:6.2e}', print_out=True, mathform=False)
if args.plot:
plt.show()
| bsd-2-clause | -6,614,167,239,419,566,000 | 28.972881 | 114 | 0.513345 | false |
Zearin/python-parsimonious | parsimonious/exceptions.py | 1 | 1517 | class BadGrammar(Exception):
"""The rule definitions passed to Grammar contain syntax errors."""
class VisitationError(Exception):
"""Something went wrong while traversing a parse tree.
This exception exists to augment an underlying exception with information
about where in the parse tree the error occurred. Otherwise, it could be
tiresome to figure out what went wrong; you'd have to play back the whole
tree traversal in your head.
"""
# TODO: Make sure this is pickleable. Probably use @property pattern. Make
# the original exc and node available on it if they don't cause a whole
# raft of stack frames to be retained.
def __init__(self, exc, exc_class, node):
"""Construct.
        :arg exc: What went wrong. We wrap this and add more info.
        :arg exc_class: The class of the underlying exception
        :arg node: The node at which the error occurred
"""
self.original_class = exc_class
super(VisitationError, self).__init__(
'%s: %s\n\n'
'Parse tree:\n'
'%s' %
(exc_class.__name__,
exc,
node.prettily(error=node)))
class UndefinedLabel(VisitationError):
"""A rule referenced in a grammar was never defined.
Circular references and forward references are okay, but you have to define
stuff at some point.
"""
def __init__(self, label):
self.label = label
def __unicode__(self):
return u'The label "%s" was never defined.' % self.label
__str__ = __unicode__
| mit | -4,155,696,561,795,561,500 | 31.276596 | 79 | 0.632169 | false |
bricetebbs/signup | urls.py | 1 | 1081 | from django.conf.urls import patterns, url
urlpatterns = patterns('',
url (
regex = '^email/$',
view = 'signup.views.signup_email',
name = 'signup_email'
),
url (
regex = '^logout/$',
view = 'signup.views.signup_logout',
name = 'signup_logout'
),
url (
regex = r'^$',
view = 'signup.views.signup_login',
name = 'signup_login'
),
url (
regex = '^login/$',
view = 'signup.views.signup_login',
name = 'signup_login'
),
url (
regex = '^login/email/(?P<user_token>[-\w]+)/(?P<key_token>[-\w]+)/$',
view = 'signup.views.signup_login_by_email',
name = 'signup_login_by_email'
),
url (
regex = '^change_username/$',
view = 'signup.views.signup_change_username_and_password',
name = 'signup_change_username_and_password'
),
url (
regex = '^change_password/$',
view = 'signup.views.signup_change_password',
name = 'signup_change_password'
),
) | mit | -8,363,420,443,017,328,000 | 26.74359 | 78 | 0.506013 | false |
EnviroCentre/patterns-toolbox | setup.py | 1 | 1042 | # -*- coding: utf-8 -*-
# Copyright 2015 Florenz A. P. Hollebrandse
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.core import setup
import version
setup(
name='patterntoolbox',
version=version.update()['str'],
packages=[
'herringbone',
'patterntoolbox'
],
package_data={'patterntoolbox': ['esri/toolboxes/*.*']},
license='Apache v2.0',
author='Florenz A. P. Hollebrandse',
author_email='[email protected]',
description='ArcGIS toolbox to create patterns'
)
| apache-2.0 | -1,147,717,983,915,998,200 | 30.575758 | 74 | 0.711132 | false |
di-unipi-socc/tosKer | tosker/docker_interface.py | 1 | 9261 | '''
Docker interface module
'''
import os
from functools import wraps
from os import path
import six
from docker import APIClient, errors
from .graph.artifacts import Dockerfile
from .graph.nodes import Container, Root, Volume
from .helper import Logger
_cli = None
def _get_name(func):
@wraps(func)
def func_wrapper(*args, **kwds):
if isinstance(args[0], six.string_types):
return func(*args, **kwds)
else:
assert isinstance(args[0], (Container, Volume))
return func(args[0].full_name, *args[1:], **kwds)
return func_wrapper
def _inject_docker_cli(func):
@wraps(func)
def func_wrapper(*args, **kwds):
global _cli
if _cli is None:
_cli = APIClient(base_url=os.environ.get('DOCKER_HOST'))
return func(_cli, *args, **kwds)
return func_wrapper
def _get_tmp_dir(node):
assert isinstance(node, Root)
return path.join(node.tpl.tmp_dir, node.name)
@_inject_docker_cli
def create_container(_cli,
con,
cmd=None,
entrypoint=None,
from_saved=False,
force=False):
_log = Logger.get(__name__)
def create():
tmp_dir = _get_tmp_dir(con)
try:
os.makedirs(tmp_dir)
except Exception:
pass
img_name = con.image.format
_log.debug('image_name: %s', img_name)
if from_saved:
saved_image = get_saved_image(con)
if inspect_image(saved_image):
img_name = saved_image
con.id = _cli.create_container(
name=con.full_name,
image=img_name,
user='root',
entrypoint=entrypoint,
command=cmd if cmd else con.cmd,
environment=con.env,
detach=True,
# stdin_open=True,
ports=[key for key in con.ports.keys()]
if con.ports else None,
volumes=['/tmp/dt'] + [k for k, v in con.volume] +\
[k for k in con.share_data.keys()],
networking_config=_cli.create_networking_config({
_get_app_name(con): _cli.create_endpoint_config(
links=con.connection
)}),
host_config=_cli.create_host_config(
port_bindings=con.ports,
binds=[tmp_dir + ':/tmp/dt'] +\
[v + ':' + k for k, v in con.volume] +\
['{}:{}'.format(v, k) for k, v in con.share_data.items()],
)
).get('Id')
assert isinstance(con, Container)
if isinstance(con.image, Dockerfile):
_log.debug('start building..')
build_image(con)
_log.debug('stop building..')
elif not from_saved:
_log.debug('start pulling.. %s', con.image)
# helper.print_json(
_cli.pull(con.image.format)
# , _log.debug)
_log.debug('end pulling..')
try:
create()
except errors.APIError as e:
if force:
delete_container(con)
create()
else:
_log.debug(e)
raise e
# @_inject_docker_cli
# def pull_image(_cli, image):
# assert isinstance(image, six.string_types)
# _cli.pull(image)
@_get_name
@_inject_docker_cli
def stop_container(_cli, name):
_log = Logger.get(__name__)
try:
_cli.stop(name)
except errors.NotFound as e:
_log.error(e)
raise e
@_get_name
@_inject_docker_cli
def start_container(_cli, name, wait=False):
_log = Logger.get(__name__)
try:
_cli.start(name)
if wait:
_log.debug('wait container..')
_cli.wait(name)
# helper.print_byte(
# _cli.logs(name, stream=True),
# _log.debug
# )
_log.debug('end wait container..')
except errors.NotFound as e:
_log.error(e)
raise e
@_get_name
@_inject_docker_cli
def delete_container(_cli, name, force=False):
_log = Logger.get(__name__)
try:
_cli.remove_container(name, v=True, force=force)
except (errors.NotFound, errors.APIError) as e:
_log.error(e)
raise e
@_get_name
@_inject_docker_cli
def exec_cmd(_cli, name, cmd, detach=False):
_log = Logger.get(__name__)
if not is_running(name):
raise Exception('{} is not running'.format(name))
try:
exec_id = _cli.exec_create(name, cmd,
#stdout=False,
#stderr=False
)
status = _cli.exec_start(exec_id, detach=detach)
_log.debug(status)
check = 'rpc error:' != status[:10].decode("utf-8")
_log.debug('check: %s', check)
if not check:
raise errors.APIError
except errors.APIError as e:
_log.error(e)
raise e
@_inject_docker_cli
def build_image(_cli, node):
assert isinstance(node, Container)
# helper.print_json(
return _cli.build(
path='/'.join(node.image.dockerfile.split('/')[0:-1]),
dockerfile='./' + node.image.dockerfile.split('/')[-1],
tag=node.image.name,
pull=True,
quiet=True
)
# )
@_inject_docker_cli
def create_volume(_cli, volume):
assert isinstance(volume, Volume)
_log = Logger.get(__name__)
_log.debug('volume opt: %s', volume.get_all_opt())
return _cli.create_volume(
volume.full_name, 'local', volume.get_all_opt()
)
@_get_name
@_inject_docker_cli
def delete_volume(_cli, name):
return _cli.remove_volume(name)
@_inject_docker_cli
def get_containers(_cli, all=False):
return _cli.containers(all=all)
@_inject_docker_cli
def get_volumes(_cli):
volumes = _cli.volumes()
return volumes['Volumes'] or []
@_inject_docker_cli
def get_images(_cli, name=None):
return _cli.images(name=name)
def inspect(item):
return (inspect_image(item) or
inspect_container(item) or
inspect_volume(item))
@_inject_docker_cli
def inspect_image(_cli, name):
assert isinstance(name, six.string_types)
try:
return _cli.inspect_image(name)
except errors.NotFound:
return None
@_get_name
@_inject_docker_cli
def inspect_container(_cli, name):
try:
return _cli.inspect_container(name)
except errors.NotFound:
return None
@_get_name
@_inject_docker_cli
def inspect_volume(_cli, name):
try:
return _cli.inspect_volume(name)
except errors.NotFound:
return None
def remove_all_containers():
for c in get_containers(all=True):
stop_container(c['Id'])
delete_container(c['Id'])
def remove_all_volumes():
for v in get_volumes():
delete_volume(v['Name'])
@_inject_docker_cli
def create_network(_cli, name, subnet='172.25.0.0/16'):
_log = Logger.get(__name__)
# docker network create -d bridge --subnet 172.25.0.0/16 isolated_nw
# self.delete_network(name)
try:
_cli.create_network(name=_get_app_name(name),
driver='bridge',
ipam={'subnet': subnet},
check_duplicate=True)
except errors.APIError:
_log.debug('network already exists!')
@_inject_docker_cli
def delete_network(_cli, name):
_log = Logger.get(__name__)
try:
_cli.remove_network(_get_app_name(name))
except errors.APIError:
_log.debug('network not exists!')
@_inject_docker_cli
def delete_image(_cli, name):
assert isinstance(name, six.string_types)
try:
_cli.remove_image(name)
except errors.NotFound:
pass
@_inject_docker_cli
def update_container(_cli, node, cmd):
assert isinstance(node, Container)
stat = inspect_image(node.image.format)
old_cmd = stat['Config']['Cmd'] or None
old_entry = stat['Config']['Entrypoint'] or None
# if is_running(node):
# stop_container(node)
# delete_container(node)
create_container(node,
cmd=cmd,
entrypoint='',
from_saved=True,
force=True)
start_container(node.id, wait=True)
# stop_container(node.id)
_cli.commit(node.id, get_saved_image(node))
# stop_container(node)
# delete_container(node)
create_container(node,
cmd=node.cmd or old_cmd,
entrypoint=old_entry,
from_saved=True,
force=True)
_cli.commit(node.id, get_saved_image(node))
def is_running(container):
_log = Logger.get(__name__)
stat = inspect_container(container)
stat = stat is not None and stat['State']['Running'] is True
_log.debug('State: %s', stat)
return stat
def get_saved_image(node):
assert isinstance(node, Container)
return '{}/{}'.format(_get_app_name(node), node.name.lower())
def _get_app_name(node):
assert isinstance(node, (six.string_types, Container))
if isinstance(node, Container):
node = node.tpl.name
return 'tosker_{}'.format(node.lower())
# @property
# def tmp_dir(self):
# return self._tmp_dir
| mit | -3,849,401,497,029,572,600 | 24.725 | 80 | 0.559875 | false |
Coldwings/Campanella-rapidfork | rapidfork/views/base.py | 1 | 4552 | # coding:utf-8
from datetime import datetime
import decimal
import json
import six
import logging
import traceback
from tornado import escape
from tornado.web import RequestHandler, HTTPError
def tojson(data, ensure_ascii=True, default=False, **kwargs):
def serializable(obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
elif isinstance(obj, datetime):
return obj.strftime("%Y-%m-%d %H:%M:%S")
raise TypeError
_default = serializable if default else None
return json.dumps(data,
ensure_ascii=ensure_ascii,
default=_default,
separators=(',', ':'),
**
kwargs).replace("</", "<\\/")
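# Illustrative sketch (not part of the original module; the sample values are
# assumptions): with default=True, Decimal and datetime values are serialised as
# a float and a "YYYY-MM-DD HH:MM:SS" string, so something like
#   tojson({'price': decimal.Decimal('9.5'), 'created': datetime(2017, 1, 1)},
#          default=True)
# would produce the compact form '{"price":9.5,"created":"2017-01-01 00:00:00"}'
# (up to dict key order).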
class RESTfulHandler(RequestHandler):
def data_received(self, chunk):
pass
def check_xsrf_cookie(self):
""" RESTful 禁用 XSRF 保护机制 """
pass
def finish(self, chunk=None, message=None):
if chunk is None:
chunk = {}
if isinstance(chunk, dict):
chunk = {"code": self._status_code, "content": chunk}
if message:
chunk["message"] = message
chunk = tojson(chunk, default=True, ensure_ascii=False)
else:
chunk = six.text_type(chunk)
callback = escape.utf8(self.get_argument("callback", None))
if callback:
self.set_header("Content-Type", "application/x-javascript")
setattr(self, '_write_buffer', [callback, "(", chunk, ")"] if chunk else [])
super(RESTfulHandler, self).finish()
else:
self.set_header("Content-Type", "application/json; charset=UTF-8")
super(RESTfulHandler, self).finish(chunk)
def write_error(self, status_code, **kwargs):
"""覆盖自定义错误."""
debug = self.settings.get("debug", False)
try:
exc_info = kwargs.pop('exc_info')
e = exc_info[1]
if isinstance(e, RESTfulHTTPError):
pass
elif isinstance(e, HTTPError):
e = RESTfulHTTPError(e.status_code)
else:
e = RESTfulHTTPError(500)
exception = "".join([ln for ln in traceback.format_exception(
*exc_info)])
if status_code == 500 and not debug:
pass
if debug:
e.error["exception"] = exception
self.clear()
            self.set_status(200)  # make RESTful API errors always return success (200 OK)
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.finish(six.text_type(e))
except Exception:
logging.error(traceback.format_exc())
return super(RESTfulHandler, self).write_error(status_code, **
kwargs)
class RESTfulHTTPError(HTTPError):
""" API 错误异常模块:
API服务器产生内部服务器错误时总是向客户返回JSON格式的数据.
"""
_error_types = {
400: "参数错误",
401: "认证失败",
403: "未经授权",
404: "终端错误",
405: "未许可的方法",
500: "服务器错误"
}
def __init__(self, status_code=400, error_detail="", error_type="", content="", log_message=None, *args):
super(RESTfulHTTPError, self).__init__(int(status_code), log_message, *
args)
self.error_detail = error_detail
self.error = {'type': error_type} if error_type else {
            'type': self._error_types.get(self.status_code, "Unknown error")
}
self.content = content if content else {}
def __str__(self):
message = {"code": self.status_code}
self._set_message(message, ["error", "content"])
if self.error_detail:
message["error"]["detail"] = self.error_detail
return tojson(message, default=True, ensure_ascii=False)
def _set_message(self, err, names):
for name in names:
v = getattr(self, name)
if v:
err[name] = v
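# Illustrative sketch (the field values are assumptions): str() on the exception
# yields the JSON body that write_error() ultimately sends, e.g.
#   str(RESTfulHTTPError(404, error_detail='no such user'))
# gives something like
#   '{"code":404,"error":{"type":"Endpoint error","detail":"no such user"}}'
# with "type" looked up in _error_types above.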
class DefaultRESTfulHandler(RESTfulHandler):
""" 不存在的RESTfultHandler请求都返回JSON格式404错误
*** 在相应的urls最末行设置如(r".*", DefaultRESTfulHandler)路由即可
"""
def prepare(self):
super(DefaultRESTfulHandler, self).prepare()
raise RESTfulHTTPError(404)
| apache-2.0 | 4,599,085,205,981,180,400 | 33.047244 | 109 | 0.547641 | false |
JeffRoy/mi-dataset | mi/dataset/driver/cg_dcl_eng/dcl/test/test_cg_dcl_eng_dcl_recovered_driver.py | 1 | 1070 | #!/home/mworden/uframes/ooi/uframe-1.0/python/bin/python
__author__ = 'mworden'
from mi.core.log import get_logger
log = get_logger()
from mi.idk.config import Config
import unittest
import os
from mi.dataset.driver.cg_dcl_eng.dcl.cg_dcl_eng_dcl_recovered_driver import parse
from mi.dataset.dataset_driver import ParticleDataHandler
class DriverTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_one(self):
sourceFilePath = os.path.join('mi','dataset','driver','cg_dcl_eng','dcl','resource',
'20140915.syslog.log')
particle_data_hdlr_obj = ParticleDataHandler()
particle_data_hdlr_obj = parse(Config().base_dir(), sourceFilePath, particle_data_hdlr_obj)
log.debug("SAMPLES: %s", particle_data_hdlr_obj._samples)
log.debug("FAILURE: %s", particle_data_hdlr_obj._failure)
self.assertEquals(particle_data_hdlr_obj._failure, False)
if __name__ == '__main__':
test = DriverTest('test_one')
test.test_one() | bsd-2-clause | -7,268,057,656,039,230,000 | 24.5 | 99 | 0.654206 | false |
atados/api | atados_core/models/legacy_address.py | 1 | 3608 | import pytz
from django.db import models
from datetime import datetime
from pygeocoder import Geocoder
from django.utils.translation import ugettext_lazy as _
from address import GoogleAddress
class State(models.Model):
name = models.CharField(_('name'), max_length=30)
code = models.CharField(_('code'), max_length=2)
def __unicode__(self):
return self.name
class Meta:
app_label = 'atados_core'
verbose_name = _('state')
class City(models.Model):
name = models.CharField(_('name'), max_length=50)
state = models.ForeignKey(State)
active = models.BooleanField(_("City where Atados is present."), default=False)
highlight = models.BooleanField(_("Highlight this city when listing cities"), default=False)
def __unicode__(self):
return '%s, %s' % (self.name, self.state.code)
class Meta:
app_label = 'atados_core'
verbose_name = _('city')
verbose_name_plural = _('cities')
class Address(models.Model):
zipcode = models.CharField(_('Zip code'), max_length=10,
blank=True, null=True, default=None)
addressline = models.CharField(_('Street'), max_length=200,
blank=True, null=True, default=None)
addressnumber = models.CharField(_('Address number'), max_length=10,
blank=True, null=True, default=None)
addressline2 = models.CharField(_('Apt, PO Box, block'), max_length=100,
blank=True, null=True, default=None)
neighborhood = models.CharField(_('Neighborhood'), max_length=50,
blank=True, null=True, default=None)
city = models.ForeignKey(City, verbose_name=_('City'), blank=True,
null=True, default=None)
latitude = models.FloatField(blank=False, null=False, default=0.0)
longitude = models.FloatField(blank=False, null=False, default=0.0)
modified_date = models.DateTimeField(auto_now=True)
# Address class is legacy
# Foreign key for conversion to GoogleAddress class
google_address = models.ForeignKey(GoogleAddress, verbose_name=_('Google Address'), blank=True, null=True, default=None)
def save(self, *args, **kwargs):
if self.city and not self.city.id == 0:
try:
results = Geocoder.geocode(self)
self.latitude = results[0].coordinates[0]
self.longitude = results[0].coordinates[1]
except Exception as e:
print e
self.modified_date = datetime.utcnow().replace(tzinfo=pytz.timezone("America/Sao_Paulo"))
return super(Address, self).save(*args, **kwargs)
def __unicode__(self):
address = '';
if self.addressline:
address = self.addressline
if self.addressnumber:
address = '%s, %s' % (address, self.addressnumber)
if self.addressline2:
address = '%s, %s' % (address, self.addressline2)
if self.neighborhood:
address = '%s, %s' % (address, self.neighborhood)
if self.city:
if address == '':
address = '%s, %s' % (self.city.name, self.city.state.code)
else:
address = '%s - %s, %s' % (address, self.city.name, self.city.state.code)
return address
def get_city_state(self):
if self.city:
if self.city.id == 0: # Trabalho a Distancia
return self.city.name
return "%s, %s" % (self.city.name, self.city.state.code)
else:
return ""
class Meta:
app_label = 'atados_core'
verbose_name = _('address')
| mit | -4,876,659,624,860,120,000 | 36.583333 | 124 | 0.606984 | false |
ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/site-packages/pip/_vendor/requests/adapters.py | 1 | 14468 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed connections and
timeouts, never to requests where the server returns a response.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# because self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
"""Initializes a urllib3 PoolManager. This method should not be called
from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block)
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_headers = self.proxy_headers(proxy)
if not proxy in self.proxy_manager:
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block)
conn = self.proxy_manager[proxy].connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request.
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except socket.error as sockerr:
raise ConnectionError(sockerr, request=request)
except MaxRetryError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, TimeoutError):
raise Timeout(e, request=request)
else:
raise
return self.build_response(request, resp)
| mit | -4,050,275,592,329,711,600 | 36.28866 | 97 | 0.600705 | false |
arangodb/arangodb | 3rdParty/V8/v7.9.317/tools/testrunner/local/variants.py | 1 | 2368 | # Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Use this to run several variants of the tests.
ALL_VARIANT_FLAGS = {
"assert_types": [["--assert-types"]],
"code_serializer": [["--cache=code"]],
"default": [[]],
"future": [["--future"]],
"gc_stats": [["--gc-stats=1"]],
# Alias of exhaustive variants, but triggering new test framework features.
"infra_staging": [[]],
"interpreted_regexp": [["--regexp-interpret-all"]],
"jitless": [["--jitless"]],
"minor_mc": [["--minor-mc"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
# For WebAssembly, we test "Liftoff-only" in the nooptimization variant and
# "TurboFan-only" in the stress variant. The WebAssembly configuration is
# independent of JS optimizations, so we can combine those configs.
"nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up"]],
"slow_path": [["--force-slow-path"]],
"stress": [["--stress-opt", "--always-opt", "--no-liftoff",
"--no-wasm-tier-up", '--stress-lazy-source-positions']],
"stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
"--wasm-code-gc",
"--stress-wasm-code-gc"]],
"stress_incremental_marking": [["--stress-incremental-marking"]],
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
"trusted": [["--no-untrusted-code-mitigations"]],
"no_wasm_traps": [["--no-wasm-trap-handler"]],
"turboprop": [["--turboprop"]],
"top_level_await": [["--harmony-top-level-await"]],
}
SLOW_VARIANTS = set([
'stress',
'nooptimization',
])
FAST_VARIANTS = set([
'default'
])
def _variant_order_key(v):
if v in SLOW_VARIANTS:
return 0
if v in FAST_VARIANTS:
return 100
return 50
ALL_VARIANTS = sorted(ALL_VARIANT_FLAGS.keys(),
key=_variant_order_key)
# Check {SLOW,FAST}_VARIANTS entries
for variants in [SLOW_VARIANTS, FAST_VARIANTS]:
for v in variants:
assert v in ALL_VARIANT_FLAGS
| apache-2.0 | 5,822,265,050,243,910,000 | 37.193548 | 79 | 0.638514 | false |
lebabouin/CouchPotatoServer-develop | couchpotato/core/providers/nzb/binnewz/nzbdownloader.py | 1 | 3562 | # Author: Guillaume Serre <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import urllib2
from StringIO import StringIO
import gzip
import cookielib
import time
class NZBDownloader(object):
def __init__( self ):
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
self.lastRequestTime = None
def waitBeforeNextRequest(self):
        if self.lastRequestTime and self.lastRequestTime > (time.time() - 10):
            time.sleep( 10 )
        self.lastRequestTime = time.time()
def open(self, request):
self.waitBeforeNextRequest()
return self.opener.open(request)
class NZBSearchResult(object):
def __init__(self, downloader, sizeInMegs, refererURL, age, nzbid):
self.downloader = downloader
self.refererURL = refererURL
self.sizeInMegs = sizeInMegs
self.age = age
self.nzbid = nzbid
def readRequest(self, request):
request.add_header('Accept-encoding', 'gzip')
request.add_header('Referer', self.refererURL)
request.add_header('Accept-Encoding', 'gzip')
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.57 Safari/537.17')
response = self.downloader.open(request)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO( response.read())
f = gzip.GzipFile(fileobj=buf)
return f.read()
else:
return response.read()
def getNZB(self):
pass
class NZBGetURLSearchResult( NZBSearchResult ):
def __init__(self, downloader, nzburl, sizeInMegs, refererURL, age, nzbid):
NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
self.nzburl = nzburl
def getNZB(self):
request = urllib2.Request( self.nzburl )
self.nzbdata = NZBSearchResult.readRequest( self, request )
return self.nzbdata
class NZBPostURLSearchResult( NZBSearchResult ):
def __init__(self, downloader, nzburl, postData, sizeInMegs, refererURL, age, nzbid):
NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
self.nzburl = nzburl
self.postData = postData
def getNZB(self):
request = urllib2.Request( self.nzburl, self.postData )
self.nzbdata = NZBSearchResult.readRequest( self, request )
return self.nzbdata
class NZBDataSearchResult( NZBSearchResult ):
def __init__(self, nzbdata, sizeInMegs, refererURL, age, nzbid):
        NZBSearchResult.__init__(self, None, sizeInMegs, refererURL, age, nzbid)
self.nzbdata = nzbdata
def getNZB(self):
return self.nzbdata
| gpl-3.0 | 7,066,434,616,544,077,000 | 35.731959 | 152 | 0.660865 | false |
w1ndy/qtile | libqtile/widget/keyboardkbdd.py | 1 | 3411 | # Copyright (c) 2015 Ali Mousavi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import base
from dbus.mainloop.glib import DBusGMainLoop
import re
import subprocess
import dbus
class KeyboardKbdd(base.InLoopPollText):
"""
Widget for changing keyboard layouts per window, using kbdd.
kbdd should be installed and running, you can get it from:
https://github.com/qnikst/kbdd
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("update_interval", 1, "Update time in seconds."),
("configured_keyboards", ["us", "ir"],
"your predefined list of keyboard layouts."
"example: ['us', 'ir', 'es']"),
("colours", None,
"foreground colour for each layout"
"either 'None' or a list of colours."
"example: ['ffffff', 'E6F0AF']. ")
]
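    # Illustrative config sketch (an assumption, not taken from qtile's docs): the
    # widget is placed in a bar like any other widget, e.g.
    #
    #   widget.KeyboardKbdd(configured_keyboards=['us', 'ir', 'es'],
    #                       colours=['ffffff', 'E6F0AF', 'E6F0AF'])
    #
    # kbdd itself must already be running, otherwise the widget shows "N/A".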
def __init__(self, **config):
base.InLoopPollText.__init__(self, **config)
self.add_defaults(KeyboardKbdd.defaults)
self.keyboard = self.configured_keyboards[0]
if not self._check_kbdd():
self.keyboard = "N/A"
self._dbus_init()
def _check_kbdd(self):
s = subprocess.Popen(["ps", "axw"], stdout=subprocess.PIPE)
        stdout = s.communicate()[0].decode()
if re.search("kbdd", stdout):
return True
self.log.error('Please, check that kbdd is running')
return False
def _dbus_init(self):
dbus_loop = DBusGMainLoop()
bus = dbus.SessionBus(mainloop=dbus_loop)
bus.add_signal_receiver(self._layout_changed,
dbus_interface='ru.gentoo.kbdd',
signal_name='layoutChanged')
def _layout_changed(self, layout_changed):
"""
        Handler for the "layoutChanged" dbus signal.
"""
if self.colours:
self._set_colour(layout_changed)
self.keyboard = self.configured_keyboards[layout_changed]
def _set_colour(self, index):
if isinstance(self.colours, list):
try:
self.layout.colour = self.colours[index]
            except IndexError:
                self._set_colour(index - 1)
else:
self.log.error('variable "colours" should be a list, to set a\
colour for all layouts, use "foreground".')
def poll(self):
return self.keyboard
| mit | 3,760,122,123,004,868,600 | 37.325843 | 79 | 0.640575 | false |
patchew-project/patchew | tests/test_mbox.py | 1 | 3040 | #!/usr/bin/env python3
#
# Copyright 2017 Red Hat, Inc.
#
# Authors:
# Fam Zheng <[email protected]>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.
import mbox
from .patchewtest import PatchewTestCase, main
class MboxTest(PatchewTestCase):
def test_multipart_in_multipart(self):
expected = """
On 07/25/2017 10:57 AM, Jeff Cody wrote:
> Signed-off-by: Jeff Cody <[email protected]>
> ---
> redhat/build_configure.sh | 3 +++
> redhat/qemu-kvm.spec.template | 7 +++++++
> 2 files changed, 10 insertions(+)
>
ACK
--
Eric Blake, Principal Software Engineer
Red Hat, Inc. +1-919-301-3266
Virtualization: qemu.org | libvirt.org
""".strip()
dp = self.get_data_path("0016-nested-multipart.mbox.gz")
with open(dp, "r") as f:
msg = mbox.MboxMessage(f.read())
self.assertEqual(msg.get_body().strip(), expected)
def test_mime_word_recipient(self):
dp = self.get_data_path("0018-mime-word-recipient.mbox.gz")
with open(dp, "r") as f:
msg = mbox.MboxMessage(f.read())
utf8_recipient = msg.get_cc()[1]
self.assertEqual(utf8_recipient[0], "Philippe Mathieu-Daudé")
self.assertEqual(utf8_recipient[1], "[email protected]")
def test_mode_only_patch(self):
dp = self.get_data_path("0021-mode-only-patch.mbox.gz")
with open(dp, "r") as f:
msg = mbox.MboxMessage(f.read())
self.assertTrue(msg.is_patch())
def test_rename_only_patch(self):
dp = self.get_data_path("0034-rename-only-patch.mbox.gz")
with open(dp, "r") as f:
msg = mbox.MboxMessage(f.read())
self.assertTrue(msg.is_patch())
def test_raw_diff(self):
dp = self.get_data_path("0033-raw-diff.mbox.gz")
with open(dp, "r") as f:
msg = mbox.MboxMessage(f.read())
self.assertTrue(msg.is_patch())
def test_get_json(self):
dp = self.get_data_path("0001-simple-patch.mbox.gz")
with open(dp, "r") as f:
content = f.read()
expected = {
"message_id": "[email protected]",
"in_reply_to": "",
"date": "2016-06-28T01:47:47",
"subject": "[Qemu-devel] [PATCH] quorum: Only compile when supported",
"sender": {"name": "Fam Zheng", "address": "[email protected]"},
"recipients": [
{"address": "[email protected]"},
{"name": "Kevin Wolf", "address": "[email protected]"},
{"name": "Alberto Garcia", "address": "[email protected]"},
{"address": "[email protected]"},
{"name": "Max Reitz", "address": "[email protected]"},
],
"mbox": content,
}
msg = mbox.MboxMessage(content).get_json()
self.assertEqual(msg, expected)
if __name__ == "__main__":
main()
| mit | -6,980,690,564,377,600,000 | 33.146067 | 86 | 0.56104 | false |
IAmWave/blekota | src/blekota.py | 1 | 2295 | import argparse
parser = argparse.ArgumentParser(description='Load or create a Blekota model')
parser.add_argument('file',
help='Either a .pkl file containing an existing model or a .wav sound on which to train a new model. If a .wav is provided, other parameters can be used to specify model hyperparameters.')
# parser.add_argument('--model-file',
# +help='Load an existing model from this file. If unspecified, new model will be created')
# parser.add_argument('--sound-file',
# help='The sound file to train on. Should be a .wav')
parser.add_argument('--model-name', default='unnamed_model',
help='A name for the model, it will later be saved in files like so: [MODEL_NAME]_123.pkl')
parser.add_argument('--layers', type=int, default=3,
help='The number of layers of the model')
parser.add_argument('--hidden', type=int, default=256,
help='The size of the hidden vector of each layer')
parser.add_argument('--seq-length', type=int, default=100,
help='Number of steps to perform before updating parameters')
parser.add_argument('--batch-size', type=int, default=80,
help='Number of sequences to process in parallel')
parser.add_argument('--alpha', type=float, default=0.002,
help='Learning rate. Do not change if unsure')
args = parser.parse_args()
import numpy as np
import pickle
import audio_io
import rnn
import gru
import visual
clf = None
if args.file.endswith('.pkl'): # load existing model
print('Loading model from', args.file)
with open(args.file, 'rb') as f:
clf = pickle.load(f)
else:
print('Creating new model')
if not args.file.endswith('.wav'):
exit('file should be either a .wav (create new model) or .pkl (load existing model)')
clf = gru.GRU(args.hidden, layer_n=args.layers, seq_length=args.seq_length, batches=args.batch_size,
alpha=args.alpha, name=args.model_name, file=args.file)
def heatmap(start=0, length=1000):
"""After sampling a sound, visualise a part of it as a heatmap."""
visual.heatmap(clf.p[start:(start + length)])
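# Illustrative interactive sketch (assumes the helpers are called from a REPL or
# with python -i, after the model has produced samples so that clf.p is populated):
#   heatmap(start=0, length=2000)   # inspect the first 2000 output distributions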
show = visual.show # visualise a sound
play = audio_io.play # play a sound
print(clf)
| mit | 5,942,508,307,662,493,000 | 39.982143 | 208 | 0.658388 | false |
hayderimran7/ec2-api | ec2api/tests/functional/botocoreclient.py | 1 | 1492 | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import botocore.session
def _get_client(client_name, url, region, access, secret, ca_bundle):
connection_data = {
'config_file': (None, 'AWS_CONFIG_FILE', None, None),
'region': ('region', 'BOTO_DEFAULT_REGION', region, None),
}
session = botocore.session.get_session(connection_data)
kwargs = {
'region_name': region,
'endpoint_url': url,
'aws_access_key_id': access,
'aws_secret_access_key': secret
}
if ca_bundle:
kwargs['verify'] = ca_bundle
return session.create_client(client_name, **kwargs)
def get_ec2_client(url, region, access, secret, ca_bundle=None):
return _get_client('ec2', url, region, access, secret, ca_bundle)
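# Illustrative sketch (endpoint, region and credentials are assumptions): the
# returned object is a plain botocore client, so any EC2 API operation can be
# invoked on it, e.g.
#   ec2 = get_ec2_client('http://localhost:8788/', 'RegionOne', 'access', 'secret')
#   ec2.describe_instances()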
def get_s3_client(url, region, access, secret, ca_bundle=None):
return _get_client('s3', url, region, access, secret, ca_bundle)
| apache-2.0 | -957,823,799,688,157,800 | 35.390244 | 78 | 0.676944 | false |
jwodder/aptrepo | src/aptrepo/sources.py | 1 | 1579 | from collections import namedtuple
import re
from .archive import Archive
class AptSource(namedtuple('AptSource', 'deb options uri suite components')):
def __str__(self):
sline = self.deb
if self.options:
sline += ' ' + ' '.join(f'{k}={v}' if v is not None else k
for k,v in self.options.items())
sline += f' {self.uri} {self.suite}'
if self.components:
sline += ' ' + ' '.join(self.components)
return sline
def repositories(self):
### TODO: Incorporate `options` (and `deb`?) somehow
suite = Archive(self.uri).fetch_suite(self.suite)
if self.suite.endswith('/'):
return [suite]
else:
return [suite[c] for c in self.components]
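# Illustrative sketch (the sources.list line below is an assumption): parse_sources()
# below turns each entry into an AptSource, e.g. the line
#   deb [arch=amd64] http://deb.debian.org/debian stretch main contrib
# becomes
#   AptSource(deb='deb', options={'arch': 'amd64'},
#             uri='http://deb.debian.org/debian', suite='stretch',
#             components=['main', 'contrib'])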
def parse_sources(fp):
for line in fp:
line = line.partition('#')[0].strip()
if not line:
continue
m = re.search(r'^(deb(?:-src)?)(?:\s+\[(.*?)\])?((?:\s+\S+){2,})$',
line)
if m:
deb, optstr, words = m.groups()
options = {}
if optstr is not None:
for opt in optstr.split():
key, eq, value = opt.partition('=')
options[key] = value if eq == '=' else None
words = words.split()
yield AptSource(deb=deb, options=options, uri=words[0],
suite=words[1], components=words[2:])
else:
raise ValueError(f'{line!r}: could not parse sources.list entry')
| mit | 9,165,954,857,391,489,000 | 35.72093 | 77 | 0.49905 | false |
0xffea/keystone | keystone/common/wsgi.py | 1 | 19339 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import socket
import sys
import eventlet.wsgi
import routes.middleware
import ssl
import webob.dec
import webob.exc
from keystone.common import logging
from keystone.common import utils
from keystone import config
from keystone import exception
from keystone.openstack.common import importutils
from keystone.openstack.common import jsonutils
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Environment variable used to pass the request context
CONTEXT_ENV = 'openstack.context'
# Environment variable used to pass the request params
PARAMS_ENV = 'openstack.params'
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.DEBUG):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, application, host=None, port=None, threads=1000):
self.application = application
self.host = host or '0.0.0.0'
self.port = port or 0
self.pool = eventlet.GreenPool(threads)
self.socket_info = {}
self.greenthread = None
self.do_ssl = False
self.cert_required = False
def start(self, key=None, backlog=128):
"""Run a WSGI server with the given application."""
LOG.debug(_('Starting %(arg0)s on %(host)s:%(port)s') %
{'arg0': sys.argv[0],
'host': self.host,
'port': self.port})
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
info = socket.getaddrinfo(self.host,
self.port,
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
_socket = eventlet.listen(info[-1],
family=info[0],
backlog=backlog)
if key:
self.socket_info[key] = _socket.getsockname()
# SSL is enabled
if self.do_ssl:
if self.cert_required:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
sslsocket = eventlet.wrap_ssl(_socket, certfile=self.certfile,
keyfile=self.keyfile,
server_side=True,
cert_reqs=cert_reqs,
ca_certs=self.ca_certs)
_socket = sslsocket
self.greenthread = self.pool.spawn(self._run,
self.application,
_socket)
def set_ssl(self, certfile, keyfile=None, ca_certs=None,
cert_required=True):
self.certfile = certfile
self.keyfile = keyfile
self.ca_certs = ca_certs
self.cert_required = cert_required
self.do_ssl = True
def kill(self):
if self.greenthread:
self.greenthread.kill()
def wait(self):
"""Wait until all servers have completed running."""
try:
self.pool.waitall()
except KeyboardInterrupt:
pass
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
log = logging.getLogger('eventlet.wsgi.server')
eventlet.wsgi.server(socket, application, custom_pool=self.pool,
log=WritableLogger(log))
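# Illustrative sketch (host and port are assumptions): a server is typically driven
# as
#   server = Server(application, host='0.0.0.0', port=5000)
#   server.start()
#   server.wait()
# with set_ssl() called before start() when TLS is required.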
class Request(webob.Request):
pass
class BaseApplication(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = nova.api.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import nova.api.fancy_api
fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
return cls()
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(detail='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError('You must implement __call__')
class Application(BaseApplication):
@webob.dec.wsgify
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict.pop('action')
del arg_dict['controller']
LOG.debug(_('arg_dict: %s'), arg_dict)
# allow middleware up the stack to provide context & params
context = req.environ.get(CONTEXT_ENV, {})
context['query_string'] = dict(req.params.iteritems())
context['path'] = req.environ['PATH_INFO']
params = req.environ.get(PARAMS_ENV, {})
if 'REMOTE_USER' in req.environ:
context['REMOTE_USER'] = req.environ['REMOTE_USER']
elif context.get('REMOTE_USER', None) is not None:
del context['REMOTE_USER']
params.update(arg_dict)
# TODO(termie): do some basic normalization on methods
method = getattr(self, action)
# NOTE(vish): make sure we have no unicode keys for py2.6.
params = self._normalize_dict(params)
try:
result = method(context, **params)
except exception.Unauthorized as e:
LOG.warning(_("Authorization failed. %s from %s")
% (e, req.environ['REMOTE_ADDR']))
return render_exception(e)
except exception.Error as e:
LOG.warning(e)
return render_exception(e)
except TypeError as e:
logging.exception(e)
return render_exception(exception.ValidationError(e))
except Exception as e:
logging.exception(e)
return render_exception(exception.UnexpectedError(exception=e))
if result is None:
return render_response(status=(204, 'No Content'))
elif isinstance(result, basestring):
return result
elif isinstance(result, webob.Response):
return result
elif isinstance(result, webob.exc.WSGIHTTPException):
return result
response_code = self._get_response_code(req)
return render_response(body=result, status=response_code)
def _get_response_code(self, req):
req_method = req.environ['REQUEST_METHOD']
controller = importutils.import_class('keystone.common.controller')
code = None
if isinstance(self, controller.V3Controller) and req_method == 'POST':
code = (201, 'Created')
return code
def _normalize_arg(self, arg):
return str(arg).replace(':', '_').replace('-', '_')
def _normalize_dict(self, d):
return dict([(self._normalize_arg(k), v)
for (k, v) in d.iteritems()])
def assert_admin(self, context):
if not context['is_admin']:
try:
user_token_ref = self.token_api.get_token(
context=context, token_id=context['token_id'])
except exception.TokenNotFound as e:
raise exception.Unauthorized(e)
creds = user_token_ref['metadata'].copy()
try:
creds['user_id'] = user_token_ref['user'].get('id')
except AttributeError:
logging.debug('Invalid user')
raise exception.Unauthorized()
try:
creds['tenant_id'] = user_token_ref['tenant'].get('id')
except AttributeError:
logging.debug('Invalid tenant')
raise exception.Unauthorized()
# NOTE(vish): this is pretty inefficient
creds['roles'] = [self.identity_api.get_role(context, role)['name']
for role in creds.get('roles', [])]
# Accept either is_admin or the admin role
self.policy_api.enforce(context, creds, 'admin_required', {})
class Middleware(Application):
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = nova.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import nova.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app)
return _factory
def __init__(self, application):
self.application = application
def process_request(self, request):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, request, response):
"""Do whatever you'd like to the response, based on the request."""
return response
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
response = self.process_request(request)
if response:
return response
response = request.get_response(self.application)
return self.process_response(request, response)
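# Illustrative sketch (the subclass below is hypothetical, not part of keystone):
# a middleware normally overrides just one of the two hooks, e.g.
#
#   class RequestIdMiddleware(Middleware):
#       def process_response(self, request, response):
#           response.headers['X-Request-Id'] = request.environ.get('REQUEST_ID', '-')
#           return response
#
# and is wired into the pipeline through its paste.deploy factory.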
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
Can be inserted into any WSGI application chain to get information
about the request and response.
"""
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
for key, value in req.environ.items():
LOG.debug('%s = %s', key, value)
LOG.debug('')
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
for line in req.body_file:
LOG.debug(line)
LOG.debug('')
resp = req.get_response(self.application)
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
for (key, value) in resp.headers.iteritems():
LOG.debug('%s = %s', key, value)
LOG.debug('')
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Iterator that prints the contents of a wrapper string."""
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
for part in app_iter:
LOG.debug(part)
yield part
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
# if we're only running in debug, bump routes' internal logging up a
# notch, as it's very spammy
if CONF.debug:
logging.getLogger('routes.middleware').setLevel(logging.INFO)
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return render_exception(
exception.NotFound(_('The resource could not be found.')))
app = match['controller']
return app
class ComposingRouter(Router):
def __init__(self, mapper=None, routers=None):
if mapper is None:
mapper = routes.Mapper()
if routers is None:
routers = []
for router in routers:
router.add_routes(mapper)
super(ComposingRouter, self).__init__(mapper)
class ComposableRouter(Router):
"""Router that supports use by ComposingRouter."""
def __init__(self, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.add_routes(mapper)
super(ComposableRouter, self).__init__(mapper)
def add_routes(self, mapper):
"""Add routes to given mapper."""
pass
class ExtensionRouter(Router):
"""A router that allows extensions to supplement or overwrite routes.
Expects to be subclassed.
"""
def __init__(self, application, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.application = application
self.add_routes(mapper)
mapper.connect('{path_info:.*}', controller=self.application)
super(ExtensionRouter, self).__init__(mapper)
def add_routes(self, mapper):
pass
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = nova.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import nova.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app)
return _factory
def render_response(body=None, status=None, headers=None):
"""Forms a WSGI response."""
headers = headers or []
headers.append(('Vary', 'X-Auth-Token'))
if body is None:
body = ''
status = status or (204, 'No Content')
else:
body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
headers.append(('Content-Type', 'application/json'))
status = status or (200, 'OK')
return webob.Response(body=body,
status='%s %s' % status,
headerlist=headers)
def render_exception(error):
"""Forms a WSGI response based on the current error."""
body = {'error': {
'code': error.code,
'title': error.title,
'message': str(error)
}}
if isinstance(error, exception.AuthPluginException):
body['error']['identity'] = error.authentication
return render_response(status=(error.code, error.title), body=body)
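# Illustrative sketch (not part of the original module): how render_response
# and render_exception are typically called from a controller method. The
# _lookup helper below is hypothetical and only shows the expected flow.
#
#     def get_resource(self, context, resource_id):
#         resource = self._lookup(resource_id)
#         if resource is None:
#             return render_exception(
#                 exception.NotFound(_('The resource could not be found.')))
#         return render_response(body={'resource': resource})
#
# render_response defaults to '200 OK' with a JSON body, or '204 No Content'
# when body is None; render_exception wraps the error's code/title/message.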
| apache-2.0 | -8,984,619,201,358,130,000 | 33.047535 | 79 | 0.595687 | false |
google/report2bq | application/classes/sa360_report_validation/product_lead_and_cross_sell.py | 1 | 3261 | """
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = ['[email protected] (David Harcombe)']
from googleapiclient.discovery import Resource
from classes.sa360_report_validation.sa360_field_validator import SA360Validator
class ProductLeadAndCrossSell(SA360Validator):
def __init__(self,
sa360_service: Resource = None,
agency: int = None,
advertiser: int = None) -> None:
super().__init__(sa360_service, agency, advertiser)
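    # Column names accepted for the SA360 "Product Lead and Cross Sell"
    # report; presumably checked against requested columns by the
    # SA360Validator base class (not shown in this file).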
self.fields = [
"agency",
"agencyId",
"advertiser",
"advertiserId",
"productId",
"productCountry",
"productLanguage",
"productMpn",
"productColor",
"productSize",
"productMaterial",
"productPattern",
"productAvailability",
"productGender",
"productAgeGroup",
"productLandingPageUrl",
"productCategory",
"productCategoryLevel1",
"productCategoryLevel2",
"productCategoryLevel3",
"productCategoryLevel4",
"productCategoryLevel5",
"productBrand",
"productGtin",
"productPrice",
"productSalePrice",
"productTypeLevel1",
"productTypeLevel2",
"productTypeLevel3",
"productTypeLevel4",
"productTypeLevel5",
"productCondition",
"productCustomLabel0",
"productCustomLabel1",
"productCustomLabel2",
"productCustomLabel3",
"productCustomLabel4",
"productCostOfGoodsSold",
"productStoreId",
"productChannel",
"productChannelExclusivity",
"productItemGroupId",
"productTitle",
"dfaActions",
"dfaRevenue",
"dfaTransactions",
"dfaWeightedActions",
"dfaActionsCrossEnv",
"dfaRevenueCrossEnv",
"dfaTransactionsCrossEnv",
"dfaWeightedActionsCrossEnv",
"crossSellAverageUnitPrice",
"crossSellCostOfGoodsSold",
"crossSellGrossFromUnitsSold",
"crossSellGrossProfitMargin",
"crossSellRevenueFromUnitsSold",
"crossSellUnitsSold",
"leadAverageUnitPrice",
"leadCostOfGoodsSold",
"leadGrossProfitFromUnitsSold",
"leadGrossProfitMargin",
"leadRevenueFromUnitsSold",
"leadUnitsSold",
"productUnitsSold",
"productRevenueFromUnitsSold",
"productAverageUnitPrice",
"date",
"monthStart",
"monthEnd",
"quarterStart",
"quarterEnd",
"weekStart",
"weekEnd",
"yearStart",
"yearEnd",
"deviceSegment",
"floodlightGroup",
"floodlightGroupId",
"floodlightGroupTag",
"floodlightActivity",
"floodlightActivityId",
"floodlightActivityTag",
"accountId",
"campaignId",
"adGroupId",
] | apache-2.0 | -783,439,681,970,412,500 | 27.12069 | 80 | 0.653481 | false |
catapult-project/catapult | catapult_build/build_steps.py | 3 | 12384 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
# This is the list of tests to run. Each entry is a dictionary with the
# following fields:
#
# name (required): The name of the step, to show on the buildbot status page.
# path (required): The path to the executable which runs the tests.
# additional_args (optional): An array of optional arguments.
# uses_sandbox_env (optional): True if CHROME_DEVEL_SANDBOX must be in
# environment.
# disabled (optional): List of platforms the test is disabled on. May contain
# 'win', 'mac', 'linux', or 'android'.
# outputs_presentation_json (optional): If True, pass in --presentation-json
# argument to the test executable to allow it to update the buildbot status
# page. More details here:
# github.com/luci/recipes-py/blob/master/recipe_modules/generator_script/api.py
_DASHBOARD_TESTS = [
{
'name': 'Dashboard Dev Server Tests Stable',
'path': 'dashboard/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks', '--no-use-local-chrome', '--channel=stable',
'--timeout-sec=120', '--timeout-retries=2'
],
'outputs_presentation_json': True,
'disabled': ['android'],
},
{
'name': 'Dashboard Dev Server Tests Canary',
'path': 'dashboard/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks', '--no-use-local-chrome', '--channel=canary',
'--timeout-sec=120', '--timeout-retries=2'
],
'outputs_presentation_json': True,
'disabled': ['android'],
},
{
'name': 'Dashboard Python Tests',
'path': 'dashboard/bin/run_py_tests',
'additional_args': ['--no-install-hooks'],
'disabled': ['android'],
},
]
_CATAPULT_TESTS = [
{
'name': 'Build Python Tests',
'path': 'catapult_build/bin/run_py_tests',
'disabled': ['android'],
},
{
'name': 'Common Tests',
'path': 'common/bin/run_tests',
},
{
'name': 'Dependency Manager Tests',
'path': 'dependency_manager/bin/run_tests',
},
{
'name': 'Devil Device Tests',
'path': 'devil/bin/run_py_devicetests',
'disabled': ['win', 'mac', 'linux']
},
{
'name': 'Devil Python Tests',
'path': 'devil/bin/run_py_tests',
'disabled': ['mac', 'win'],
},
{
'name': 'Native Heap Symbolizer Tests',
'path': 'tracing/bin/run_symbolizer_tests',
'disabled': ['android'],
},
{
'name': 'Py-vulcanize Tests',
'path': 'common/py_vulcanize/bin/run_py_tests',
'additional_args': ['--no-install-hooks'],
'disabled': ['android'],
},
{
'name': 'Systrace Tests',
'path': 'systrace/bin/run_tests',
},
{
'name': 'Snap-it Tests',
'path': 'telemetry/bin/run_snap_it_unittest',
'additional_args': ['--browser=reference',],
'uses_sandbox_env': True,
'disabled': ['android'],
},
{
'name': 'Telemetry Tests with Stable Browser (Desktop)',
'path': 'catapult_build/fetch_telemetry_deps_and_run_tests',
'additional_args': [
'--browser=reference',
'--start-xvfb',
'-v',
],
'uses_sandbox_env': True,
'disabled': ['android'],
},
{
'name': 'Telemetry Tests with Stable Browser (Android)',
'path': 'catapult_build/fetch_telemetry_deps_and_run_tests',
'additional_args': [
'--browser=reference',
'--device=android',
'--jobs=1',
'-v',
],
'uses_sandbox_env': True,
'disabled': ['win', 'mac', 'linux']
},
{
'name': 'Telemetry Integration Tests with Stable Browser',
'path': 'telemetry/bin/run_browser_tests',
'additional_args': [
'BrowserTest',
'--browser=reference',
'-v',
],
'uses_sandbox_env': True,
'disabled': ['android', 'linux'], # TODO(nedn): enable this on linux
},
{
'name': 'Tracing Dev Server Tests',
'path': 'tracing/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
'--channel=stable',
'--timeout-sec=900',
],
'outputs_presentation_json': True,
'disabled': ['android'],
},
{
'name': 'Tracing Dev Server Tests Canary',
'path': 'tracing/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
'--channel=canary',
'--timeout-sec=900',
],
'outputs_presentation_json': True,
'disabled': ['android'],
},
{
'name': 'Tracing D8 Tests',
'path': 'tracing/bin/run_vinn_tests',
'disabled': ['android'],
},
{
'name': 'Tracing Python Tests',
'path': 'tracing/bin/run_py_tests',
'additional_args': ['--no-install-hooks'],
'disabled': ['android'],
},
{
'name': 'Typ unittest',
'path': 'third_party/typ/run',
'additional_args': ['tests'],
'disabled': ['android', 'win'
], # TODO(crbug.com/851498): enable typ unittests on Win
},
{
'name': 'Vinn Tests',
'path': 'third_party/vinn/bin/run_tests',
'disabled': ['android'],
},
{
'name': 'NetLog Viewer Dev Server Tests',
'path': 'netlog_viewer/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
],
'disabled': ['android', 'win', 'mac', 'linux'],
},
]
_STALE_FILE_TYPES = ['.pyc', '.pseudo_lock']
def main(args=None):
"""Send list of test to run to recipes generator_script.
See documentation at:
github.com/luci/recipes-py/blob/master/recipe_modules/generator_script/api.py
"""
parser = argparse.ArgumentParser(description='Run catapult tests.')
parser.add_argument('--api-path-checkout', help='Path to catapult checkout')
parser.add_argument(
'--app-engine-sdk-pythonpath',
help='PYTHONPATH to include app engine SDK path')
parser.add_argument('--platform', help='Platform name (linux, mac, or win)')
parser.add_argument('--output-json', help='Output for buildbot status page')
parser.add_argument(
'--run_android_tests', default=True, help='Run Android tests')
parser.add_argument(
'--dashboard_only',
default=False,
help='Run only the Dashboard and Pinpoint tests',
action='store_true')
parser.add_argument(
'--use_python3',
default=False,
help='Run Catapult Tests using vpython3',
action='store_true')
args = parser.parse_args(args)
dashboard_protos_path = os.path.join(args.api_path_checkout, 'dashboard',
'dashboard', 'proto')
dashboard_proto_files = [
os.path.join(dashboard_protos_path, p)
for p in ['sheriff.proto', 'sheriff_config.proto']
]
sheriff_proto_output_path = os.path.join(args.api_path_checkout, 'dashboard',
'dashboard', 'sheriff_config')
dashboard_proto_output_path = os.path.join(args.api_path_checkout,
'dashboard', 'dashboard')
tracing_protos_path = os.path.join(args.api_path_checkout, 'tracing',
'tracing', 'proto')
tracing_proto_output_path = tracing_protos_path
tracing_proto_files = [os.path.join(tracing_protos_path, 'histogram.proto')]
steps = [
{
# Always remove stale files first. Not listed as a test above
# because it is a step and not a test, and must be first.
'name':
'Remove Stale files',
'cmd': [
'python',
os.path.join(args.api_path_checkout, 'catapult_build',
'remove_stale_files.py'),
args.api_path_checkout,
','.join(_STALE_FILE_TYPES),
]
},
# Since we might not have access to 'make', let's run the protobuf
# compiler directly. We want to run the proto compiler to generate the
# right data in the right places.
{
'name':
'Generate Sheriff Config protocol buffers',
'cmd': [
'protoc',
'--proto_path',
dashboard_protos_path,
'--python_out',
sheriff_proto_output_path,
] + dashboard_proto_files,
},
{
'name':
'Generate Dashboard protocol buffers',
'cmd': [
'protoc',
'--proto_path',
dashboard_protos_path,
'--python_out',
dashboard_proto_output_path,
] + dashboard_proto_files,
},
{
'name':
'Generate Tracing protocol buffers',
'cmd': [
'protoc',
'--proto_path',
tracing_protos_path,
'--python_out',
tracing_proto_output_path,
] + tracing_proto_files,
},
]
if args.platform == 'android' and args.run_android_tests:
# On Android, we need to prepare the devices a bit before using them in
# tests. These steps are not listed as tests above because they aren't
# tests and because they must precede all tests.
steps.extend([
{
'name':
'Android: Recover Devices',
'cmd': [
'python',
os.path.join(args.api_path_checkout, 'devil', 'devil',
'android', 'tools', 'device_recovery.py')
],
},
{
'name':
'Android: Provision Devices',
'cmd': [
'python',
os.path.join(args.api_path_checkout, 'devil', 'devil',
'android', 'tools', 'provision_devices.py')
],
},
{
'name':
'Android: Device Status',
'cmd': [
'python',
os.path.join(args.api_path_checkout, 'devil', 'devil',
'android', 'tools', 'device_status.py')
],
},
])
tests = None
if args.dashboard_only:
tests = _DASHBOARD_TESTS
else:
tests = _DASHBOARD_TESTS + _CATAPULT_TESTS
for test in tests:
if args.platform == 'android' and not args.run_android_tests:
# Remove all the steps for the Android configuration if we're asked to not
# run the Android tests.
steps = []
break
if args.platform in test.get('disabled', []):
continue
step = {'name': test['name'], 'env': {}}
if args.use_python3:
vpython_executable = "vpython3"
else:
vpython_executable = "vpython"
if sys.platform == 'win32':
vpython_executable += '.bat'
# Always add the appengine SDK path.
step['env']['PYTHONPATH'] = args.app_engine_sdk_pythonpath
step['cmd'] = [
vpython_executable,
os.path.join(args.api_path_checkout, test['path'])
]
if step['name'] == 'Systrace Tests':
step['cmd'] += ['--device=' + args.platform]
if test.get('additional_args'):
step['cmd'] += test['additional_args']
if test.get('uses_sandbox_env'):
step['env']['CHROME_DEVEL_SANDBOX'] = '/opt/chromium/chrome_sandbox'
if test.get('outputs_presentation_json'):
step['outputs_presentation_json'] = True
# TODO(crbug/1221663):
# Before python 3 conversion is finished, the try jobs with use_python3 are
# experimental. We want to see all possible failure and thus we don't want
# to try job to quit before all tests are finished.
# This condition will be removed when the python 3 conversion is done.
if args.use_python3:
step['always_run'] = True
steps.append(step)
with open(args.output_json, 'w') as outfile:
json.dump(steps, outfile)
if __name__ == '__main__':
main(sys.argv[1:])
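# For reference (not part of the original file): each entry appended to
# `steps` above is a dict shaped roughly like the sketch below, which the
# generator_script recipe module consumes. The values shown are illustrative.
#
#   {
#       'name': 'Tracing Python Tests',
#       'env': {'PYTHONPATH': '<app-engine-sdk-path>'},
#       'cmd': ['vpython', '<checkout>/tracing/bin/run_py_tests',
#               '--no-install-hooks'],
#       'outputs_presentation_json': True,   # only set for some tests
#   }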
| bsd-3-clause | -8,936,685,477,396,930,000 | 31.93617 | 80 | 0.541101 | false |
tyll/bodhi | bodhi/server/scripts/approve_testing.py | 1 | 4185 | # -*- coding: utf-8 -*-
# Copyright © 2013-2017 Red Hat, Inc. and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Generates the bodhi-approve-testing console script.
The script is responsible for commenting on updates after they reach the mandatory amount of time
spent in the testing repository.
"""
import os
import sys
from pyramid.paster import get_appsettings
from ..models import Update, UpdateStatus
from ..config import config
from bodhi.server import Session, initialize_db
def usage(argv):
"""
Print usage information and exit with code 1.
Args:
argv (list): The arguments that were passed to the CLI from sys.argv.
"""
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
"""
Comment on updates that are eligible to be pushed to stable.
Queries for updates in the testing state that have a NULL request, looping over them looking for
updates that are eligible to be pushed to stable but haven't had comments from Bodhi to this
effect. For each such update it finds it will add a comment stating that the update may now be
pushed to stable.
This function is the entry point for the bodhi-approve-testing console script.
Args:
argv (list): A list of command line arguments. Defaults to sys.argv.
"""
if len(argv) != 2:
usage(argv)
settings = get_appsettings(argv[1])
initialize_db(settings)
db = Session()
try:
testing = db.query(Update).filter_by(status=UpdateStatus.testing,
request=None)
for update in testing:
# If this release does not have any testing requirements, skip it
if not update.release.mandatory_days_in_testing:
print('%s doesn\'t have mandatory days in testing' % update.release.name)
continue
# If this has already met testing requirements, skip it
if update.met_testing_requirements:
continue
# Approval message when testing based on karma threshold
if update.stable_karma not in (0, None) and update.karma >= update.stable_karma \
and not update.autokarma and update.meets_testing_requirements:
print('%s now reaches stable karma threshold' % update.title)
text = config.get('testing_approval_msg_based_on_karma')
update.comment(db, text, author=u'bodhi')
continue
# If autokarma updates have reached the testing threshold, say something! Keep in mind
# that we don't care about karma here, because autokarma updates get their request set
# to stable by the Update.comment() workflow when they hit the required threshold. Thus,
# this function only needs to consider the time requirements because these updates have
# not reached the karma threshold.
if update.meets_testing_requirements:
print('%s now meets testing requirements' % update.title)
text = unicode(
config.get('testing_approval_msg') % update.mandatory_days_in_testing)
update.comment(db, text, author=u'bodhi')
db.commit()
except Exception as e:
print(str(e))
db.rollback()
Session.remove()
sys.exit(1)
| gpl-2.0 | -3,813,178,846,344,620,500 | 38.102804 | 100 | 0.660134 | false |
NoBodyCam/TftpPxeBootBareMetal | nova/tests/baremetal/test_volume_driver.py | 1 | 4399 | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for baremetal volume driver.
"""
import mox
from nova import flags
from nova import test
from nova import utils
from nova.virt.baremetal import volume_driver
FLAGS = flags.FLAGS
SHOW_OUTPUT = """Target 1: iqn.2010-10.org.openstack:volume-00000001
System information:
Driver: iscsi
State: ready
I_T nexus information:
I_T nexus: 8
Initiator: iqn.1993-08.org.debian:01:7780c6a16b4
Connection: 0
IP Address: 172.17.12.10
LUN information:
LUN: 0
Type: controller
SCSI ID: IET 00010000
SCSI SN: beaf10
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET 00010001
SCSI SN: beaf11
Size: 1074 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/nova-volumes/volume-00000001
Backing store flags:
Account information:
ACL information:
ALL
Target 2: iqn.2010-10.org.openstack:volume-00000002
System information:
Driver: iscsi
State: ready
I_T nexus information:
LUN information:
LUN: 0
Type: controller
SCSI ID: IET 00020000
SCSI SN: beaf20
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET 00020001
SCSI SN: beaf21
Size: 2147 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/nova-volumes/volume-00000002
Backing store flags:
Account information:
ACL information:
ALL
Target 1000001: iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc
System information:
Driver: iscsi
State: ready
I_T nexus information:
LUN information:
LUN: 0
Type: controller
SCSI ID: IET f42410000
SCSI SN: beaf10000010
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET f42410001
SCSI SN: beaf10000011
Size: 1074 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/disk/by-path/ip-172.17.12.10:3260-iscsi-\
iqn.2010-10.org.openstack:volume-00000001-lun-1
Backing store flags:
Account information:
ACL information:
ALL
"""
class BaremetalVolumeTestCase(test.TestCase):
def test_list_backingstore_path(self):
self.stubs.Set(utils, 'execute', lambda *args,
**kwargs: (SHOW_OUTPUT, ''))
l = volume_driver._list_backingstore_path()
self.assertEqual(len(l), 3)
self.assertIn('/dev/nova-volumes/volume-00000001', l)
self.assertIn('/dev/nova-volumes/volume-00000002', l)
self.assertIn('/dev/disk/by-path/ip-172.17.12.10:3260-iscsi-\
iqn.2010-10.org.openstack:volume-00000001-lun-1', l)
| apache-2.0 | 8,312,770,303,761,770,000 | 30.198582 | 78 | 0.580359 | false |
wcmckee/wcmckee-notebook | hackbrobeur.py | 1 | 2509 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# Hack BroBeur
# <markdowncell>
# Python script to make accounts at the centre for logins.
# <codecell>
from github import Github
import os
import getpass
import git
import time
from clint.textui import colored
import dominate
from dominate.tags import *
import envoy
# <codecell>
zeuser = getpass.getuser()
# <codecell>
g = Github(zeuser, 'blzh123!')
# <codecell>
# <codecell>
gitlist = []
# <codecell>
searchpy = g.search_repositories(zeuser)
# <codecell>
for pya in searchpy:
print pya.full_name
wcm = pya.full_name
# <codecell>
for repo in g.get_user('wcmckee').get_repos():
gitlist.append(repo.name)
# <codecell>
os.mkdir('/home/wcmckee/github')
# <codecell>
lisdir = os.listdir('/home/wcmckee/github/')
# <codecell>
lisdir
# <codecell>
curlis = []
# <codecell>
for lis in lisdir:
    curlis.append(lis)
# <codecell>
dlrepo = list(set(gitlist) - set(curlis))
# <codecell>
print dlrepo
# <codecell>
wafi = lambda: time.sleep(5)  # pause helper used between repo clones
# <codecell>
import sh
# <codecell>
import git
repo = git.Repo( '/home/wcmckee/learnpython' )
print repo.git.status()
# <codecell>
assert repo.bare == False
# <codecell>
ycmds = ['git', 'clone']
# <codecell>
import os
# <codecell>
import git
os.chdir('/home/wcmckee/github')  # assumption: clone into the github dir created above
# <codecell>
for gitbl in dlrepo:
print (colored.red('Downloading - ' + (colored.blue('wcmckee') + ' - ' + gitbl)))
#git.Git().clone("https://github.com/wcmckee/" + gitbl)
envoy.run('git clone https://github.com/wcmckee/' + gitbl )
t = envoy.run('df')
    print t.std_out
    print ('Download complete. Waiting 5 secs till the next')
    wafi()
# <codecell>
from paramiko import SSHClient
from scp import SCPClient
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect('example.com')
# <codecell>
import subprocess
# <codecell>
# <codecell>
# <codecell>
cmdrun = ['sudo', 'pip', 'install', 'paramiko']
# <codecell>
supi = envoy.run(' '.join(cmdrun))  # envoy.run expects a command string, not a list
# <codecell>
insvn = subprocess.check_output(cmdrun)
# <codecell>
newlis = []
# <codecell>
for repoz in g.get_user('wcmckee').get_repos():
    newlis.append(repoz.name)
# <codecell>
gitlist
# <codecell>
indop = open('index.html', 'r')
# <codecell>
# <codecell>
indop.read()
# <codecell>
from paramiko import SSHClient
from scp import SCPClient
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect('example.com')
# <codecell>
import envoy
# <codecell>
import clon
# <codecell>
# <codecell>
| gpl-2.0 | 2,657,833,803,489,301,000 | 11.420792 | 85 | 0.656038 | false |
robwarm/gpaw-symm | tools/niflheim-agts.py | 1 | 5426 | import os
import sys
import glob
import shutil
import subprocess
def cmd(c):
x = os.system(c)
assert x == 0, c
def fail(subject, email=None, filename='/dev/null', mailer='mail'):
assert mailer in ['mailx', 'mail', 'mutt']
import os
if email is not None:
if filename == '/dev/null':
assert os.system('mail -s "%s" %s < %s' %
(subject, email, filename)) == 0
else: # attachments
filenames = filename.split()
if mailer == 'mailx': # new mailx (12?)
attach = ''
for f in filenames:
attach += ' -a %s ' % f
# send with empty body
assert os.system('echo | mail %s -s "%s" %s' %
(attach, subject, email)) == 0
elif mailer == 'mail': # old mailx (8?)
attach = '('
for f in filenames:
ext = os.path.splitext(f)[-1]
if ext:
flog = os.path.basename(f).replace(ext, '.log')
else:
flog = f
attach += 'uuencode %s %s&&' % (f, flog)
# remove final &&
attach = attach[:-2]
attach += ')'
assert os.system('%s | mail -s "%s" %s' %
(attach, subject, email)) == 0
else: # mutt
attach = ''
for f in filenames:
attach += ' -a %s ' % f
# send with empty body
assert os.system('mutt %s -s "%s" %s < /dev/null' %
(attach, subject, email)) == 0
raise SystemExit
if '--dir' in sys.argv:
i = sys.argv.index('--dir')
dir = os.path.abspath(sys.argv[i+1])
else:
dir = 'agts'
if '--email' in sys.argv:
i = sys.argv.index('--email')
email = sys.argv[i+1]
else:
email = None
assert os.path.isdir(dir)
gpawdir = os.path.join(dir, 'gpaw')
# remove the old run directory
if os.path.isdir(dir):
shutil.rmtree(dir)
os.mkdir(dir)
os.chdir(dir)
cmd('svn checkout https://svn.fysik.dtu.dk/projects/gpaw/trunk gpaw')
# a version of gpaw is needed for imports from within this script!
cmd("\
cd " + gpawdir + "&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
python setup.py build_ext 2>&1 > build_ext.log")
# import gpaw from where it was installed
sys.path.insert(0, gpawdir)
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
module load openmpi/1.3.3-1.el5.fys.open64.4.2.3 && \
module load hdf5/1.8.6-5.el5.fys.open64.4.2.3.openmpi.1.3.3 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-xeon-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > thul.log' | ssh thul bash")
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-opteron-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > fjorm.log' | ssh fjorm bash")
cmd("""wget --no-check-certificate --quiet \
http://wiki.fysik.dtu.dk/gpaw-files/gpaw-setups-latest.tar.gz && \
tar xzf gpaw-setups-latest.tar.gz && \
rm gpaw-setups-latest.tar.gz && \
mv gpaw-setups-[0-9]* gpaw/gpaw-setups""")
cmd('svn export https://svn.fysik.dtu.dk/projects/ase/trunk ase')
# ase needed
sys.path.insert(0, '%s/ase' % dir)
from gpaw.test.big.agts import AGTSQueue
from gpaw.test.big.niflheim import NiflheimCluster
queue = AGTSQueue()
queue.collect()
cluster = NiflheimCluster(asepath=os.path.join(dir, 'ase'),
setuppath=os.path.join(gpawdir, 'gpaw-setups'))
# Example below is confusing: job.script must NOT be the *.agts.py script,
# but the actual python script to be run!
# testsuite.agts.py does both: see gpaw/test/big/miscellaneous/testsuite.agts.py
#queue.jobs = [job for job in queue.jobs if job.script == 'testsuite.agts.py']
nfailed = queue.run(cluster)
gfiles = os.path.join(dir, 'gpaw-files')
if not os.path.isdir(gfiles):
os.mkdir(gfiles)
queue.copy_created_files(gfiles)
# make files readable by go
files = glob.glob(gfiles + '/*')
for f in files:
os.chmod(f, 0644)
from gpaw.version import version
subject = 'AGTS GPAW %s: ' % str(version)
# Send mail:
sfile = os.path.join(dir, 'status.log')
attach = sfile
if not nfailed:
subject += ' succeeded'
fail(subject, email, attach, mailer='mutt')
else:
subject += ' failed'
# attach failed tests error files
ft = [l.split()[0] for l in open(sfile).readlines() if 'FAILED' in l]
for t in ft:
ef = glob.glob(os.path.join(dir, t) + '.e*')
for f in ef:
attach += ' ' + f
fail(subject, email, attach, mailer='mutt')
if 0:
# Analysis:
import matplotlib
matplotlib.use('Agg')
from gpaw.test.big.analysis import analyse
user = os.environ['USER']
analyse(queue,
'../analysis/analyse.pickle', # file keeping history
'../analysis', # Where to dump figures
rev=niflheim.revision,
#mailto='[email protected]',
mailserver='servfys.fysik.dtu.dk',
attachment='status.log')
| gpl-3.0 | -8,642,778,261,663,894,000 | 30.546512 | 85 | 0.572429 | false |
vitchyr/rlkit | rlkit/util/hyperparameter.py | 1 | 7092 | """
Custom hyperparameter functions.
"""
import abc
import copy
import math
import random
import itertools
from typing import List
import rlkit.pythonplusplus as ppp
class Hyperparameter(metaclass=abc.ABCMeta):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class RandomHyperparameter(Hyperparameter):
def __init__(self, name):
super().__init__(name)
self._last_value = None
@abc.abstractmethod
def generate_next_value(self):
"""Return a value for the hyperparameter"""
return
def generate(self):
self._last_value = self.generate_next_value()
return self._last_value
class EnumParam(RandomHyperparameter):
def __init__(self, name, possible_values):
super().__init__(name)
self.possible_values = possible_values
def generate_next_value(self):
return random.choice(self.possible_values)
class LogFloatParam(RandomHyperparameter):
"""
    Return a value in [min_value + offset, max_value + offset], sampled
    log-uniformly (i.e. uniform in log space).
"""
def __init__(self, name, min_value, max_value, *, offset=0):
super(LogFloatParam, self).__init__(name)
self._linear_float_param = LinearFloatParam("log_" + name,
math.log(min_value),
math.log(max_value))
self.offset = offset
def generate_next_value(self):
return math.e ** (self._linear_float_param.generate()) + self.offset
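# Example (assumed usage): LogFloatParam('learning_rate', 1e-5, 1e-2) draws
# values whose logarithms are uniform on [log(1e-5), log(1e-2)], so each
# decade of magnitude is sampled with equal probability.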
class LinearFloatParam(RandomHyperparameter):
def __init__(self, name, min_value, max_value):
super(LinearFloatParam, self).__init__(name)
self._min = min_value
self._delta = max_value - min_value
def generate_next_value(self):
return random.random() * self._delta + self._min
class LogIntParam(RandomHyperparameter):
def __init__(self, name, min_value, max_value, *, offset=0):
super().__init__(name)
self._linear_float_param = LinearFloatParam("log_" + name,
math.log(min_value),
math.log(max_value))
self.offset = offset
def generate_next_value(self):
return int(
math.e ** (self._linear_float_param.generate()) + self.offset
)
class LinearIntParam(RandomHyperparameter):
def __init__(self, name, min_value, max_value):
super(LinearIntParam, self).__init__(name)
self._min = min_value
self._max = max_value
def generate_next_value(self):
return random.randint(self._min, self._max)
class FixedParam(RandomHyperparameter):
def __init__(self, name, value):
super().__init__(name)
self._value = value
def generate_next_value(self):
return self._value
class Sweeper(object):
pass
class RandomHyperparameterSweeper(Sweeper):
def __init__(self, hyperparameters=None, default_kwargs=None):
if default_kwargs is None:
default_kwargs = {}
self._hyperparameters = hyperparameters or []
self._validate_hyperparameters()
self._default_kwargs = default_kwargs
def _validate_hyperparameters(self):
names = set()
for hp in self._hyperparameters:
name = hp.name
if name in names:
raise Exception("Hyperparameter '{0}' already added.".format(
name))
names.add(name)
def set_default_parameters(self, default_kwargs):
self._default_kwargs = default_kwargs
def generate_random_hyperparameters(self):
hyperparameters = {}
for hp in self._hyperparameters:
hyperparameters[hp.name] = hp.generate()
hyperparameters = ppp.dot_map_dict_to_nested_dict(hyperparameters)
return ppp.merge_recursive_dicts(
hyperparameters,
copy.deepcopy(self._default_kwargs),
ignore_duplicate_keys_in_second_dict=True,
)
def sweep_hyperparameters(self, function, num_configs):
returned_value_and_params = []
for _ in range(num_configs):
kwargs = self.generate_random_hyperparameters()
score = function(**kwargs)
returned_value_and_params.append((score, kwargs))
return returned_value_and_params
class DeterministicHyperparameterSweeper(Sweeper):
"""
Do a grid search over hyperparameters based on a predefined set of
hyperparameters.
"""
def __init__(self, hyperparameters, default_parameters=None):
"""
:param hyperparameters: A dictionary of the form
```
{
'hp_1': [value1, value2, value3],
'hp_2': [value1, value2, value3],
...
}
```
This format is like the param_grid in SciKit-Learn:
http://scikit-learn.org/stable/modules/grid_search.html#exhaustive-grid-search
:param default_parameters: Default key-value pairs to add to the
dictionary.
"""
self._hyperparameters = hyperparameters
self._default_kwargs = default_parameters or {}
named_hyperparameters = []
for name, values in self._hyperparameters.items():
named_hyperparameters.append(
[(name, v) for v in values]
)
self._hyperparameters_dicts = [
ppp.dot_map_dict_to_nested_dict(dict(tuple_list))
for tuple_list in itertools.product(*named_hyperparameters)
]
def iterate_hyperparameters(self):
"""
Iterate over the hyperparameters in a grid-manner.
:return: List of dictionaries. Each dictionary is a map from name to
            hyperparameter.
"""
return [
ppp.merge_recursive_dicts(
hyperparameters,
copy.deepcopy(self._default_kwargs),
ignore_duplicate_keys_in_second_dict=True,
)
for hyperparameters in self._hyperparameters_dicts
]
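# Example (assumed usage, hypothetical keys): a grid over dot-separated keys
# expands into the cross product of nested dicts, e.g.
#   sweeper = DeterministicHyperparameterSweeper(
#       {'algo.lr': [1e-3, 1e-4], 'algo.batch_size': [64, 128]},
#       default_parameters={'env': 'HalfCheetah-v2'},
#   )
#   len(sweeper.iterate_hyperparameters())  # -> 4 configurations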
# TODO(vpong): Test this
class DeterministicSweeperCombiner(object):
"""
A simple wrapper to combiner multiple DeterministicHyperParameterSweeper's
"""
def __init__(self, sweepers: List[DeterministicHyperparameterSweeper]):
self._sweepers = sweepers
def iterate_list_of_hyperparameters(self):
"""
Usage:
```
sweeper1 = DeterministicHyperparameterSweeper(...)
sweeper2 = DeterministicHyperparameterSweeper(...)
combiner = DeterministicSweeperCombiner([sweeper1, sweeper2])
for params_1, params_2 in combiner.iterate_list_of_hyperparameters():
# param_1 = {...}
# param_2 = {...}
```
:return: Generator of hyperparameters, in the same order as provided
sweepers.
"""
        return itertools.product(
            *(sweeper.iterate_hyperparameters()
              for sweeper in self._sweepers)
) | mit | -2,940,930,123,126,832,600 | 30.109649 | 86 | 0.597998 | false |
monkeysecurity/aws_api_classifier | apiapi.py | 1 | 2868 | """
Usage:
apiapi.py (all|mutating) [--csv=output_file]
"""
import csv
from policyuniverse import global_permissions
import re
from tabulate import tabulate
TAGS = {
'DATA_PLANE': ['object', 'bucket'],
'CONTROL_PLANE': ['policy', 'attribute', 'permission'],
'MUTATING': ['create', 'delete', 'modify', 'add', 'remove', 'set', 'update', 'put'],
'READ': ['get', 'view', 'list', 'describe'],
'SIDE_EFFECT': ['start', 'stop', 'export', 'request', 'resend', 'cancel', 'continue', 'estimate', 'execute', 'preview']
}
permissions = dict()
for service_name, service_description in global_permissions.items():
service = service_description['StringPrefix']
permissions[service] = dict()
for action in service_description['Actions']:
action_words = re.findall('[A-Z][^A-Z]*', action)
action_words = [word.lower() for word in action_words]
permissions[service][action] = set()
for tag_name, matches in TAGS.items():
for match in matches:
try:
if match in action_words:
permissions[service][action].add(tag_name)
except IndexError:
if action.lower().startswith(match):
permissions[service][action].add(tag_name)
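# Worked example of the classification above (illustrative): the action
# 'DeleteObject' splits into ['delete', 'object'], so it picks up the
# MUTATING tag (from 'delete') and the DATA_PLANE tag (from 'object').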
headers = ['service', 'permission']
headers.extend(TAGS.keys())
def create_permissions_table():
rows = []
for service, actions in permissions.items():
for action, tags in actions.items():
row = [service, action]
for tag in TAGS.keys():
row.append(tag in tags)
rows.append(row)
return rows
def create_mutating_table():
""" Filters permissions by MUTATING or SIDE_EFFECT tag. """
rows = []
for service, actions in permissions.items():
for action, tags in actions.items():
row = [service, action]
for tag in TAGS.keys():
row.append(tag in tags)
# CONTROL_PLANE && (MUTATING or SIDE_EFFECT)
# if 'CONTROL_PLANE' in tags:
            if 'MUTATING' in tags or 'SIDE_EFFECT' in tags:
                rows.append(row)
return rows
def output_csv(filename, rows):
with open(filename, 'wb') as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerow(headers)
for row in rows:
csv_writer.writerow(row)
if __name__ == '__main__':
from docopt import docopt
args = docopt(__doc__, version="APIAPI 1.0")
if args.get('mutating'):
rows = create_mutating_table()
elif args.get('all'):
rows = create_permissions_table()
filename = args.get('--csv')
if filename:
output_csv(filename, rows)
else:
print tabulate(rows, headers=headers)
| apache-2.0 | 6,818,567,263,265,383,000 | 28.875 | 123 | 0.57113 | false |
mhogg/bonemapy | bonemapy/elementTypes.py | 1 | 6798 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Michael Hogg
# This file is part of bonemapy - See LICENSE.txt for information on usage and redistribution
import numpy as np
from abaqusConstants import C3D4, C3D4H, C3D10, C3D10H, C3D10I, C3D10M, C3D10MH
# ~~~~~~~~~~
class elementC3D4():
def __init__(self):
self.name = 'C3D4'
self.desc = 'Linear tetrahedral element'
self.numNodes = 4
self.setNumIntPnts()
self.setIpnums()
self.setIpcs()
self.evalNips()
def setNumIntPnts(self):
self.numIntPnts = 1
def setIpnums(self):
self.ipnums = np.array([i+1 for i in range(self.numIntPnts)])
def setIpcs(self):
g = h = r = 0.25
self.ipcs = np.array([[g,h,r]])
def getN(self,ipc):
g,h,r = ipc
N1 = (1.0-g-h-r)
N2 = g
N3 = h
N4 = r
return np.array([N1,N2,N3,N4])
def evalNips(self):
self.Nips = np.array([self.getN(ip) for ip in self.ipcs])
def interp(self,N,nv):
return np.dot(N,nv)
def getIntPointValues(self,nv):
ipv = [self.interp(N,nv) for N in self.Nips]
return ipv[0]
def setNodeCoords(self):
self.nc = np.array([[ 0.0, 0.0, 0.0],
[ 1.0, 0.0, 0.0],
[ 0.0, 1.0, 0.0],
[ 0.0, 0.0, 1.0]])
# ~~~~~~~~~~
class elementC3D4H(elementC3D4):
def __init__(self):
elementC3D4.__init__(self)
self.name = 'C3D4H'
self.desc = 'Linear tetrahedral element with hybrid formulation'
# ~~~~~~~~~~
class elementC3D10():
def __init__(self):
self.name = 'C3D10'
self.desc = 'Quadratic tetrahedral element'
self.numNodes = 10
self.setNumIntPnts()
self.setIpnums()
self.setIpcs()
self.evalNips()
def setNumIntPnts(self):
self.numIntPnts = 4
def setIpnums(self):
self.ipnums = np.array([i+1 for i in range(self.numIntPnts)])
def setIpcs(self):
alpha = 0.1381966
beta = 0.5854102
self.ipcs = np.array([[alpha,alpha,alpha],
[beta, alpha,alpha],
[alpha,beta, alpha],
[alpha,alpha,beta ]])
def getN(self,ipc):
g,h,r = ipc
N1 = (2.0*(1.0-g-h-r)-1.0)*(1.0-g-h-r)
N2 = (2.0*g-1.0)*g
N3 = (2.0*h-1.0)*h
N4 = (2.0*r-1.0)*r
N5 = 4.0*(1.0-g-h-r)*g
N6 = 4.0*g*h
N7 = 4.0*(1.0-g-h-r)*h
N8 = 4.0*(1.0-g-h-r)*r
N9 = 4.0*g*r
N10 = 4.0*h*r
return np.array([N1,N2,N3,N4,N5,N6,N7,N8,N9,N10])
def evalNips(self):
self.Nips = np.array([self.getN(ip) for ip in self.ipcs])
def interp(self,N,nv):
return np.dot(N,nv)
def getIntPointValues(self,nv):
ipv = [self.interp(N,nv) for N in self.Nips]
return np.array(ipv)
def setNodeCoords(self):
self.nc = np.array([[ 0.0, 0.0, 0.0],
[ 1.0, 0.0, 0.0],
[ 0.0, 1.0, 0.0],
[ 0.0, 0.0, 1.0],
[ 0.5, 0.0, 0.0],
[ 0.5, 0.5, 0.0],
[ 0.0, 0.5, 0.0],
[ 0.0, 0.0, 0.5],
[ 0.5, 0.0, 0.5],
[ 0.0, 0.5, 0.5]])
# ~~~~~~~~~~
class elementC3D10M(elementC3D10):
def __init__(self):
elementC3D10.__init__(self)
self.name = 'C3D10M'
self.desc = 'Quadratic tetrahedral element with modified formulation'
def setIpcs(self):
alpha = 0.1770833333
beta = 0.4687500000
self.ipcs = np.array([[alpha,alpha,alpha],
[beta, alpha,alpha],
[alpha,beta, alpha],
[alpha,alpha,beta ]])
# ~~~~~~~~~~
class elementC3D10H(elementC3D10):
def __init__(self):
elementC3D10.__init__(self)
self.name = 'C3D10H'
self.desc = 'Quadratic tetrahedral element with hybrid formulation'
# ~~~~~~~~~~
class elementC3D10MH(elementC3D10M):
def __init__(self):
elementC3D10M.__init__(self)
self.name = 'C3D10MH'
self.desc = 'Quadratic tetrahedral element with modified hybrid formulation'
# ~~~~~~~~~~
class elementC3D10I(elementC3D10):
def __init__(self):
elementC3D10.__init__(self)
self.name = 'C3D10I'
        self.desc = 'Quadratic tetrahedral element with improved surface stress formulation'
def setNumIntPnts(self):
self.numIntPnts = 11
def setIpcs(self):
# From manual: For the general-purpose C3D10I 10-node tetrahedra ... improved stress
# visualization is obtained through an 11-point integration rule, consisting of 10
# integration points at the elements' nodes and one integration point at their centroid.
self.ipcs = np.array([[0.00,0.00,0.00],
[1.00,0.00,0.00],
[0.00,1.00,0.00],
[0.00,0.00,1.00],
[0.50,0.00,0.00],
[0.50,0.50,0.00],
[0.00,0.50,0.00],
[0.00,0.00,0.50],
[0.50,0.00,0.50],
[0.00,0.50,0.50],
[0.25,0.25,0.25]])
# ~~~~~~~~~~
# Supported element types
seTypes = {}
seTypes[C3D4] = elementC3D4
seTypes[C3D4H] = elementC3D4H
seTypes[C3D10] = elementC3D10
seTypes[C3D10H] = elementC3D10H
seTypes[C3D10I] = elementC3D10I
seTypes[C3D10M] = elementC3D10M
seTypes[C3D10MH] = elementC3D10MH
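# Minimal usage sketch (not part of the original module): map nodal values
# (e.g. HU densities at a C3D10 element's 10 nodes) to its integration points.
# The nodal values below are arbitrary illustration data.
#
#   import numpy as np
#   elem = seTypes[C3D10]()
#   nodal_hu = np.arange(10, dtype=float)
#   ip_values = elem.getIntPointValues(nodal_hu)   # one value per int. point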
| mit | -8,532,450,555,790,286,000 | 32.487685 | 130 | 0.416887 | false |
childsish/sofia | templates/genomics/steps/sequence_set.py | 1 | 2159 | import gzip
from sofia.step import Step
from lhc.binf.sequence.reverse_complement import reverse_complement
try:
import pysam
def get_fasta_set(filename):
return pysam.FastaFile(filename)
except ImportError:
from lhc.io.fasta import FastaInOrderAccessSet
def get_fasta_set(filename):
fileobj = gzip.open(filename, 'rt') if filename.endswith('.gz') else open(filename, encoding='utf-8')
return FastaInOrderAccessSet(fileobj)
class GetDownstream1000(Step):
"""
Get the sequence from the given genomic position to 1000 nucleotides downstream.
"""
IN = ['fasta_file', 'genomic_position', 'major_transcript']
OUT = ['downstream_1000']
def __init__(self):
self.fasta_set = None
def consume_input(self, input):
copy = {
'fasta_file': input['fasta_file'][0],
'genomic_position': input['genomic_position'][:],
'major_transcript': input['major_transcript'][:]
}
del input['genomic_position'][:]
del input['major_transcript'][:]
return copy
def run(self, fasta_file, genomic_position, major_transcript):
for position, transcript in zip(genomic_position, major_transcript):
if self.fasta_set is None:
self.fasta_set = get_fasta_set(fasta_file)
if transcript is None:
yield None
continue
chr = str(position.chromosome)
pos = position.position
strand = transcript.strand
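            # "Downstream" is strand-dependent: on the '+' strand it extends
            # toward larger coordinates, on the '-' strand toward smaller
            # ones, and the fetched slice is reverse-complemented below.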
start = pos if strand == '+' else pos - 1000
stop = pos if strand == '-' else pos + 1000
seq = self.fasta_set.fetch(chr, start, stop)
yield seq if strand == '+' else reverse_complement(seq)
@classmethod
def get_out_resolvers(cls):
return {
'sync': cls.resolve_out_sync
}
@classmethod
def resolve_out_sync(cls, ins):
if ins['genomic_position'] != ins['major_transcript']:
raise ValueError('unable to resolve sync stream')
return {
'downstream_1000': ins['genomic_position']
}
| gpl-2.0 | 952,286,789,581,727,600 | 31.712121 | 109 | 0.597962 | false |
funkring/fdoo | addons-funkring/report_aeroo/barcodes/write.py | 1 | 8543 | # -*- coding: utf-8 -*-
"""
Barcodes for Python - Module for writing.
Copyright 2009 Peter Gebauer
Cairo backend for writing barcodes.
"Barcodes for Python" is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
"Barcodes for Python" is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
(see file COPYING) along with "Barcodes for Python".
If not, see <http://www.gnu.org/licenses/>.
"""
import warnings
import StringIO
import cairo
CAIRO_RESOLUTION_PER_INCH = 72.0
CAIRO_RESOLUTION_PER_M = 72.0 / 0.0254
CAIRO_RESOLUTION_PER_MM = 72.0 / 25.4
COLOR_BLACK = (0.0, 0.0, 0.0, 1.0)
COLOR_WHITE = (1.0, 1.0, 1.0, 1.0)
class Render(object):
"""
    A render object holds some information on colors and geometries needed
when rendering a barcode.
A render implementation needs the ability to write to SVG, PNG and PostScript.
All colors used must be four float tuples.
"""
def __init__(self, barcode, **kwargs):
"""
Valid keywords:
color_on Defaults to COLOR_BLACK.
color_off Defaults to None. (i.e not drawn)
color_bg Defaults to COLOR_WHITE.
color_margin Defaults to COLOR_WHITE.
color_text Defaults to COLOR_BLACK.
margin_left Defaults to 0.
margin_right Defaults to 0.
margin_up Defaults to 0.
margin_down Defaults to 0.
margin Write-only, sets all margins.
color_debug_rect Just for debugging, defaults to None.
text_top Render text above bars, defaults to False.
        Any color set to None is omitted from rendering.
"""
self.barcode = barcode
self.color_on = kwargs.get("color_on", COLOR_BLACK)
self.color_off = kwargs.get("color_off", None)
self.color_bg = kwargs.get("color_bg", COLOR_WHITE)
self.color_margin = kwargs.get("color_margin", COLOR_WHITE)
self.color_text = kwargs.get("color_text", COLOR_BLACK)
self.color_debug_rect = kwargs.get("color_debug_rect")
self.text_top = kwargs.get("text_top", False)
self.set_margin(kwargs.get("margin", 0.0))
if "margin_left" in kwargs:
self.margin_left = kwargs.get("margin_left", 0.0)
if "margin_right" in kwargs:
self.margin_right = kwargs.get("margin_right", 0.0)
if "margin_top" in kwargs:
self.margin_top = kwargs.get("margin_top", 0.0)
if "margin_bottom" in kwargs:
self.margin_bottom = kwargs.get("margin_bottom", 0.0)
def set_margin(self, margin):
self.margin_left = margin
self.margin_right = margin
self.margin_top = margin
self.margin_bottom = margin
def get_margin(self):
raise NotImplementedError("cannot get general margin")
margin = property(get_margin, set_margin)
def has_svg_support(self):
return False
def get_svg(self, width, height):
"""
Should return SVG XML as a string.
"""
raise NotImplementedError()
def has_png_support(self):
return False
def get_png(self, width, height):
"""
Should return PNG binary data as a string.
"""
raise NotImplementedError()
def has_postscript_support(self):
return False
    def get_postscript(self, width, height):
"""
Should return PostScript data as a string.
"""
raise NotImplementedError()
def _save(self, filename, width, height, data_func):
        f = file(filename, "wb")  # binary mode so PNG output is not mangled
f.write(data_func(width, height))
f.close()
def save_svg(self, filename, width, height):
self._save(filename, width, height, self.get_svg)
def save_png(self, filename, width, height):
self._save(filename, width, height, self.get_png)
def save_postscript(self, filename, width, height):
self._save(filename, width, height, self.get_postscript)
class CairoRender(Render):
"""
A rendering type for Cairo.
"""
def __init__(self, barcode, **kwargs):
Render.__init__(self, barcode, **kwargs)
def has_svg_support(self):
return True
def write_svg(self, io, width, height):
surface = cairo.SVGSurface(io, width, height)
context = cairo.Context(surface)
self.render_barcode_to_cairo_context(context, 0.0, 0.0, width, height)
surface.finish()
return io
def get_svg(self, width, height):
io = StringIO.StringIO()
surface = cairo.SVGSurface(io, width, height)
context = cairo.Context(surface)
self.render_barcode_to_cairo_context(context, 0.0, 0.0, width, height)
surface.finish()
return io.getvalue()
def get_png(self, width, height):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
context = cairo.Context(surface)
self.render_barcode_to_cairo_context(context, 0.0, 0.0, width, height)
io = StringIO.StringIO()
surface.write_to_png(io)
surface.finish()
return io.getvalue()
def render_barcode_to_cairo_context(self, context, ax, ay, aw, ah, barcode = None):
if not barcode:
barcode = self.barcode
context.save()
context.identity_matrix()
font_options = cairo.FontOptions()
font_options.set_hint_style(cairo.HINT_STYLE_NONE)
font_options.set_hint_metrics(cairo.HINT_METRICS_OFF)
context.reset_clip()
context.set_font_options(font_options)
context.rectangle(ax, ay, aw, ah)
context.clip()
if self.color_margin:
self.render_rectangle_to_cairo_context(context, ax, ay, aw, ah, self.color_margin)
ax += self.margin_left
ay += self.margin_top
aw -= (self.margin_right + self.margin_left)
ah -= (self.margin_bottom + self.margin_top)
context.reset_clip()
context.rectangle(ax, ay, aw, ah)
context.clip()
if self.color_bg:
self.render_rectangle_to_cairo_context(context, ax, ay, aw, ah, self.color_bg)
for index, (bx, by, bw, bh, value) in enumerate(barcode.get_bar_geometries()):
if self.text_top:
by = 1.0 - bh
rx, ry, rw, rh = ax + bx * aw, ay + by * ah, bw * aw, bh * ah
if value and self.color_on:
self.render_rectangle_to_cairo_context(context, rx, ry, rw, rh, self.color_on)
elif not value and self.color_off:
self.render_rectangle_to_cairo_context(context, rx, ry, rw, rh, self.color_off)
for index, (bx, by, bw, bh, text) in enumerate(barcode.get_unicode_geometries()):
if self.text_top:
by = 0.0
rx, ry, rw, rh = ax + bx * aw, ay + by * ah, bw * aw, bh * ah
self.render_text_to_cairo_context(context, rx, ry, rw, rh, text, self.color_text)
context.restore()
def render_rectangle_to_cairo_context(self, context, rx, ry, rw, rh, color):
context.save()
context.set_source_rgba(*color)
context.set_line_width(0.0)
context.rectangle(rx, ry, rw, rh)
context.fill()
context.restore()
def render_text_to_cairo_context(self, context, rx, ry, rw, rh, text, color):
context.save()
context.identity_matrix()
if self.color_debug_rect:
context.set_line_width(1.0)
context.set_source_rgba(*self.color_debug_rect)
context.rectangle(rx, ry, rw, rh)
context.stroke()
context.set_source_rgba(*color)
context.set_font_size(rh * 1.2)
tx, ty, text_width, text_height = context.text_extents(text)[:4]
if text_width > rw:
scalew = rw / float(text_width)
else:
scalew = 1.0
if text_height > rh:
scaleh = rh / float(text_height)
else:
scaleh = 1.0
context.move_to(rx + rw * 0.5 - text_width * 0.5 * scalew - tx * scalew, ry + rh)
context.scale(scalew, scaleh)
context.show_text(text)
context.stroke()
context.restore()
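# Minimal usage sketch (not part of the original module), assuming a barcode
# object that provides get_bar_geometries()/get_unicode_geometries() as the
# render code above expects:
#
#   barcode = SomeBarcodeType('123456789')      # hypothetical barcode class
#   render = CairoRender(barcode, margin=4.0, text_top=False)
#   render.save_png('/tmp/barcode.png', 300, 120)
#   svg_markup = render.get_svg(300, 120)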
| agpl-3.0 | -8,345,297,074,543,309,000 | 35.508547 | 95 | 0.606813 | false |
TC01/AutoCrab | AutoCrab2/python/crabutil.py | 1 | 1225 | """
Utility functions for working with CRAB. Used by most
of the autocrab commands.
"""
import os
def getCrabConfig(dir):
"""Retrieves backup copy of crab config file in directory."""
path = os.path.join(dir, "share", "crab.cfg")
return path
def getCrabLog(dir):
path = os.path.join(dir, "log", "crab.log")
return path
def isCrabDirectory(dir):
"""Check if a directory is a working directory generated by crab."""
isDirectory = True
if not os.path.exists(os.path.join(dir, "job")):
isDirectory = False
if not os.path.exists(os.path.join(dir, "res")):
isDirectory = False
if not os.path.exists(os.path.join(dir, "log")):
isDirectory = False
if not os.path.exists(os.path.join(dir, "share")):
isDirectory = False
return isDirectory
def isCrabConfig(filename):
"""Check if a file is a legal crab config file."""
valid = False
if ".cfg" in filename:
valid = True
configFile = open(filename)
text = configFile.read()
# Make sure we have the right blocks inside.
if not ("[CRAB]" in text and "[CMSSW]" in text and "[USER]" in text):
valid = False
# Make sure we're not an autocrab template config file
if "$INS_SHORTNAME" in text or "$INS_LONGNAME" in text:
valid = False
return valid
| mit | 1,058,610,956,331,786,500 | 25.06383 | 70 | 0.69551 | false |
tritoanst/ccxt | python/ccxt/bxinth.py | 1 | 8476 | # -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class bxinth (Exchange):
def describe(self):
return self.deep_extend(super(bxinth, self).describe(), {
'id': 'bxinth',
'name': 'BX.in.th',
'countries': 'TH', # Thailand
'rateLimit': 1500,
'hasCORS': False,
'hasFetchTickers': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766412-567b1eb4-5ed7-11e7-94a8-ff6a3884f6c5.jpg',
'api': 'https://bx.in.th/api',
'www': 'https://bx.in.th',
'doc': 'https://bx.in.th/info/api',
},
'api': {
'public': {
'get': [
'', # ticker
'options',
'optionbook',
'orderbook',
'pairing',
'trade',
'tradehistory',
],
},
'private': {
'post': [
'balance',
'biller',
'billgroup',
'billpay',
'cancel',
'deposit',
'getorders',
'history',
'option-issue',
'option-bid',
'option-sell',
'option-myissue',
'option-mybid',
'option-myoptions',
'option-exercise',
'option-cancel',
'option-history',
'order',
'withdrawal',
'withdrawal-history',
],
},
},
'fees': {
'trading': {
'taker': 0.25 / 100,
'maker': 0.25 / 100,
},
},
})
def fetch_markets(self):
markets = self.publicGetPairing()
keys = list(markets.keys())
result = []
for p in range(0, len(keys)):
market = markets[keys[p]]
id = str(market['pairing_id'])
base = market['secondary_currency']
quote = market['primary_currency']
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
def common_currency_code(self, currency):
# why would they use three letters instead of four for currency codes
if currency == 'DAS':
return 'DASH'
if currency == 'DOG':
return 'DOGE'
return currency
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostBalance()
balance = response['balance']
result = {'info': balance}
currencies = list(balance.keys())
for c in range(0, len(currencies)):
currency = currencies[c]
code = self.common_currency_code(currency)
account = {
'free': float(balance[currency]['available']),
'used': 0.0,
'total': float(balance[currency]['total']),
}
account['used'] = account['total'] - account['free']
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
self.load_markets()
orderbook = self.publicGetOrderbook(self.extend({
'pairing': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': float(ticker['orderbook']['bids']['highbid']),
'ask': float(ticker['orderbook']['asks']['highbid']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last_price']),
'change': float(ticker['change']),
'percentage': None,
'average': None,
'baseVolume': float(ticker['volume_24hours']),
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGet(params)
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
ticker = tickers[id]
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
tickers = self.publicGet(self.extend({
'pairing': market['id'],
}, params))
id = str(market['id'])
ticker = tickers[id]
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
timestamp = self.parse8601(trade['trade_date'])
return {
'id': trade['trade_id'],
'info': trade,
'order': trade['order_id'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['trade_type'],
'price': float(trade['rate']),
'amount': float(trade['amount']),
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetTrade(self.extend({
'pairing': market['id'],
}, params))
return self.parse_trades(response['trades'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
response = self.privatePostOrder(self.extend({
'pairing': self.market_id(symbol),
'type': side,
'amount': amount,
'rate': price,
}, params))
return {
'info': response,
'id': str(response['order_id']),
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
pairing = None # TODO fixme
return self.privatePostCancel({
'order_id': id,
'pairing': pairing,
})
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/'
if path:
url += path + '/'
if params:
url += '?' + self.urlencode(params)
if api == 'private':
self.check_required_credentials()
nonce = self.nonce()
auth = self.apiKey + str(nonce) + self.secret
signature = self.hash(self.encode(auth), 'sha256')
body = self.urlencode(self.extend({
'key': self.apiKey,
'nonce': nonce,
'signature': signature,
# twofa: self.twofa,
}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if api == 'public':
return response
if 'success' in response:
if response['success']:
return response
raise ExchangeError(self.id + ' ' + self.json(response))
| mit | -6,969,734,723,890,171,000 | 33.737705 | 126 | 0.461185 | false |
mileistone/test | utils/test/datasets/coco.py | 1 | 18590 | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
from fast_rcnn.config import cfg
import os.path as osp
import sys
import os
import numpy as np
import scipy.sparse
import scipy.io as sio
import cPickle
import json
import uuid
# COCO API
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as COCOmask
def _filter_crowd_proposals(roidb, crowd_thresh):
"""
Finds proposals that are inside crowd regions and marks them with
overlap = -1 (for all gt rois), which means they will be excluded from
training.
"""
for ix, entry in enumerate(roidb):
overlaps = entry['gt_overlaps'].toarray()
crowd_inds = np.where(overlaps.max(axis=1) == -1)[0]
non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
if len(crowd_inds) == 0 or len(non_gt_inds) == 0:
continue
iscrowd = [int(True) for _ in xrange(len(crowd_inds))]
crowd_boxes = ds_utils.xyxy_to_xywh(entry['boxes'][crowd_inds, :])
non_gt_boxes = ds_utils.xyxy_to_xywh(entry['boxes'][non_gt_inds, :])
ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd)
bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]
overlaps[non_gt_inds[bad_inds], :] = -1
roidb[ix]['gt_overlaps'] = scipy.sparse.csr_matrix(overlaps)
return roidb
class coco(imdb):
def __init__(self, image_set, year):
imdb.__init__(self, 'coco_' + year + '_' + image_set)
# COCO specific config options
self.config = {'top_k' : 2000,
'use_salt' : True,
'cleanup' : True,
'crowd_thresh' : 0.7,
'rpn_file': None,
'min_size' : 2}
# name, paths
self._year = year
self._image_set = image_set
self._data_path = '/home/shifeng/data/Object_Detection/coco'
# load COCO API, classes, class <-> id mappings
self._COCO = COCO(self._get_ann_file())
cats = self._COCO.loadCats(self._COCO.getCatIds())
self._classes = tuple(['__background__'] + [c['name'] for c in cats])
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._class_to_coco_cat_id = dict(zip([c['name'] for c in cats],
self._COCO.getCatIds()))
self._image_index = self._load_image_set_index()
# Default to roidb handler
self.set_proposal_method('selective_search')
self.competition_mode(False)
# Some image sets are "views" (i.e. subsets) into others.
# For example, minival2014 is a random 5000 image subset of val2014.
# This mapping tells us where the view's images and proposals come from.
self._view_map = {
'minival2014' : 'val2014', # 5k val2014 subset
'valminusminival2014' : 'val2014', # val2014 \setminus minival2014
'test-dev2015' : 'test2015',
}
coco_name = image_set + year # e.g., "val2014"
self._data_name = (self._view_map[coco_name]
if self._view_map.has_key(coco_name)
else coco_name)
# Dataset splits that have ground-truth annotations (test splits
# do not have gt annotations)
self._gt_splits = ('train', 'val', 'minival')
def _get_ann_file(self):
prefix = 'instances' if self._image_set.find('test') == -1 \
else 'image_info'
return osp.join(self._data_path, 'annotations',
prefix + '_' + self._image_set + self._year + '.json')
def _load_image_set_index(self):
"""
Load image ids.
"""
image_ids = self._COCO.getImgIds()
return image_ids
def _get_widths(self):
anns = self._COCO.loadImgs(self._image_index)
widths = [ann['width'] for ann in anns]
return widths
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# Example image path for index=119993:
# images/train2014/COCO_train2014_000000119993.jpg
file_name = ('COCO_' + self._data_name + '_' +
str(index).zfill(12) + '.jpg')
image_path = osp.join(self._data_path, 'images',
self._data_name, file_name)
assert osp.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def selective_search_roidb(self):
return self._roidb_from_proposals('selective_search')
def edge_boxes_roidb(self):
return self._roidb_from_proposals('edge_boxes_AR')
def mcg_roidb(self):
return self._roidb_from_proposals('MCG')
def rpn_roidb(self):
if (self._image_set != 'val') and ('test' not in self._image_set):
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print 'loading {}'.format(filename)
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = cPickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _roidb_from_proposals(self, method):
"""
Creates a roidb from pre-computed proposals of a particular methods.
"""
top_k = self.config['top_k']
cache_file = osp.join(self.cache_path, self.name +
'_{:s}_top{:d}'.format(method, top_k) +
'_roidb.pkl')
if osp.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{:s} {:s} roidb loaded from {:s}'.format(self.name, method,
cache_file)
return roidb
if self._image_set in self._gt_splits:
gt_roidb = self.gt_roidb()
method_roidb = self._load_proposals(method, gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, method_roidb)
# Make sure we don't use proposals that are contained in crowds
roidb = _filter_crowd_proposals(roidb, self.config['crowd_thresh'])
else:
roidb = self._load_proposals(method, None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote {:s} roidb to {:s}'.format(method, cache_file)
return roidb
def _load_proposals(self, method, gt_roidb):
"""
Load pre-computed proposals in the format provided by Jan Hosang:
http://www.mpi-inf.mpg.de/departments/computer-vision-and-multimodal-
computing/research/object-recognition-and-scene-understanding/how-
good-are-detection-proposals-really/
For MCG, use boxes from http://www.eecs.berkeley.edu/Research/Projects/
CS/vision/grouping/mcg/ and convert the file layout using
lib/datasets/tools/mcg_munge.py.
"""
box_list = []
top_k = self.config['top_k']
valid_methods = [
'MCG',
'selective_search',
'edge_boxes_AR',
'edge_boxes_70']
assert method in valid_methods
print 'Loading {} boxes'.format(method)
for i, index in enumerate(self._image_index):
if i % 1000 == 0:
print '{:d} / {:d}'.format(i + 1, len(self._image_index))
box_file = osp.join(
cfg.DATA_DIR, 'coco_proposals', method, 'mat',
self._get_box_file(index))
raw_data = sio.loadmat(box_file)['boxes']
boxes = np.maximum(raw_data - 1, 0).astype(np.uint16)
if method == 'MCG':
# Boxes from the MCG website are in (y1, x1, y2, x2) order
boxes = boxes[:, (1, 0, 3, 2)]
# Remove duplicate boxes and very small boxes and then take top k
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
boxes = boxes[:top_k, :]
box_list.append(boxes)
# Sanity check
im_ann = self._COCO.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
ds_utils.validate_boxes(boxes, width=width, height=height)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
if osp.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_coco_annotation(index)
for index in self._image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def _load_coco_annotation(self, index):
"""
Loads COCO bounding-box instance annotations. Crowd instances are
        handled by setting their overlaps (with all categories) to -1. This
overlap value means that crowd "instances" are excluded from training.
"""
im_ann = self._COCO.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
annIds = self._COCO.getAnnIds(imgIds=index, iscrowd=None)
objs = self._COCO.loadAnns(annIds)
# Sanitize bboxes -- some are invalid
valid_objs = []
for obj in objs:
x1 = np.max((0, obj['bbox'][0]))
y1 = np.max((0, obj['bbox'][1]))
x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Lookup table to map from COCO category ids to our internal class
# indices
coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
self._class_to_ind[cls])
for cls in self._classes[1:]])
for ix, obj in enumerate(objs):
cls = coco_cat_id_to_class_ind[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
seg_areas[ix] = obj['area']
if obj['iscrowd']:
# Set overlap to -1 for all classes for crowd objects
# so they will be excluded during training
overlaps[ix, :] = -1.0
else:
overlaps[ix, cls] = 1.0
ds_utils.validate_boxes(boxes, width=width, height=height)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False,
'seg_areas' : seg_areas}
def _get_box_file(self, index):
# first 14 chars / first 22 chars / all chars + .mat
# COCO_val2014_0/COCO_val2014_000000447/COCO_val2014_000000447991.mat
file_name = ('COCO_' + self._data_name +
'_' + str(index).zfill(12) + '.mat')
return osp.join(file_name[:14], file_name[:22], file_name)
def _print_detection_eval_metrics(self, coco_eval):
IoU_lo_thresh = 0.5
IoU_hi_thresh = 0.95
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
# precision has dims (iou, recall, cls, area range, max dets)
# area range index 0: all area ranges
# max dets index 2: 100 per image
precision = \
coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
ap_default = np.mean(precision[precision > -1])
print ('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
'~~~~').format(IoU_lo_thresh, IoU_hi_thresh)
print '{:.1f}'.format(100 * ap_default)
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
# minus 1 because of __background__
precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
ap = np.mean(precision[precision > -1])
print '{:.1f}'.format(100 * ap)
print '~~~~ Summary metrics ~~~~'
coco_eval.summarize()
cfg.mAP = coco_eval.stats
def _do_detection_eval(self, res_file, output_dir):
ann_type = 'bbox'
coco_dt = self._COCO.loadRes(res_file)
coco_eval = COCOeval(self._COCO, coco_dt)
coco_eval.params.useSegm = (ann_type == 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
self._print_detection_eval_metrics(coco_eval)
eval_file = osp.join(output_dir, 'detection_results.pkl')
with open(eval_file, 'wb') as fid:
cPickle.dump(coco_eval, fid, cPickle.HIGHEST_PROTOCOL)
print 'Wrote COCO eval results to: {}'.format(eval_file)
def _coco_results_one_category(self, boxes, cat_id):
results = []
for im_ind, index in enumerate(self.image_index):
if type(boxes[im_ind]) == list:
continue
dets = boxes[im_ind].astype(np.float)
            if dets.size == 0:
continue
# dets = dets[:500, :]
scores = dets[:, -1]
xs = dets[:, 0]
ys = dets[:, 1]
ws = dets[:, 2] - xs + 1
hs = dets[:, 3] - ys + 1
results.extend(
[{'image_id' : index,
'category_id' : cat_id,
'bbox' : [xs[k], ys[k], ws[k], hs[k]],
'score' : scores[k]} for k in xrange(dets.shape[0])])
# if cfg.single_scale_test is True:
# results.extend(
# [{'image_id' : index,
# 'category_id' : cat_id,
# 'bbox' : [xs[k], ys[k], ws[k], hs[k]],
# 'score' : scores[k]} for k in xrange(dets.shape[0])])
# else:
# results.extend(
# [{'image_id' : index,
# 'category_id' : cat_id,
# 'bbox' : [round(xs[k], 2), round(ys[k], 2), round(ws[k], 2), round(hs[k], 2)],
# 'score' : round(scores[k], 3)} for k in xrange(dets.shape[0])])
return results
def _write_coco_results_file(self, all_boxes, res_file):
# [{"image_id": 42,
# "category_id": 18,
# "bbox": [258.15,41.29,348.26,243.78],
# "score": 0.236}, ...]
results = []
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
self.num_classes - 1)
coco_cat_id = self._class_to_coco_cat_id[cls]
results.extend(self._coco_results_one_category(all_boxes[cls_ind],
coco_cat_id))
print 'Writing results json to {}'.format(res_file)
with open(res_file, 'w') as fid:
json.dump(results, fid)
def evaluate_detections(self, all_boxes, output_dir):
res_file = osp.join(output_dir, ('detections_' +
self._image_set +
self._year +
'_results'))
if self.config['use_salt']:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
self._write_coco_results_file(all_boxes, res_file)
# Only do evaluation on non-test sets
if self._image_set.find('test') == -1:
self._do_detection_eval(res_file, output_dir)
# Optionally cleanup results json file
if self.config['cleanup']:
os.remove(res_file)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
| mit | 8,357,223,769,747,148,000 | 41.132251 | 100 | 0.516138 | false |
praetorian-inc/pentestly | modules/reporting/xml.py | 1 | 1655 | from recon.core.module import BaseModule
from dicttoxml import dicttoxml
from xml.dom.minidom import parseString
import codecs
import os
class Module(BaseModule):
meta = {
'name': 'XML Report Generator',
'author': 'Eric Humphries (@e2fsck) and Tim Tomes (@LaNMaSteR53)',
'version': 'v0.0.2',
'description': 'Creates a XML report.',
'options': (
('tables', 'hosts, contacts, credentials', True, 'comma delineated list of tables'),
('filename', os.path.join(BaseModule.workspace, 'results.xml'), True, 'path and filename for report output'),
),
}
def module_run(self):
filename = self.options['filename']
with codecs.open(filename, 'wb', encoding='utf-8') as outfile:
# build a list of table names
tables = [x.strip() for x in self.options['tables'].split(',')]
data_dict = {}
cnt = 0
for table in tables:
data_dict[table] = []
columns = [x[0] for x in self.get_columns(table)]
rows = self.query('SELECT "%s" FROM "%s" ORDER BY 1' % ('", "'.join(columns), table))
for row in rows:
row_dict = {}
for i in range(0,len(columns)):
row_dict[columns[i]] = row[i]
data_dict[table].append(row_dict)
cnt += 1
# write the xml to a file
reparsed = parseString(dicttoxml(data_dict))
outfile.write(reparsed.toprettyxml(indent=' '*4))
self.output('%d records added to \'%s\'.' % (cnt, filename))
| gpl-3.0 | 5,376,551,073,993,372,000 | 40.375 | 121 | 0.538973 | false |
micahcochran/geopandas | geopandas/tools/tests/test_sjoin.py | 1 | 10287 | from __future__ import absolute_import
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from shapely.geometry import Point, Polygon
import geopandas
from geopandas import GeoDataFrame, GeoSeries, read_file, base
from geopandas import sjoin
import pytest
from pandas.util.testing import assert_frame_equal
pandas_0_18_problem = 'fails under pandas < 0.19 due to pandas issue 15692,'\
'not problem with sjoin.'
@pytest.fixture()
def dfs(request):
polys1 = GeoSeries(
[Polygon([(0, 0), (5, 0), (5, 5), (0, 5)]),
Polygon([(5, 5), (6, 5), (6, 6), (5, 6)]),
Polygon([(6, 0), (9, 0), (9, 3), (6, 3)])])
polys2 = GeoSeries(
[Polygon([(1, 1), (4, 1), (4, 4), (1, 4)]),
Polygon([(4, 4), (7, 4), (7, 7), (4, 7)]),
Polygon([(7, 7), (10, 7), (10, 10), (7, 10)])])
df1 = GeoDataFrame({'geometry': polys1, 'df1': [0, 1, 2]})
df2 = GeoDataFrame({'geometry': polys2, 'df2': [3, 4, 5]})
if request.param == 'string-index':
df1.index = ['a', 'b', 'c']
df2.index = ['d', 'e', 'f']
    # construct the expected frames
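    # The '_merge' column below acts as a temporary join key: rows sharing a value are
    # the expected matches for the given op, and a value present on only one side
    # (e.g. 3) yields NaNs on the other side after the outer merge.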
expected = {}
part1 = df1.copy().reset_index().rename(
columns={'index': 'index_left'})
part2 = df2.copy().iloc[[0, 1, 1, 2]].reset_index().rename(
columns={'index': 'index_right'})
part1['_merge'] = [0, 1, 2]
part2['_merge'] = [0, 0, 1, 3]
exp = pd.merge(part1, part2, on='_merge', how='outer')
expected['intersects'] = exp.drop('_merge', axis=1).copy()
part1 = df1.copy().reset_index().rename(
columns={'index': 'index_left'})
part2 = df2.copy().reset_index().rename(
columns={'index': 'index_right'})
part1['_merge'] = [0, 1, 2]
part2['_merge'] = [0, 3, 3]
exp = pd.merge(part1, part2, on='_merge', how='outer')
expected['contains'] = exp.drop('_merge', axis=1).copy()
part1['_merge'] = [0, 1, 2]
part2['_merge'] = [3, 1, 3]
exp = pd.merge(part1, part2, on='_merge', how='outer')
expected['within'] = exp.drop('_merge', axis=1).copy()
return [request.param, df1, df2, expected]
@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
class TestSpatialJoin:
@pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
indirect=True)
@pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
def test_inner(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how='inner', op=op)
exp = expected[op].dropna().copy()
exp = exp.drop('geometry_y', axis=1).rename(
columns={'geometry_x': 'geometry'})
exp[['df1', 'df2']] = exp[['df1', 'df2']].astype('int64')
if index == 'default-index':
exp[['index_left', 'index_right']] = \
exp[['index_left', 'index_right']].astype('int64')
exp = exp.set_index('index_left')
exp.index.name = None
assert_frame_equal(res, exp)
@pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
indirect=True)
@pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
def test_left(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how='left', op=op)
exp = expected[op].dropna(subset=['index_left']).copy()
exp = exp.drop('geometry_y', axis=1).rename(
columns={'geometry_x': 'geometry'})
exp['df1'] = exp['df1'].astype('int64')
if index == 'default-index':
exp['index_left'] = exp['index_left'].astype('int64')
# TODO: in result the dtype is object
res['index_right'] = res['index_right'].astype(float)
exp = exp.set_index('index_left')
exp.index.name = None
assert_frame_equal(res, exp)
@pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
indirect=True)
@pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
def test_right(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how='right', op=op)
exp = expected[op].dropna(subset=['index_right']).copy()
exp = exp.drop('geometry_x', axis=1).rename(
columns={'geometry_y': 'geometry'})
exp['df2'] = exp['df2'].astype('int64')
if index == 'default-index':
exp['index_right'] = exp['index_right'].astype('int64')
res['index_left'] = res['index_left'].astype(float)
exp = exp.set_index('index_right')
exp = exp.reindex(columns=res.columns)
assert_frame_equal(res, exp, check_index_type=False)
@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
class TestSpatialJoinNYBB:
def setup_method(self):
nybb_filename = geopandas.datasets.get_path('nybb')
self.polydf = read_file(nybb_filename)
self.crs = self.polydf.crs
N = 20
b = [int(x) for x in self.polydf.total_bounds]
self.pointdf = GeoDataFrame(
[{'geometry': Point(x, y),
'pointattr1': x + y, 'pointattr2': x - y}
for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
range(b[1], b[3], int((b[3]-b[1])/N)))],
crs=self.crs)
def test_geometry_name(self):
# test sjoin is working with other geometry name
polydf_original_geom_name = self.polydf.geometry.name
self.polydf = (self.polydf.rename(columns={'geometry': 'new_geom'})
.set_geometry('new_geom'))
assert polydf_original_geom_name != self.polydf.geometry.name
res = sjoin(self.polydf, self.pointdf, how="left")
assert self.polydf.geometry.name == res.geometry.name
def test_sjoin_left(self):
df = sjoin(self.pointdf, self.polydf, how='left')
assert df.shape == (21, 8)
for i, row in df.iterrows():
assert row.geometry.type == 'Point'
assert 'pointattr1' in df.columns
assert 'BoroCode' in df.columns
def test_sjoin_right(self):
# the inverse of left
df = sjoin(self.pointdf, self.polydf, how="right")
df2 = sjoin(self.polydf, self.pointdf, how="left")
assert df.shape == (12, 8)
assert df.shape == df2.shape
for i, row in df.iterrows():
assert row.geometry.type == 'MultiPolygon'
for i, row in df2.iterrows():
assert row.geometry.type == 'MultiPolygon'
def test_sjoin_inner(self):
df = sjoin(self.pointdf, self.polydf, how="inner")
assert df.shape == (11, 8)
def test_sjoin_op(self):
# points within polygons
df = sjoin(self.pointdf, self.polydf, how="left", op="within")
assert df.shape == (21, 8)
assert df.ix[1]['BoroName'] == 'Staten Island'
# points contain polygons? never happens so we should have nulls
df = sjoin(self.pointdf, self.polydf, how="left", op="contains")
assert df.shape == (21, 8)
assert np.isnan(df.ix[1]['Shape_Area'])
def test_sjoin_bad_op(self):
# AttributeError: 'Point' object has no attribute 'spandex'
with pytest.raises(ValueError):
sjoin(self.pointdf, self.polydf, how="left", op="spandex")
def test_sjoin_duplicate_column_name(self):
pointdf2 = self.pointdf.rename(columns={'pointattr1': 'Shape_Area'})
df = sjoin(pointdf2, self.polydf, how="left")
assert 'Shape_Area_left' in df.columns
assert 'Shape_Area_right' in df.columns
def test_sjoin_values(self):
# GH190
self.polydf.index = [1, 3, 4, 5, 6]
df = sjoin(self.pointdf, self.polydf, how='left')
assert df.shape == (21, 8)
df = sjoin(self.polydf, self.pointdf, how='left')
assert df.shape == (12, 8)
@pytest.mark.skipif(str(pd.__version__) < LooseVersion('0.19'),
reason=pandas_0_18_problem)
@pytest.mark.xfail
def test_no_overlapping_geometry(self):
# Note: these tests are for correctly returning GeoDataFrame
# when result of the join is empty
df_inner = sjoin(self.pointdf.iloc[17:], self.polydf, how='inner')
df_left = sjoin(self.pointdf.iloc[17:], self.polydf, how='left')
df_right = sjoin(self.pointdf.iloc[17:], self.polydf, how='right')
# Recent Pandas development has introduced a new way of handling merges
# this change has altered the output when no overlapping geometries
if str(pd.__version__) > LooseVersion('0.18.1'):
right_idxs = pd.Series(range(0, 5), name='index_right',
dtype='int64')
else:
right_idxs = pd.Series(name='index_right', dtype='int64')
expected_inner_df = pd.concat(
[self.pointdf.iloc[:0],
pd.Series(name='index_right', dtype='int64'),
self.polydf.drop('geometry', axis=1).iloc[:0]],
axis=1)
expected_inner = GeoDataFrame(
expected_inner_df, crs={'init': 'epsg:4326', 'no_defs': True})
expected_right_df = pd.concat(
[self.pointdf.drop('geometry', axis=1).iloc[:0],
pd.concat([pd.Series(name='index_left', dtype='int64'),
right_idxs],
axis=1),
self.polydf],
axis=1)
expected_right = GeoDataFrame(
expected_right_df, crs={'init': 'epsg:4326', 'no_defs': True})\
.set_index('index_right')
expected_left_df = pd.concat(
[self.pointdf.iloc[17:],
pd.Series(name='index_right', dtype='int64'),
self.polydf.iloc[:0].drop('geometry', axis=1)],
axis=1)
expected_left = GeoDataFrame(
expected_left_df, crs={'init': 'epsg:4326', 'no_defs': True})
assert expected_inner.equals(df_inner)
assert expected_right.equals(df_right)
assert expected_left.equals(df_left)
@pytest.mark.skip("Not implemented")
def test_sjoin_outer(self):
df = sjoin(self.pointdf, self.polydf, how="outer")
assert df.shape == (21, 8)
| bsd-3-clause | 130,542,483,010,739,260 | 37.52809 | 79 | 0.564207 | false |
crackcell/www-entity-mining | people/stars/ent.sina.com.cn/crawler.py | 1 | 2883 | #!/usr/bin/env python
# -*- encoding: utf-8; indent-tabs-mode: nil -*-
"""
crawler
~~~~~~~
    Crawl star profiles (name, gender, nation, birth, horoscope, height) from ent.sina.com.cn.
:copyright: (c) 2015 Menglong TAN.
"""
import os
import sys
import re
import urllib2
import time
import BeautifulSoup
import logging
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(fmt)
logger.addHandler(ch)
class Star(object):
def __init__(self):
self.name = ""
self.gender = ""
self.nation = ""
self.birth = ""
self.horoscope = ""
self.height = ""
def __repr__(self):
return "%s\t%s\t%s\t%s\t%s\t%s" % (self.name, self.gender, self.nation,
self.birth, self.horoscope,
self.height)
def extract_list(url):
headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5"}
req = urllib2.Request(url, headers=headers)
resp = None
try:
resp = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print "Error Code:", e.code
        return []  # return an empty list so the caller can still iterate
except urllib2.URLError, e:
print "Error Reason:", e.reason
        return []
soup = BeautifulSoup.BeautifulSoup(resp.read())
stars = []
for star in soup.findAll("div", attrs={"class":"item-intro left"}):
s = Star()
s.name = str(star.find("a", attrs={"style":"overflow: hidden;text-overflow: ellipsis;white-space: nowrap;width:140px;"}).contents[0]).strip()
for p in star.findAll("p"):
if str(p.contents[0]).startswith("<span class=\"txt\">性别:</span>"):
s.gender = str(p.contents[1]).strip()
elif str(p.contents[0]).startswith("<span class=\"txt\">国籍:</span>"):
s.nation = str(p.contents[2]).strip()
elif str(p.contents[0]).startswith("<span class=\"txt\">出生日期:</span>"):
s.birth = str(p.contents[1]).strip()
elif str(p.contents[0]).startswith("<span class=\"txt\">星座:</span>"):
s.horoscope = str(p.contents[1].contents[0]).strip()
elif str(p.contents[0]).startswith("<span class=\"txt\">身高:</span>"):
s.height = str(p.contents[1]).strip()
stars.append(s)
return stars
if __name__ == "__main__":
list_url = "http://ku.ent.sina.com.cn/star/search&page_no="
total_page = 1068
f = open("stars.dat", "w+")
for i in range(total_page):
logger.info("progress: %d/%d", i + 1, total_page)
stars = extract_list(list_url + str(i + 1))
for star in stars:
f.write(str(star) + "\n")
f.flush()
time.sleep(2)
f.close()
| bsd-3-clause | 7,542,542,099,566,126,000 | 31.862069 | 149 | 0.55439 | false |
TFMV/veloce | classifier.py | 1 | 4504 | import pyspark
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import LogisticRegressionWithSGD
from pyspark.mllib.tree import DecisionTree
from pyspark import SparkContext
from pyspark.sql import SQLContext
##If running through pyspark, start pyspark as follows: pyspark --packages com.databricks:spark-csv_2.10:1.2.0
sc = pyspark.SparkContext()
sqlContext = SQLContext(sc)
#We point the context at a CSV file on disk. The result is a RDD, not the content of the file. This is a Spark transformation
raw_rdd = sc.textFile("/tmp/titanic.csv")
# The spark-csv package (loaded via the --packages flag shown above) is needed for the following line, which creates a DataFrame directly from the CSV file
#df = sqlContext.read.format('com.databricks.spark.csv').options(header='true').load('/tmp/titanic.csv')
#We query the RDD for the number of lines in the file. The call here causes the file to be read and the result computed. This is a Spark action.
raw_rdd.count()
#We query for the first five rows of the RDD. Even though the data is small, we shouldn't get into the habit of pulling the entire dataset into the notebook. Many datasets that we might want to work with using Spark will be much too large to fit in memory of a single machine.
raw_rdd.take(5)
#We see a header row followed by a set of data rows. We filter out the header to define a new RDD containing only the data rows.
header = raw_rdd.first()
data_rdd = raw_rdd.filter(lambda line: line != header)
#We take a random sample of the data rows to better understand the possible values.
data_rdd.takeSample(False, 5, 0)
#We see that the first value in every row is a passenger number. The next three values are the passenger attributes we might use to predict passenger survival: ticket class, age group, and gender. The final value is the survival ground truth.
#Create labeled points (i.e., feature vectors and ground truth)
#Now we define a function to turn the passenger attributes into structured LabeledPoint objects
def row_to_labeled_point(line):
passenger_id, klass, age, sex, survived = [segs.strip('"') for segs in line.split(',')]
klass = int(klass[0]) - 1
if (age not in ['adults', 'child'] or
sex not in ['man', 'women'] or
survived not in ['yes', 'no']):
raise RuntimeError('unknown value')
features = [
klass,
(1 if age == 'adults' else 0),
(1 if sex == 'women' else 0)
]
return LabeledPoint(1 if survived == 'yes' else 0, features)
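#Illustrative only (the exact CSV values are an assumption, not taken from the dataset): a row such as
#  '"1","1st class","adults","man","yes"'
#would become LabeledPoint(1.0, [0, 1, 0]) -- class index 0, adult flag 1, women flag 0, label 1 for survived.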
#We apply the function to all rows.
labeled_points_rdd = data_rdd.map(row_to_labeled_point)
#We take a random sample of the resulting points to inspect them.
labeled_points_rdd.takeSample(False, 5, 0)
#We split the transformed data into a training (70%) and test set (30%), and print the total number of items in each segment.
training_rdd, test_rdd = labeled_points_rdd.randomSplit([0.7, 0.3], seed = 0)
training_count = training_rdd.count()
test_count = test_rdd.count()
training_count, test_count
#Now we train a DecisionTree model. We specify that we're training a boolean classifier (i.e., there are two outcomes). We also specify that all of our features are categorical and the number of possible categories for each.
model = DecisionTree.trainClassifier(training_rdd, numClasses=2, categoricalFeaturesInfo={0: 3,1: 2,2: 2})
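#For reference (matching row_to_labeled_point above): feature 0 is the ticket class with 3 categories,
#feature 1 the adult/child flag with 2 categories, and feature 2 the gender flag with 2 categories.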
#We now apply the trained model to the feature values in the test set to get the list of predicted outcomines.
predictions_rdd = model.predict(test_rdd.map(lambda x: x.features))
#We bundle our predictions with the ground truth outcome for each passenger in the test set.
truth_and_predictions_rdd = test_rdd.map(lambda lp: lp.label).zip(predictions_rdd)
#Now we compute the test error (% predicted survival outcomes == actual outcomes) and display the decision tree for good measure
accuracy = truth_and_predictions_rdd.filter(lambda v_p: v_p[0] == v_p[1]).count() / float(test_count)
print('Accuracy =', accuracy)
print(model.toDebugString())
#For a simple comparison, we also train and test a LogisticRegressionWithSGD model
model = LogisticRegressionWithSGD.train(training_rdd)
predictions_rdd = model.predict(test_rdd.map(lambda x: x.features))
labels_and_predictions_rdd = test_rdd.map(lambda lp: lp.label).zip(predictions_rdd)
accuracy = labels_and_predictions_rdd.filter(lambda v_p: v_p[0] == v_p[1]).count() / float(test_count)
print('Accuracy =', accuracy) | mit | -4,944,954,862,459,573,000 | 46.451613 | 276 | 0.72913 | false |
Forage/Gramps | gramps/gui/plug/tool.py | 1 | 11556 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2005-2007 Donald N. Allingham
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"ToolGeneration Framework"
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from __future__ import print_function
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
import logging
log = logging.getLogger(".")
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import TOOL_OPTIONS
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.errors import WindowActiveError
from gramps.gen.plug._options import (Options, OptionHandler, OptionList,
OptionListCollection)
from gramps.gen.plug import (TOOL_DEBUG, TOOL_ANAL, TOOL_DBPROC, TOOL_DBFIX,
TOOL_REVCTL, TOOL_UTILS)
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
tool_categories = {
TOOL_DEBUG : ("ToolDebug", _("Debug")),
TOOL_ANAL : ("ToolAnExp", _("Analysis and Exploration")),
TOOL_DBPROC : ("ToolProc", _("Family Tree Processing")),
TOOL_DBFIX : ("ToolRep", _("Family Tree Repair")),
TOOL_REVCTL : ("ToolRev", _("Revision Control")),
TOOL_UTILS : ("ToolUtil", _("Utilities")),
}
#-------------------------------------------------------------------------
#
# Tool
#
#-------------------------------------------------------------------------
class Tool(object):
"""
The Tool base class. This is a base class for generating
customized tools. It cannot be used as is, but it can be easily
sub-classed to create a functional tool.
"""
def __init__(self, dbstate, options_class, name):
from . import MenuToolOptions
self.db = dbstate.db
try:
if issubclass(options_class, MenuToolOptions):
# FIXME: pass in person_id
self.options = options_class(name, None, dbstate)
else: # must be some kind of class or we get a TypeError
self.options = options_class(name)
except TypeError:
self.options = options_class
self.options.load_previous_values()
if hasattr(options_class, 'options_dict'):
old_opts = options_class.saved_options_dict
for key in options_class.options_dict:
if options_class.options_dict[key] != old_opts[key]:
self.options.options_dict[key] = old_opts[key]
def run_tool(self):
pass
class BatchTool(Tool):
"""
Same as Tool, except the warning is displayed about the potential
loss of undo history. Should be used for tools using batch transactions.
"""
def __init__(self, dbstate, options_class, name):
# TODO: should we replace this with a callback?
from ..dialog import QuestionDialog2
warn_dialog = QuestionDialog2(
_('Undo history warning'),
_('Proceeding with this tool will erase the undo history '
'for this session. In particular, you will not be able '
'to revert the changes made by this tool or any changes '
'made prior to it.\n\n'
'If you think you may want to revert running this tool, '
'please stop here and backup your database.'),
_('_Proceed with the tool'), _('_Stop'))
if not warn_dialog.run():
self.fail = True
return
Tool.__init__(self, dbstate, options_class, name)
self.fail = False
class ActivePersonTool(Tool):
"""
    Same as Tool, except the existence of the active person is checked
and the tool is aborted if no active person exists. Should be used
for tools that depend on active person.
"""
def __init__(self, dbstate, uistate, options_class, name):
if not uistate.get_active('Person'):
# TODO: should we replace this with a callback?
from ..dialog import ErrorDialog
ErrorDialog(_('Active person has not been set'),
_('You must select an active person for this '
'tool to work properly.'))
self.fail = True
return
Tool.__init__(self, dbstate, options_class, name)
self.fail = False
#------------------------------------------------------------------------
#
# Command-line tool
#
#------------------------------------------------------------------------
class CommandLineTool(object):
"""
Provide a way to run tool from the command line.
"""
def __init__(self, database, name, category, option_class, options_str_dict,
noopt=False):
self.database = database
self.category = category
self.option_class = option_class(name)
self.option_class.load_previous_values()
self.show = options_str_dict.pop('show', None)
self.options_str_dict = options_str_dict
self.init_options(noopt)
self.parse_option_str()
self.show_options()
def init_options(self, noopt):
self.options_dict = {'id' : ''}
self.options_help = {'id' : ["=ID", "Gramps ID of a central person."], }
if noopt:
return
# Add tool-specific options
for key in self.option_class.handler.options_dict:
if key not in self.options_dict:
self.options_dict[key] = self.option_class.handler.options_dict[key]
# Add help for tool-specific options
for key in self.option_class.options_help:
if key not in self.options_help:
self.options_help[key] = self.option_class.options_help[key]
def parse_option_str(self):
from gramps.cli.plug import _convert_str_to_match_type
for opt in self.options_str_dict:
if opt in self.options_dict:
self.options_dict[opt] = \
_convert_str_to_match_type(self.options_str_dict[opt],
self.options_dict[opt])
self.option_class.handler.options_dict[opt] = self.options_dict[opt]
else:
print("Ignoring unknown option: %s" % opt)
person_id = self.options_dict['id']
self.person = self.database.get_person_from_gramps_id(person_id)
id_list = []
for person in self.database.iter_people():
id_list.append("%s\t%s" % (
person.get_gramps_id(),
name_displayer.display(person)))
self.options_help['id'].append(id_list)
self.options_help['id'].append(False)
def show_options(self):
if not self.show:
return
elif self.show == 'all':
print(" Available options:")
for key in self.options_dict:
print(" %s" % key)
print(" Use 'show=option' to see description and acceptable values")
elif self.show in self.options_dict:
print(' %s%s\t%s' % (self.show,
self.options_help[self.show][0],
self.options_help[self.show][1]))
print(" Available values are:")
vals = self.options_help[self.show][2]
if isinstance(vals, (list, tuple)):
if self.options_help[self.show][3]:
for num in range(len(vals)):
print(" %d\t%s" % (num, vals[num]))
else:
for val in vals:
print(" %s" % val)
else:
print(" %s" % self.options_help[self.show][2])
else:
self.show = None
#------------------------------------------------------------------------
#
# Generic task functions for tools
#
#------------------------------------------------------------------------
# Standard GUI tool generic task
def gui_tool(dbstate, uistate, tool_class, options_class, translated_name,
name, category, callback):
"""
tool - task starts the report. The plugin system requires that the
task be in the format of task that takes a database and a person as
its arguments.
"""
try:
tool_class(dbstate, uistate, options_class, name, callback)
except WindowActiveError:
pass
except:
log.error("Failed to start tool.", exc_info=True)
# Command-line generic task
def cli_tool(dbstate, name, category, tool_class, options_class, options_str_dict):
clt = CommandLineTool(dbstate.db, name, category,
options_class, options_str_dict)
clt.option_class.saved_options_dict = clt.options_dict
# Exit here if show option was given
if clt.show:
return
# run tool
try:
tool_class(dbstate, None, clt.option_class, name, None)
except:
log.error("Failed to start tool.", exc_info=True)
#-------------------------------------------------------------------------
#
# Class handling options for plugins
#
#-------------------------------------------------------------------------
class ToolOptionHandler(OptionHandler):
"""
Implements handling of the options for the plugins.
"""
def __init__(self, module_name, options_dict, person_id=None):
OptionHandler.__init__(self, module_name, options_dict, person_id)
def init_subclass(self):
self.collection_class = OptionListCollection
self.list_class = OptionList
self.filename = TOOL_OPTIONS
#------------------------------------------------------------------------
#
# Tool Options class
#
#------------------------------------------------------------------------
class ToolOptions(Options):
"""
Defines options and provides handling interface.
This is a base Options class for the tools. All tools, options
classes should derive from it.
"""
def __init__(self, name, person_id=None):
"""
Initialize the class, performing usual house-keeping tasks.
Subclasses MUST call this in their __init__() method.
"""
self.name = name
self.person_id = person_id
self.options_dict = {}
self.options_help = {}
self.handler = None
def load_previous_values(self):
self.handler = ToolOptionHandler(self.name, self.options_dict, self.person_id)
| gpl-2.0 | 6,896,968,333,442,710,000 | 35 | 86 | 0.536777 | false |
tensorflow/addons | tensorflow_addons/image/tests/transform_ops_test.py | 1 | 16494 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transform ops."""
from distutils.version import LooseVersion
import pytest
import numpy as np
import tensorflow as tf
from skimage import transform
from tensorflow_addons.image import transform_ops
from tensorflow_addons.utils import test_utils
_DTYPES = {
tf.dtypes.uint8,
tf.dtypes.int32,
tf.dtypes.int64,
tf.dtypes.float16,
tf.dtypes.float32,
tf.dtypes.float64,
}
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_compose(dtype):
image = tf.constant(
[[1, 1, 1, 0], [1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], dtype=dtype
)
# Rotate counter-clockwise by pi / 2.
rotation = transform_ops.angles_to_projective_transforms(np.pi / 2, 4, 4)
# Translate right by 1 (the transformation matrix is always inverted,
# hence the -1).
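    # (Explanatory note: the 8 values [a0, a1, a2, b0, b1, b2, c0, c1] describe a
    # projective transform that maps *output* pixel coordinates back to *input*
    # coordinates, so shifting the image right by one pixel needs a2 = -1. This
    # assumes the usual TensorFlow image-transform convention.)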
translation = tf.constant([1, 0, -1, 0, 1, 0, 0, 0], dtype=tf.dtypes.float32)
composed = transform_ops.compose_transforms([rotation, translation])
image_transformed = transform_ops.transform(image, composed)
np.testing.assert_equal(
[[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]],
image_transformed.numpy(),
)
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_extreme_projective_transform(dtype):
image = tf.constant(
[[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype=dtype
)
transformation = tf.constant([1, 0, 0, 0, 1, 0, -1, 0], tf.dtypes.float32)
image_transformed = transform_ops.transform(image, transformation)
np.testing.assert_equal(
[[1, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]],
image_transformed.numpy(),
)
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
@pytest.mark.parametrize("fill_value", [0.0, 1.0])
def test_transform_constant_fill_mode(dtype, fill_value):
if fill_value != 0.0 and LooseVersion(tf.__version__) < LooseVersion("2.4.0"):
pytest.skip("Nonzero fill_value is not supported for TensorFlow < 2.4.0.")
image = tf.constant(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]], dtype=dtype
)
expected = np.asarray(
[
[fill_value, 0, 1, 2],
[fill_value, 4, 5, 6],
[fill_value, 8, 9, 10],
[fill_value, 12, 13, 14],
],
dtype=dtype.as_numpy_dtype,
)
# Translate right by 1 (the transformation matrix is always inverted,
# hence the -1).
translation = tf.constant([1, 0, -1, 0, 1, 0, 0, 0], dtype=tf.float32)
image_transformed = transform_ops.transform(
image,
translation,
fill_mode="constant",
fill_value=fill_value,
)
np.testing.assert_equal(image_transformed.numpy(), expected)
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_transform_reflect_fill_mode(dtype):
image = tf.constant(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]], dtype=dtype
)
expected = np.asarray(
[[0, 0, 1, 2], [4, 4, 5, 6], [8, 8, 9, 10], [12, 12, 13, 14]],
dtype=dtype.as_numpy_dtype,
)
# Translate right by 1 (the transformation matrix is always inverted,
# hence the -1).
translation = tf.constant([1, 0, -1, 0, 1, 0, 0, 0], dtype=tf.float32)
image_transformed = transform_ops.transform(image, translation, fill_mode="reflect")
np.testing.assert_equal(image_transformed.numpy(), expected)
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_transform_wrap_fill_mode(dtype):
image = tf.constant(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]], dtype=dtype
)
expected = np.asarray(
[[3, 0, 1, 2], [7, 4, 5, 6], [11, 8, 9, 10], [15, 12, 13, 14]],
dtype=dtype.as_numpy_dtype,
)
# Translate right by 1 (the transformation matrix is always inverted,
# hence the -1).
translation = tf.constant([1, 0, -1, 0, 1, 0, 0, 0], dtype=tf.float32)
image_transformed = transform_ops.transform(image, translation, fill_mode="wrap")
np.testing.assert_equal(image_transformed.numpy(), expected)
@pytest.mark.skipif(
LooseVersion(tf.__version__) < LooseVersion("2.4.0"),
reason="NEAREST fill mode is not supported for TensorFlow < 2.4.0.",
)
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_transform_nearest_fill_mode(dtype):
image = tf.constant(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]], dtype=dtype
)
expected = np.asarray(
[[0, 0, 0, 1], [4, 4, 4, 5], [8, 8, 8, 9], [12, 12, 12, 13]],
dtype=dtype.as_numpy_dtype,
)
# Translate right by 2 (the transformation matrix is always inverted,
# hence the -2).
translation = tf.constant([1, 0, -2, 0, 1, 0, 0, 0], dtype=tf.float32)
image_transformed = transform_ops.transform(image, translation, fill_mode="nearest")
np.testing.assert_equal(image_transformed.numpy(), expected)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_transform_static_output_shape():
image = tf.constant([[1.0, 2.0], [3.0, 4.0]])
result = transform_ops.transform(
image, tf.random.uniform([8], -1, 1), output_shape=[3, 5]
)
np.testing.assert_equal([3, 5], result.shape)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_transform_unknown_shape():
fn = tf.function(transform_ops.transform).get_concrete_function(
tf.TensorSpec(shape=None, dtype=tf.float32), [1, 0, 0, 0, 1, 0, 0, 0]
)
for shape in (2, 4), (2, 4, 3), (1, 2, 4, 3):
image = tf.ones(shape=shape)
np.testing.assert_equal(image.numpy(), fn(image).numpy())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def _test_grad(input_shape, output_shape=None):
image_size = tf.math.cumprod(input_shape)[-1]
image_size = tf.cast(image_size, tf.float32)
test_image = tf.reshape(tf.range(0, image_size, dtype=tf.float32), input_shape)
# Scale test image to range [0, 0.01]
test_image = (test_image / image_size) * 0.01
def transform_fn(x):
x.set_shape(input_shape)
transform = transform_ops.angles_to_projective_transforms(np.pi / 2, 4, 4)
return transform_ops.transform(
images=x, transforms=transform, output_shape=output_shape
)
theoretical, numerical = tf.test.compute_gradient(transform_fn, [test_image])
np.testing.assert_almost_equal(theoretical[0], numerical[0], decimal=6)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_grad():
_test_grad([8, 8])
_test_grad([8, 8], [4, 4])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_transform_data_types(dtype):
image = tf.constant([[1, 2], [3, 4]], dtype=dtype)
np.testing.assert_equal(
np.array([[4, 4], [4, 4]]).astype(dtype.as_numpy_dtype),
transform_ops.transform(image, [1] * 8),
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_transform_eager():
image = tf.constant([[1.0, 2.0], [3.0, 4.0]])
np.testing.assert_equal(
np.array([[4, 4], [4, 4]]), transform_ops.transform(image, [1] * 8)
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_zeros(dtype):
for shape in [(5, 5), (24, 24), (2, 24, 24, 3)]:
for angle in [0, 1, np.pi / 2.0]:
image = tf.zeros(shape, dtype)
np.testing.assert_equal(
transform_ops.rotate(image, angle),
np.zeros(shape, dtype.as_numpy_dtype),
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_rotate_even(dtype):
image = tf.reshape(tf.cast(tf.range(36), dtype), (6, 6))
image_rep = tf.tile(image[None, :, :, None], [3, 1, 1, 1])
angles = tf.constant([0.0, np.pi / 4.0, np.pi / 2.0], tf.float32)
image_rotated = transform_ops.rotate(image_rep, angles)
np.testing.assert_equal(
image_rotated.numpy()[:, :, :, 0],
[
[
[0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35],
],
[
[0, 3, 4, 11, 17, 0],
[2, 3, 9, 16, 23, 23],
[1, 8, 15, 21, 22, 29],
[6, 13, 20, 21, 27, 34],
[12, 18, 19, 26, 33, 33],
[0, 18, 24, 31, 32, 0],
],
[
[5, 11, 17, 23, 29, 35],
[4, 10, 16, 22, 28, 34],
[3, 9, 15, 21, 27, 33],
[2, 8, 14, 20, 26, 32],
[1, 7, 13, 19, 25, 31],
[0, 6, 12, 18, 24, 30],
],
],
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_rotate_odd(dtype):
image = tf.reshape(tf.cast(tf.range(25), dtype), (5, 5))
image_rep = tf.tile(image[None, :, :, None], [3, 1, 1, 1])
angles = tf.constant([np.pi / 4.0, 1.0, -np.pi / 2.0], tf.float32)
image_rotated = transform_ops.rotate(image_rep, angles)
np.testing.assert_equal(
image_rotated.numpy()[:, :, :, 0],
[
[
[0, 3, 8, 9, 0],
[1, 7, 8, 13, 19],
[6, 6, 12, 18, 18],
[5, 11, 16, 17, 23],
[0, 15, 16, 21, 0],
],
[
[0, 3, 9, 14, 0],
[2, 7, 8, 13, 19],
[1, 6, 12, 18, 23],
[5, 11, 16, 17, 22],
[0, 10, 15, 21, 0],
],
[
[20, 15, 10, 5, 0],
[21, 16, 11, 6, 1],
[22, 17, 12, 7, 2],
[23, 18, 13, 8, 3],
[24, 19, 14, 9, 4],
],
],
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_compose_rotate(dtype):
image = tf.constant(
[[1, 1, 1, 0], [1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], dtype=dtype
)
# Rotate counter-clockwise by pi / 2.
rotation = transform_ops.angles_to_projective_transforms(np.pi / 2, 4, 4)
# Translate right by 1 (the transformation matrix is always inverted,
# hence the -1).
translation = tf.constant([1, 0, -1, 0, 1, 0, 0, 0], dtype=tf.float32)
composed = transform_ops.compose_transforms([rotation, translation])
image_transformed = transform_ops.transform(image, composed)
np.testing.assert_equal(
image_transformed.numpy(),
[[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]],
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_bilinear():
image = tf.constant(
[
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
],
tf.float32,
)
# The following result matches:
# >>> scipy.ndimage.rotate(image, 45, order=1, reshape=False)
# which uses spline interpolation of order 1, equivalent to bilinear
# interpolation.
transformed = transform_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR")
np.testing.assert_allclose(
transformed.numpy(),
[
[0.000, 0.000, 0.343, 0.000, 0.000],
[0.000, 0.586, 0.914, 0.586, 0.000],
[0.343, 0.914, 0.000, 0.914, 0.343],
[0.000, 0.586, 0.914, 0.586, 0.000],
[0.000, 0.000, 0.343, 0.000, 0.000],
],
atol=0.001,
)
transformed = transform_ops.rotate(image, np.pi / 4.0, interpolation="NEAREST")
np.testing.assert_allclose(
transformed.numpy(),
[
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
],
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_bilinear_uint8():
image = tf.constant(
np.asarray(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 255, 255, 255, 0.0],
[0.0, 255, 0.0, 255, 0.0],
[0.0, 255, 255, 255, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
np.uint8,
),
tf.uint8,
)
# == np.rint((expected image above) * 255)
transformed = transform_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR")
np.testing.assert_equal(
transformed.numpy(),
[
[0.0, 0.0, 87.0, 0.0, 0.0],
[0.0, 149, 233, 149, 0.0],
[87.0, 233, 0.0, 233, 87.0],
[0.0, 149, 233, 149, 0.0],
[0.0, 0.0, 87.0, 0.0, 0.0],
],
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_rotate_static_shape():
image = tf.linalg.diag([1.0, 2.0, 3.0])
result = transform_ops.rotate(
image, tf.random.uniform((), -1, 1), interpolation="BILINEAR"
)
np.testing.assert_equal(image.get_shape(), result.get_shape())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_unknown_shape():
fn = tf.function(transform_ops.rotate).get_concrete_function(
tf.TensorSpec(shape=None, dtype=tf.float32), 0
)
for shape in (2, 4), (2, 4, 3), (1, 2, 4, 3):
image = tf.ones(shape=shape)
np.testing.assert_equal(image.numpy(), fn(image).numpy())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES - {tf.dtypes.float16})
def test_shear_x(dtype):
image = np.random.randint(low=0, high=255, size=(4, 4, 3)).astype(
dtype.as_numpy_dtype
)
color = tf.constant([255, 0, 255], tf.int32)
level = tf.random.uniform(shape=(), minval=0, maxval=1)
tf_image = tf.constant(image)
sheared_img = transform_ops.shear_x(tf_image, level, replace=color)
transform_matrix = transform.AffineTransform(
np.array([[1, level.numpy(), 0], [0, 1, 0], [0, 0, 1]])
)
expected_img = transform.warp(
image, transform_matrix, order=0, cval=-1, preserve_range=True
)
mask = np.where(expected_img == -1)
expected_img[mask[0], mask[1], :] = color
np.testing.assert_equal(sheared_img.numpy(), expected_img)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES - {tf.dtypes.float16})
def test_shear_y(dtype):
image = np.random.randint(low=0, high=255, size=(4, 4, 3)).astype(
dtype.as_numpy_dtype
)
color = tf.constant([255, 0, 255], tf.int32)
level = tf.random.uniform(shape=(), minval=0, maxval=1)
tf_image = tf.constant(image)
sheared_img = transform_ops.shear_y(image=tf_image, level=level, replace=color)
transform_matrix = transform.AffineTransform(
np.array([[1, 0, 0], [level.numpy(), 1, 0], [0, 0, 1]])
)
expected_img = transform.warp(
image, transform_matrix, order=0, cval=-1, preserve_range=True
)
mask = np.where(expected_img == -1)
expected_img[mask[0], mask[1], :] = color
test_utils.assert_allclose_according_to_type(sheared_img.numpy(), expected_img)
| apache-2.0 | -1,881,526,463,542,878,700 | 34.547414 | 88 | 0.56566 | false |
onshape-public/onshape-clients | python/onshape_client/oas/models/bt_body_type_filter112.py | 1 | 7225 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_body_type_filter112_all_of
except ImportError:
bt_body_type_filter112_all_of = sys.modules[
"onshape_client.oas.models.bt_body_type_filter112_all_of"
]
try:
from onshape_client.oas.models import bt_query_filter183
except ImportError:
bt_query_filter183 = sys.modules["onshape_client.oas.models.bt_query_filter183"]
class BTBodyTypeFilter112(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("body_type",): {
"SOLID": "SOLID",
"SHEET": "SHEET",
"WIRE": "WIRE",
"POINT": "POINT",
"MATE_CONNECTOR": "MATE_CONNECTOR",
"COMPOSITE": "COMPOSITE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"body_type": (str,), # noqa: E501
"bt_type": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"body_type": "bodyType", # noqa: E501
"bt_type": "btType", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_body_type_filter112.BTBodyTypeFilter112 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
body_type (str): [optional] # noqa: E501
bt_type (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
bt_body_type_filter112_all_of.BTBodyTypeFilter112AllOf,
bt_query_filter183.BTQueryFilter183,
],
"oneOf": [],
}
| mit | -6,208,763,084,608,335,000 | 33.903382 | 84 | 0.578685 | false |
alex/django-old | django/contrib/auth/tests/auth_backends.py | 1 | 9965 | import warnings
from django.conf import settings
from django.contrib.auth.models import User, Group, Permission, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
class BackendTest(TestCase):
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
User.objects.create_user('test', '[email protected]', 'test')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
user = User.objects.get(username='test')
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = False
user.is_superuser = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = User.objects.get(username='test')
content_type=ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
# reloading user to purge the _perm_cache
user = User.objects.get(username='test')
self.assertEqual(user.get_all_permissions() == set([u'auth.test']), True)
self.assertEqual(user.get_group_permissions(), set([]))
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
user.save()
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user.save()
user = User.objects.get(username='test')
self.assertEqual(user.get_all_permissions(), set([u'auth.test2', u'auth.test', u'auth.test3']))
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
group.save()
user.groups.add(group)
user = User.objects.get(username='test')
exp = set([u'auth.test2', u'auth.test', u'auth.test3', u'auth.test_group'])
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), set([u'auth.test_group']))
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = User.objects.get(username='test')
content_type=ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set([]))
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), set(['auth.test']))
class TestObj(object):
pass
class SimpleRowlevelBackend(object):
supports_object_permissions = True
# This class also supports tests for anonymous user permissions,
# via subclasses which just set the 'supports_anonymous_user' attribute.
def has_perm(self, user, perm, obj=None):
if not obj:
return # We only support row level perms
if isinstance(obj, TestObj):
if user.username == 'test2':
return True
elif user.is_anonymous() and perm == 'anon':
# not reached due to supports_anonymous_user = False
return True
return False
def has_module_perms(self, user, app_label):
return app_label == "app1"
def get_all_permissions(self, user, obj=None):
if not obj:
return [] # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if user.is_anonymous():
return ['anon']
if user.username == 'test2':
return ['simple', 'advanced']
else:
return ['simple']
def get_group_permissions(self, user, obj=None):
if not obj:
return # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if 'test_group' in [group.name for group in user.groups.all()]:
return ['group_perm']
else:
return ['none']
class RowlevelBackendTest(TestCase):
"""
Tests for auth backend that supports object level permissions
"""
backend = 'django.contrib.auth.tests.auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = self.curr_auth + (self.backend,)
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user2 = User.objects.create_user('test2', '[email protected]', 'test')
self.user3 = User.objects.create_user('test3', '[email protected]', 'test')
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.auth')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
warnings.resetwarnings()
warnings.simplefilter('ignore', PendingDeprecationWarning)
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user2.has_perm('perm', TestObj()), True)
self.assertEqual(self.user2.has_perm('perm'), False)
self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
self.assertEqual(self.user3.has_perm('perm', TestObj()), False)
self.assertEqual(self.user3.has_perm('anon', TestObj()), False)
self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['simple']))
self.assertEqual(self.user2.get_all_permissions(TestObj()), set(['simple', 'advanced']))
self.assertEqual(self.user2.get_all_permissions(), set([]))
def test_get_group_permissions(self):
content_type=ContentType.objects.get_for_model(Group)
group = Group.objects.create(name='test_group')
self.user3.groups.add(group)
self.assertEqual(self.user3.get_group_permissions(TestObj()), set(['group_perm']))
class AnonymousUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = True
class NoAnonymousUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = False
class AnonymousUserBackendTest(TestCase):
"""
Tests for AnonymousUser delegating to backend if it has 'supports_anonymous_user' = True
"""
backend = 'django.contrib.auth.tests.auth_backends.AnonymousUserBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.user1 = AnonymousUser()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('anon', TestObj()), True)
def test_has_perms(self):
self.assertEqual(self.user1.has_perms(['anon'], TestObj()), True)
self.assertEqual(self.user1.has_perms(['anon', 'perm'], TestObj()), False)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), True)
self.assertEqual(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['anon']))
class NoAnonymousUserBackendTest(TestCase):
"""
Tests that AnonymousUser does not delegate to backend if it has 'supports_anonymous_user' = False
"""
backend = 'django.contrib.auth.tests.auth_backends.NoAnonymousUserBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = self.curr_auth + (self.backend,)
self.user1 = AnonymousUser()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('anon', TestObj()), False)
def test_has_perms(self):
self.assertEqual(self.user1.has_perms(['anon'], TestObj()), False)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), False)
self.assertEqual(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set())
| bsd-3-clause | 4,535,194,109,111,291,000 | 38.387352 | 109 | 0.649875 | false |
APTrust/EarthDiver | dpnode/dpn/client/apiclient.py | 1 | 2153 | import json
from os.path import normpath
from http.client import HTTPSConnection, HTTPConnection
from urllib.parse import urlencode, urlparse
class APIException(Exception):
pass
class APIClient:
"""
A basic client to query the DPN REST API.
"""
def __init__(self, url, api_token):
self.set_token(api_token)
self.headers = {
'Authorization': "Token %s" % self.api_token,
'Accept': 'application/json',
'Content-Type': 'application/json',
}
self._set_conn(url)
def _set_conn(self, url):
"""
Parses the url and sets connection details and path information for the
client.
:param url: String of the url to use as the root for API calls.
:return: None
"""
url = urlparse(url)
self.baseurl = url.netloc
self.basepath = url.path
if url.scheme == 'https':
conn = HTTPSConnection
elif url.scheme == 'http':
conn = HTTPConnection
else:
raise APIException("Unsupported protocol %s" % url.scheme)
self.conn = conn(self.baseurl)
def set_token(self, token):
self.api_token = token
def _get_path(self, path):
url = normpath("%s%s" % (self.basepath, path))
url = url.replace('//', '/')
if path.endswith('/'):
url += '/'
return url
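    # Illustrative behaviour of _get_path (hypothetical values; assuming the
    # client was built with url='https://node.example.org/api-v1'):
    #   _get_path('/member/')  ->  '/api-v1/member/'
    #   _get_path('/member')   ->  '/api-v1/member'
    # normpath() drops a trailing slash, so it is re-added whenever the
    # caller's path ended with one.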
def _request(self, method, path, data=None, params=None):
url = self._get_path(path)
if params:
url = "%s?%s" % (url, urlencode(params))
self.conn.request(method, url, body=json.dumps(data), headers=self.headers)
return self.conn.getresponse()
def get(self, path, params=None):
return self._request('GET', path, params=params)
def post(self, path, data=None):
return self._request('POST', path, data=data)
def put(self, path, data=None):
return self._request('PUT', path, data=data)
def patch(self, path, data=None):
return self._request('PATCH', path, data=data)
def delete(self, path):
return self._request('DELETE', path)
| apache-2.0 | -1,497,941,737,126,121,000 | 28.493151 | 83 | 0.579656 | false |
hmendozap/auto-sklearn | test/util/test_common.py | 1 | 1264 | # -*- encoding: utf-8 -*-
from __future__ import print_function
from functools import partial
import os
import unittest
from autosklearn.util import set_auto_seed, get_auto_seed, del_auto_seed, \
check_pid
class TestUtilsCommon(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.env_key = 'AUTOSKLEARN_SEED'
def test_auto_seed(self):
value = 123
set_auto_seed(value)
self.assertEqual(os.environ[self.env_key], str(value))
del_auto_seed()
self.assertEqual(os.environ.get(self.env_key), None)
def test_get_auto_seed(self):
del_auto_seed()
self.assertRaises(AssertionError, get_auto_seed)
set_auto_seed([])
self.assertRaises(ValueError, get_auto_seed)
self.assertRaises(ValueError, partial(set_auto_seed, 5))
del_auto_seed()
set_auto_seed(5)
self.assertEqual(os.environ.get(self.env_key), str(5))
def test_check_pid(self):
our_pid = os.getpid()
exists = check_pid(our_pid)
self.assertTrue(exists)
our_pid = -11000 # We hope this pid does not exist
exists = check_pid(our_pid)
self.assertFalse(exists)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -3,619,677,723,574,373,400 | 26.478261 | 75 | 0.626582 | false |
Inspq/ansible | lib/ansible/modules/identity/sx5/sx5_client.py | 1 | 16092 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, INSPQ Team SX5
#
# This file is not part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author: "Etienne Sadio ([email protected]"
module: sx5_client
short_description: Configure a client application url in SX5 DB
description:
    - This module creates or updates a client application URL.
version_added: "2.3"
options:
idp_url:
description:
- The url of the Keycloak server.
default: http://localhost:8080/auth
required: true
username:
description:
- The username to logon to the master realm in Keycloak.
required: true
password:
description:
- The password for the user to logon the master realm in Keycloak.
required: true
realm:
description:
- The name of the Keycloak realm in which is the client.
required: true
clientId:
description:
- OIDC Client ID for the client in Keycloak.
required: true
force:
choices: [ "yes", "no" ]
default: "no"
description:
- If yes, allows to remove client and recreate it.
required: false
state:
description:
- Control if the client must exists or not
choices: [ "present", "absent" ]
default: present
required: false
clientUrl:
description:
- Client URL.
default:
required: true
sx5url:
description:
- sx5 REST services URL.
default:
required: true
notes:
- module does not modify clientId.
'''
EXAMPLES = '''
- name: Create a client client1 with default settings.
SX5_client:
idp_url: http://localhost:8080/auth
realm: Master
clientId: client1
clientUrl: http://localhost:8088/clients
sx5url: http://localhost/client1
state: present
- name: Re-create client1
SX5_client:
idp_url: http://localhost:8080/auth
realm: Master
clientId: client1
clientUrl: http://localhost:8088/clients
sx5url: http://localhost/client1
state: present
force: yes
- name: Remove client1
SX5_client:
idp_url: http://localhost:8080/auth
realm: Master
clientId: client1
clientUrl: http://localhost:8088/clients
sx5url: http://localhost/client1
state: absent
'''
RETURN = '''
ansible_facts:
description: JSON representation for the client.
returned: on success
type: dict
stderr:
description: Error message if it is the case
returned: on error
type: str
rc:
description: return code, 0 if success, 1 otherwise.
returned: always
type: bool
changed:
description: Return True if the operation changed the client on the SX5 DB, false otherwise.
returned: always
type: bool
'''
import requests
import json
import urllib
from ansible.module_utils.sx5_client_utils import *
from __builtin__ import isinstance
def main():
module = AnsibleModule(
argument_spec = dict(
idp_url=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(required=True),
realm=dict(type='str', required=True),
clientId=dict(type='str', required=True),
force=dict(type='bool', default=False),
state=dict(choices=["absent", "present"], default='present'),
clientUrl = dict(type='str', required=True),
sx5url = dict(type='str', required=True),
),
supports_check_mode=True,
)
params = module.params.copy()
params['force'] = module.boolean(module.params['force'])
result = client(params)
if result['rc'] != 0:
module.fail_json(msg='non-zero return code', **result)
else:
module.exit_json(**result)
def client(params):
idp_url = params['idp_url']
username = params['username']
password = params['password']
realm = params['realm']
force = params['force']
sx5url = params['sx5url']
state = params['state']
clientSvcBaseUrl = idp_url + "/auth/admin/realms/" + realm + "/clients/"
    # Create a representation of the client for the clientURL database
newClientDBRepresentation = {}
newClientDBRepresentation["clientId"] = params['clientId'].decode("utf-8")
newClientDBRepresentation["realmId"] = params['realm'].decode("utf-8")
newClientDBRepresentation["url"] = params['clientUrl'].decode("utf-8")
if params['username'] is not None:
newClientDBRepresentation["username"] = params['username'].decode("utf-8")
else:
newClientDBRepresentation["username"] = ""
if params['password'] is not None:
newClientDBRepresentation["password"] = params['password'].decode("utf-8")
else:
newClientDBRepresentation["password"] = ""
rc = 0
result = dict()
changed = False
try:
headers = loginAndSetHeaders(idp_url, username, password)
except Exception, e:
result = dict(
stderr = 'login: ' + str(e),
rc = 1,
changed = changed
)
return result
try:
        # Check whether the client exists on the Keycloak server
getResponse = requests.get(clientSvcBaseUrl, headers=headers, params={'clientId': newClientDBRepresentation["clientId"]})
except Exception, e:
result = dict(
stderr = 'Keycloak client get: ' + str(e),
rc = 1,
changed = changed
)
return result
    if len(getResponse.json()) == 0:  # the client does not exist in the IDP
result = dict(
            stderr = 'Client does not exist in IDP: ' + newClientDBRepresentation["clientId"],
rc = 1,
changed = changed
)
return result
    if state == 'present':  # if the requested state is present
        if force:  # if force is yes, update the client even if it already exists
try:
getResponse = sx5RestClientget(sx5url,newClientDBRepresentation["clientId"],newClientDBRepresentation["realmId"])
if getResponse.status_code == 200:
dataResponse = getResponse.json()
body = {
"clientId": newClientDBRepresentation["clientId"],
"realmId": newClientDBRepresentation["realmId"],
"url": newClientDBRepresentation["url"],
"username": newClientDBRepresentation["username"],
"password": newClientDBRepresentation["password"]
}
try:
getResponse = requests.put(sx5url+'/'+str(dataResponse['id']),json=body)
dataResponse = getResponse.json()
changed = True
fact = dict(
clientSx5 = dataResponse
)
result = dict(
ansible_facts = fact,
rc = 0,
changed = changed
)
except requests.exceptions.RequestException, e:
fact = dict(
clientSx5 = newClientDBRepresentation)
result = dict(
ansible_facts= fact,
stderr = 'Update client: ' + newClientDBRepresentation["clientId"] + ' erreur: ' + str(e),
rc = 1,
changed = changed
)
except ValueError, e:
fact = dict(
clientSx5 = newClientDBRepresentation)
result = dict(
ansible_facts= fact,
stderr = 'Update client: ' + newClientDBRepresentation["clientId"] + ' erreur: ' + str(e),
rc = 1,
changed = changed
)
else:
body = {
"clientId": newClientDBRepresentation["clientId"],
"realmId": newClientDBRepresentation["realmId"],
"url": newClientDBRepresentation["url"],
"username": newClientDBRepresentation["username"],
"password": newClientDBRepresentation["password"]
}
try:
getResponse = requests.post(sx5url,json=body)
dataResponse = getResponse.json()
                        changed = True
fact = dict(
clientSx5 = dataResponse
)
result = dict(
ansible_facts = fact,
rc = 0,
changed = changed
)
except requests.exceptions.RequestException, e:
fact = dict(
clientSx5 = newClientDBRepresentation)
result = dict(
ansible_facts= fact,
stderr = 'Create client: ' + newClientDBRepresentation["clientId"] + ' erreur: ' + str(e),
rc = 1,
changed = changed
)
except ValueError, e:
fact = dict(
clientSx5 = newClientDBRepresentation)
result = dict(
ansible_facts= fact,
stderr = 'Create client: ' + newClientDBRepresentation["clientId"] + ' erreur: ' + str(e),
rc = 1,
changed = changed
)
except Exception, e:
result = dict(
stderr = 'Client get in Force = yes and state = present: ' + str(e),
rc = 1,
changed = changed
)
        else:  # if force is no, update the client only if it has changed
try:
getResponse = sx5RestClientget(sx5url,newClientDBRepresentation["clientId"],newClientDBRepresentation["realmId"])
if getResponse.status_code == 404:
try:
clientSx5ServRepresentation = sx5RestClientCreat(sx5url,newClientDBRepresentation["clientId"],newClientDBRepresentation["realmId"],newClientDBRepresentation["url"],newClientDBRepresentation["username"],newClientDBRepresentation["password"])
changed = True
fact = dict(
clientSx5 = clientSx5ServRepresentation
)
result = dict(
ansible_facts = fact,
rc = 0,
changed = changed
)
except Exception, e:
result = dict(
stderr = 'Client create or update error: ' + str(e),
rc = 1,
changed = changed
)
else:
dataResponse = getResponse.json()
excludes = []
if isDictEquals(dataResponse,newClientDBRepresentation,excludes):
changed = False
else:
try:
clientSx5ServRepresentation = sx5RestClientCreat(sx5url,newClientDBRepresentation["clientId"],newClientDBRepresentation["realmId"],newClientDBRepresentation["url"],newClientDBRepresentation["username"],newClientDBRepresentation["password"])
changed = True
fact = dict(
clientSx5 = clientSx5ServRepresentation
)
result = dict(
ansible_facts = fact,
rc = 0,
changed = changed
)
except Exception, e:
result = dict(
stderr = 'Client create or update error: ' + str(e),
rc = 1,
changed = changed
)
except Exception, e:
result = dict(
stderr = 'Client get in force = no and state = present: ' + str(e),
rc = 1,
changed = changed
)
    elif state == 'absent':  # delete the client
try:
getResponse = sx5RestClientget(sx5url,newClientDBRepresentation["clientId"],newClientDBRepresentation["realmId"])
if getResponse.status_code == 200:
dataResponse = getResponse.json()
try:
                    deleteResponse = requests.delete(sx5url + '/' + str(dataResponse['id']), headers=headers)
                    changed = True
                    result = dict(
                        stdout = 'deleted ' + str(deleteResponse),
rc = 0,
changed = changed
)
except requests.exceptions.RequestException, e:
fact = dict(
clientSx5 = dataResponse)
result = dict(
ansible_facts= fact,
stderr = 'Delete client: ' + newClientDBRepresentation["clientId"] + ' erreur: ' + str(e),
rc = 1,
changed = changed
)
except ValueError, e:
fact = dict(
clientSx5 = dataResponse)
result = dict(
ansible_facts = fact,
stderr = 'Delete client: ' + newClientDBRepresentation["clientId"] + ' erreur: ' + str(e),
rc = 1,
changed = changed
)
else:
result = dict(
                    stdout = 'Client or realm not found',
rc = 0,
changed = changed
)
except Exception, e:
result = dict(
stderr = 'Client get in state = absent : ' + str(e),
rc = 1,
changed = changed
)
return result
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -4,687,860,115,322,721,000 | 38.723457 | 268 | 0.486387 | false |
kovidgoyal/kitty | kitty/layout/grid.py | 1 | 12595 | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2020, Kovid Goyal <kovid at kovidgoyal.net>
from functools import lru_cache
from itertools import repeat
from math import ceil, floor
from typing import (
Any, Callable, Dict, Generator, List, Optional, Sequence, Set, Tuple
)
from kitty.borders import BorderColor
from kitty.types import Edges
from kitty.typing import WindowType
from kitty.window_list import WindowGroup, WindowList
from .base import (
BorderLine, Layout, LayoutData, LayoutDimension, ListOfWindows,
NeighborsMap, layout_dimension, lgd, variable_bias
)
from .tall import neighbors_for_tall_window
@lru_cache()
def calc_grid_size(n: int) -> Tuple[int, int, int, int]:
if n <= 5:
ncols = 1 if n == 1 else 2
else:
for ncols in range(3, (n // 2) + 1):
if ncols * ncols >= n:
break
nrows = n // ncols
special_rows = n - (nrows * (ncols - 1))
special_col = 0 if special_rows < nrows else ncols - 1
return ncols, nrows, special_rows, special_col
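    # Illustrative values, worked out by hand from the rules above (added for
    # clarity; not part of the original source):
    #   calc_grid_size(4) -> (2, 2, 2, 1)   two columns of two rows each
    #   calc_grid_size(5) -> (2, 2, 3, 1)   the last column absorbs the extra window
    #   calc_grid_size(7) -> (3, 2, 3, 2)   three columns, with the extra row in the last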
class Grid(Layout):
name = 'grid'
no_minimal_window_borders = True
def remove_all_biases(self) -> bool:
self.biased_rows: Dict[int, float] = {}
self.biased_cols: Dict[int, float] = {}
return True
def column_layout(
self,
num: int,
bias: Optional[Sequence[float]] = None,
) -> LayoutDimension:
decoration_pairs = tuple(repeat((0, 0), num))
return layout_dimension(lgd.central.left, lgd.central.width, lgd.cell_width, decoration_pairs, bias=bias, left_align=lgd.align_top_left)
def row_layout(
self,
num: int,
bias: Optional[Sequence[float]] = None,
) -> LayoutDimension:
decoration_pairs = tuple(repeat((0, 0), num))
return layout_dimension(lgd.central.top, lgd.central.height, lgd.cell_height, decoration_pairs, bias=bias, left_align=lgd.align_top_left)
def variable_layout(self, layout_func: Callable[..., LayoutDimension], num_windows: int, biased_map: Dict[int, float]) -> LayoutDimension:
return layout_func(num_windows, bias=variable_bias(num_windows, biased_map) if num_windows > 1 else None)
def apply_bias(self, idx: int, increment: float, all_windows: WindowList, is_horizontal: bool = True) -> bool:
b = self.biased_cols if is_horizontal else self.biased_rows
num_windows = all_windows.num_groups
ncols, nrows, special_rows, special_col = calc_grid_size(num_windows)
def position_for_window_idx(idx: int) -> Tuple[int, int]:
row_num = col_num = 0
def on_col_done(col_windows: List[int]) -> None:
nonlocal col_num, row_num
row_num = 0
col_num += 1
for window_idx, xl, yl in self.layout_windows(
num_windows, nrows, ncols, special_rows, special_col, on_col_done):
if idx == window_idx:
return row_num, col_num
row_num += 1
row_num, col_num = position_for_window_idx(idx)
if is_horizontal:
b = self.biased_cols
if ncols < 2:
return False
bias_idx = col_num
attr = 'biased_cols'
def layout_func(windows: ListOfWindows, bias: Optional[Sequence[float]] = None) -> LayoutDimension:
return self.column_layout(num_windows, bias=bias)
else:
b = self.biased_rows
if max(nrows, special_rows) < 2:
return False
bias_idx = row_num
attr = 'biased_rows'
def layout_func(windows: ListOfWindows, bias: Optional[Sequence[float]] = None) -> LayoutDimension:
return self.row_layout(num_windows, bias=bias)
before_layout = list(self.variable_layout(layout_func, num_windows, b))
candidate = b.copy()
before = candidate.get(bias_idx, 0)
candidate[bias_idx] = before + increment
if before_layout == list(self.variable_layout(layout_func, num_windows, candidate)):
return False
setattr(self, attr, candidate)
return True
def layout_windows(
self,
num_windows: int,
nrows: int, ncols: int,
special_rows: int, special_col: int,
on_col_done: Callable[[List[int]], None] = lambda col_windows: None
) -> Generator[Tuple[int, LayoutData, LayoutData], None, None]:
# Distribute windows top-to-bottom, left-to-right (i.e. in columns)
xlayout = self.variable_layout(self.column_layout, ncols, self.biased_cols)
yvals_normal = tuple(self.variable_layout(self.row_layout, nrows, self.biased_rows))
yvals_special = yvals_normal if special_rows == nrows else tuple(self.variable_layout(self.row_layout, special_rows, self.biased_rows))
pos = 0
for col in range(ncols):
rows = special_rows if col == special_col else nrows
yls = yvals_special if col == special_col else yvals_normal
xl = next(xlayout)
col_windows = []
for i, yl in enumerate(yls):
window_idx = pos + i
yield window_idx, xl, yl
col_windows.append(window_idx)
pos += rows
on_col_done(col_windows)
def do_layout(self, all_windows: WindowList) -> None:
n = all_windows.num_groups
if n == 1:
self.layout_single_window_group(next(all_windows.iter_all_layoutable_groups()))
return
ncols, nrows, special_rows, special_col = calc_grid_size(n)
groups = tuple(all_windows.iter_all_layoutable_groups())
win_col_map: List[List[WindowGroup]] = []
def on_col_done(col_windows: List[int]) -> None:
col_windows_w = [groups[i] for i in col_windows]
win_col_map.append(col_windows_w)
def extents(ld: LayoutData) -> Tuple[int, int]:
start = ld.content_pos - ld.space_before
size = ld.space_before + ld.space_after + ld.content_size
return start, size
def layout(ld: LayoutData, cell_length: int, before_dec: int, after_dec: int) -> LayoutData:
start, size = extents(ld)
space_needed_for_decorations = before_dec + after_dec
content_size = size - space_needed_for_decorations
number_of_cells = content_size // cell_length
cell_area = number_of_cells * cell_length
extra = content_size - cell_area
if extra > 0 and not lgd.align_top_left:
before_dec += extra // 2
return LayoutData(start + before_dec, number_of_cells, before_dec, size - cell_area - before_dec, cell_area)
def position_window_in_grid_cell(window_idx: int, xl: LayoutData, yl: LayoutData) -> None:
wg = groups[window_idx]
edges = Edges(wg.decoration('left'), wg.decoration('top'), wg.decoration('right'), wg.decoration('bottom'))
xl = layout(xl, lgd.cell_width, edges.left, edges.right)
yl = layout(yl, lgd.cell_height, edges.top, edges.bottom)
self.set_window_group_geometry(wg, xl, yl)
for window_idx, xl, yl in self.layout_windows(
n, nrows, ncols, special_rows, special_col, on_col_done):
position_window_in_grid_cell(window_idx, xl, yl)
def minimal_borders(self, all_windows: WindowList) -> Generator[BorderLine, None, None]:
n = all_windows.num_groups
if not lgd.draw_minimal_borders or n < 2:
return
needs_borders_map = all_windows.compute_needs_borders_map(lgd.draw_active_borders)
ncols, nrows, special_rows, special_col = calc_grid_size(n)
is_first_row: Set[int] = set()
is_last_row: Set[int] = set()
is_first_column: Set[int] = set()
is_last_column: Set[int] = set()
groups = tuple(all_windows.iter_all_layoutable_groups())
bw = groups[0].effective_border()
if not bw:
return
xl: LayoutData = LayoutData()
yl: LayoutData = LayoutData()
prev_col_windows: List[int] = []
layout_data_map: Dict[int, Tuple[LayoutData, LayoutData]] = {}
def on_col_done(col_windows: List[int]) -> None:
nonlocal prev_col_windows, is_first_column
if col_windows:
is_first_row.add(groups[col_windows[0]].id)
is_last_row.add(groups[col_windows[-1]].id)
if not prev_col_windows:
is_first_column = {groups[x].id for x in col_windows}
prev_col_windows = col_windows
all_groups_in_order: List[WindowGroup] = []
for window_idx, xl, yl in self.layout_windows(n, nrows, ncols, special_rows, special_col, on_col_done):
wg = groups[window_idx]
all_groups_in_order.append(wg)
layout_data_map[wg.id] = xl, yl
is_last_column = {groups[x].id for x in prev_col_windows}
active_group = all_windows.active_group
def ends(yl: LayoutData) -> Tuple[int, int]:
return yl.content_pos - yl.space_before, yl.content_pos + yl.content_size + yl.space_after
def borders_for_window(gid: int) -> Generator[Edges, None, None]:
xl, yl = layout_data_map[gid]
left, right = ends(xl)
top, bottom = ends(yl)
first_row, last_row = gid in is_first_row, gid in is_last_row
first_column, last_column = gid in is_first_column, gid in is_last_column
# Horizontal
if not first_row:
yield Edges(left, top, right, top + bw)
if not last_row:
yield Edges(left, bottom - bw, right, bottom)
# Vertical
if not first_column:
yield Edges(left, top, left + bw, bottom)
if not last_column:
yield Edges(right - bw, top, right, bottom)
for wg in all_groups_in_order:
for edges in borders_for_window(wg.id):
yield BorderLine(edges)
for wg in all_groups_in_order:
if needs_borders_map.get(wg.id):
color = BorderColor.active if wg is active_group else BorderColor.bell
for edges in borders_for_window(wg.id):
yield BorderLine(edges, color)
def neighbors_for_window(self, window: WindowType, all_windows: WindowList) -> NeighborsMap:
n = all_windows.num_groups
if n < 4:
return neighbors_for_tall_window(1, window, all_windows)
wg = all_windows.group_for_window(window)
assert wg is not None
ncols, nrows, special_rows, special_col = calc_grid_size(n)
blank_row: List[Optional[int]] = [None for i in range(ncols)]
matrix = tuple(blank_row[:] for j in range(max(nrows, special_rows)))
wi = all_windows.iter_all_layoutable_groups()
pos_map: Dict[int, Tuple[int, int]] = {}
col_counts: List[int] = []
for col in range(ncols):
rows = special_rows if col == special_col else nrows
for row in range(rows):
w = next(wi)
matrix[row][col] = wid = w.id
pos_map[wid] = row, col
col_counts.append(rows)
row, col = pos_map[wg.id]
def neighbors(row: int, col: int) -> List[int]:
try:
ans = matrix[row][col]
except IndexError:
ans = None
return [] if ans is None else [ans]
def side(row: int, col: int, delta: int) -> List[int]:
neighbor_col = col + delta
neighbor_nrows = col_counts[neighbor_col]
nrows = col_counts[col]
if neighbor_nrows == nrows:
return neighbors(row, neighbor_col)
start_row = floor(neighbor_nrows * row / nrows)
end_row = ceil(neighbor_nrows * (row + 1) / nrows)
xs = []
for neighbor_row in range(start_row, end_row):
xs.extend(neighbors(neighbor_row, neighbor_col))
return xs
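        # Worked example for side() (values chosen purely for illustration):
        # with a 3-row current column and a 4-row neighbour column, row 2 maps
        # to start_row = floor(4 * 2 / 3) = 2 and end_row = ceil(4 * 3 / 3) = 4,
        # so that window is adjacent to neighbour rows 2 and 3.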
return {
'top': neighbors(row-1, col) if row else [],
'bottom': neighbors(row + 1, col),
'left': side(row, col, -1) if col else [],
'right': side(row, col, 1) if col < ncols - 1 else [],
}
def layout_state(self) -> Dict[str, Any]:
return {
'biased_cols': self.biased_cols,
'biased_rows': self.biased_rows
}
| gpl-3.0 | -4,798,671,985,011,705,000 | 40.705298 | 145 | 0.581421 | false |
georgeha/software_engineering | aws/restolingo/placeorder/models.py | 1 | 3621 | """
This file contains all the models that are used in our system
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from autoslug import AutoSlugField
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
# Create your models here.
class Employee(models.Model):
username = models.CharField(max_length=31)
password = models.TextField()
role = models.IntegerField()
def __unicode__(self):
return self.username
class Table(models.Model):
user_id = models.ForeignKey(User)
slug = AutoSlugField(max_length=31,
unique=True,
null=True,
populate_from=str('id'),
help_text='A label for URL config.')
status = models.CharField(max_length=10,
help_text='available - occupied - dirty') # the names should be available, occupied, dirty
def __str__(self):
return str(self.id)
class Item(models.Model):
name = models.CharField(max_length=50)
price = models.FloatField(default=0)
type = models.CharField(max_length=30,help_text="snacks - desserts")
slug = AutoSlugField(max_length=31,
unique=True,
null=True,
populate_from='name',
help_text='A label for URL config.')
def __str__(self):
return self.name
class Order(models.Model):
user_id = models.ForeignKey(User)
table_id = models.ForeignKey(Table)
status = models.IntegerField(default=0)
order_date = models.DateTimeField('date ordered',
auto_now_add=True)
items = models.ManyToManyField(Item, through='OrderItem') # Many to many relationship is created automatically here
slug = AutoSlugField(max_length=31,
unique=True,
null=True,
populate_from=str('order_date'),
help_text='A label for URL config.')
total = models.FloatField(default=0)
def get_absolute_url(self):
return reverse('orders_detail',
kwargs={'slug': self.slug})
def get_update_url(self):
return reverse('make_order_in_progress',
kwargs={'slug': self.slug})
def get_in_progress_url(self):
return reverse('orders_detail',
kwargs={'slug': self.slug})
def get_done_url(self):
return reverse('make_order_done',
kwargs={'slug': self.slug})
def __str__(self):
return str(self.slug)
class OrderItem(models.Model):
order_id = models.ForeignKey(Order, on_delete=models.CASCADE)
item_id = models.ForeignKey(Item, on_delete=models.CASCADE)
class DynamicOrder(models.Model):
item_name = models.CharField(max_length=50)
cost = models.FloatField(default=0)
table_id=models.ForeignKey(Table, on_delete=models.CASCADE)
def __str__(self):
return str(self.item_name)
class Shift(models.Model):
user_id = models.ForeignKey(User)
start_date = models.DateTimeField('start_date')
end_date = models.DateTimeField('end_date')
def __str__(self):
return str(self.id)
class OrderList(models.Model):
order_id = models.ForeignKey(Order)
item_name = models.ForeignKey(Item)
table_id=models.ForeignKey(Table)
status = models.IntegerField(default=1)
def __str__(self):
return str(self.id)
| mit | -3,469,167,692,740,765,000 | 30.215517 | 120 | 0.604529 | false |
helena-project/beetle | controller/manager/tasks.py | 1 | 5949 |
import socket
from datetime import timedelta
from celery.decorators import task, periodic_task
from celery.utils.log import get_task_logger
from django.utils import timezone
from beetle.models import VirtualDevice, PrincipalGroup
from network.models import ConnectedGateway, ConnectedDevice, DeviceMapping, \
ServiceInstance
from state.models import AdminAuthInstance, UserAuthInstance, \
PasscodeAuthInstance, ExclusiveLease
from main.constants import GATEWAY_CONNECTION_TIMEOUT
from .application.manager import IPC_COMMAND_PATH
logger = get_task_logger(__name__)
def _expand_principal_list(principals):
contains_all = False
ret = set()
for principal in principals:
if principal.name == "*":
contains_all = True
elif isinstance(principal, VirtualDevice):
ret.add(principal)
elif isinstance(principal, PrincipalGroup):
for member in principal.members.all():
                ret.add(member)  # expand groups into their member devices
return ret, contains_all
def _get_device_instances(devices):
ret = set()
for device in devices:
try:
other_instance = ConnectedDevice.objects.get(
device__name=device.name)
ret.add(other_instance)
except ConnectedDevice.DoesNotExist:
pass
return ret
def _filter_not_mapped_already_from(from_device, to_devices):
existing_mappings = DeviceMapping.objects.filter(
from_device=from_device, to_device__in=to_devices)
for mapping in existing_mappings:
to_devices.discard(mapping.to_device)
def _filter_not_mapped_already_to(from_devices, to_device):
existing_mappings = DeviceMapping.objects.filter(
from_device__in=from_devices, to_device=to_device)
for mapping in existing_mappings:
from_devices.discard(mapping.from_device)
def _make_mappings(device_instance, serve_to=None, client_to=None):
if not serve_to and not client_to:
return
def _make_single_mapping(s, from_device, to_device):
msg = "map %d %d" % (from_device.id, to_device.id)
logger.info(msg)
s.send(msg)
s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
s.connect(IPC_COMMAND_PATH)
if serve_to:
for other_device in serve_to:
_make_single_mapping(s, device_instance, other_device)
if client_to:
for other_device in client_to:
_make_single_mapping(s, other_device, device_instance)
s.shutdown(socket.SHUT_RDWR)
s.close()
def _get_discoverers(device_instance):
disc_by, disc_by_all = _expand_principal_list(
device_instance.device.discoverable_by.all())
def _discovery_allowed(other_instance):
return disc_by_all or other_instance.device in disc_by
services = ServiceInstance.objects.filter(device_instance=device_instance
).values("service")
potential_clients = ConnectedDevice.objects.filter(
interested_services__in=services)
potential_clients = set(x for x in potential_clients if _discovery_allowed(x))
_filter_not_mapped_already_to(potential_clients, device_instance)
return potential_clients
@task(name="connect_device")
def connect_device_evt(device_instance_id):
"""Create mappings when a device first connects."""
device_instance = ConnectedDevice.objects.get(id=device_instance_id)
serve_to, _ = _expand_principal_list(device_instance.device.serve_to.all())
serve_to = _get_device_instances(serve_to)
client_to, _ = _expand_principal_list(device_instance.device.client_to.all())
client_to = _get_device_instances(client_to)
_make_mappings(device_instance, serve_to=serve_to, client_to=client_to)
@task(name="update_device")
def update_device_evt(device_instance_id):
"""Create mappings when a device has been updated."""
device_instance = ConnectedDevice.objects.get(id=device_instance_id)
serve_to, _= _expand_principal_list(device_instance.device.serve_to.all())
serve_to = _get_device_instances(serve_to)
serve_to |= _get_discoverers(device_instance)
_filter_not_mapped_already_from(device_instance, serve_to)
client_to, _ = _expand_principal_list(device_instance.device.client_to.all())
client_to = _get_device_instances(client_to)
_filter_not_mapped_already_to(client_to, device_instance)
_make_mappings(device_instance, serve_to=serve_to, client_to=client_to)
@task(name="register_device_interest")
def register_interest_service_evt(device_instance_id, service_uuid):
"""Find devices in the network with the service."""
device_instance = ConnectedDevice.objects.get(id=device_instance_id)
def _discovery_allowed(other_instance):
for principal in other_instance.device.discoverable_by.all():
if principal.name == "*":
return True
elif principal.name == device_instance.device.name:
return True
elif isinstance(principal, PrincipalGroup):
for member in principal.members.all():
if member.name == device_instance.device.name:
return True
return False
potential_servers = set(x.device_instance for x in \
ServiceInstance.objects.filter(service__uuid=service_uuid))
potential_servers = set(x for x in potential_servers if _discovery_allowed(x))
_filter_not_mapped_already_to(potential_servers, device_instance)
_make_mappings(device_instance, client_to=potential_servers)
@periodic_task(
run_every=timedelta(seconds=60),
name="timeout_gateways",
ignore_result=True
)
def timeout_gateways():
"""Remove lingering gateway sessions"""
logger.info("Timing out gateway instances.")
threshold = timezone.now() - timedelta(seconds=GATEWAY_CONNECTION_TIMEOUT)
ConnectedGateway.objects.filter(is_connected=False,
last_updated__lt=threshold).delete()
@periodic_task(
run_every=timedelta(seconds=60),
name="timeout_access_control_state",
ignore_result=True
)
def timeout_access_control_state():
"""Obliterate any expired state."""
logger.info("Timing out access control state.")
current_time = timezone.now()
AdminAuthInstance.objects.filter(expire__lt=current_time).delete()
UserAuthInstance.objects.filter(expire__lt=current_time).delete()
PasscodeAuthInstance.objects.filter(expire__lt=current_time).delete()
ExclusiveLease.objects.filter(expire__lt=current_time).delete()
| apache-2.0 | -5,559,685,777,284,174,000 | 31.508197 | 79 | 0.756598 | false |
EmanuelStoyanov/NomNom-Food-Delivery | tests.py | 1 | 4962 | import unittest
import database
import logged_menu
class database_tests(unittest.TestCase):
def setUp(self):
database.create_users_table()
database.create_restaurant_table()
database.create_orders_table()
database.create_taxes_table()
database.register('Barney', '420', "Mclarens")
def test_admin(self):
database.cursor.execute("SELECT username, password \
FROM users WHERE username = 'admin'")
admin = database.cursor.fetchone()
self.assertEqual(admin[0], 'admin')
self.assertEqual(admin[1], 'ADMINNOMNOM')
def test_register_username(self):
database.cursor.execute('SELECT username FROM users \
WHERE username = (?) AND password = (?)', ('Barney', '420'))
username = database.cursor.fetchone()
self.assertEqual(username[0], 'Barney')
def test_register_address(self):
database.cursor.execute('SELECT address FROM users \
WHERE username = (?) AND password = (?)', ('Barney', '420'))
username = database.cursor.fetchone()
self.assertEqual(username[0], 'Mclarens')
def test_login(self):
logged_user = database.login('Barney', '420')
self.assertEqual(logged_user.get_username(), 'Barney')
def test_login_wrong_password(self):
logged_user = database.login('Barney', '421')
self.assertFalse(logged_user)
def test_add_existing_restaurant(self):
database.create_menu_table('speedy')
self.assertFalse(database.create_menu_table('speedy'))
def test_add_pizza_price(self):
database.create_menu_table('speedy')
database.add('speedy', 'pizza', 3.50)
database.cursor.execute("SELECT price \
FROM speedy WHERE products = 'pizza'")
price = database.cursor.fetchone()
self.assertEqual(3.50, price[0])
def test_add_pizza_twice(self):
database.create_menu_table('speedy')
database.add('speedy', 'pizza', 3.50)
self.assertFalse(database.add('speedy', 'pizza', 4.00))
def test_close_a_closed_restaurant(self):
database.create_menu_table('speedy')
self.assertFalse(database.close('speedy'))
def test_open_closed_restaurant(self):
database.create_menu_table('speedy')
self.assertTrue(database.open('speedy'))
def test_open_an_opened_restaurant(self):
database.create_menu_table('subway')
database.open('subway')
self.assertFalse(database.open('subway'))
def test_close_an_open_restaurant(self):
database.create_menu_table('subway')
database.open('subway')
self.assertTrue(database.close('subway'))
def test_set_status(self):
database.create_menu_table('speedy')
database.update_status_restaurant('speedy', 'Not taking orders.')
database.cursor.execute("SELECT status \
FROM restaurants WHERE name = 'speedy'")
status = database.cursor.fetchone()
self.assertEqual('Not taking orders.', status[0])
def test_is_there_such_restaurant(self):
database.create_menu_table('speedy')
self.assertTrue(database.is_there_such_restaurant('speedy'))
def test_is_there_not_existing_restaurant(self):
self.assertFalse(database.is_there_such_restaurant('speedy'))
def test_is_open_closed(self):
database.create_menu_table('speedy')
self.assertFalse(database.is_open('speedy'))
def test_is_open_opened(self):
database.create_menu_table('speedy')
database.open('speedy')
self.assertTrue(database.is_open('speedy'))
def test_valid_product(self):
database.create_menu_table('speedy')
database.add('speedy', 'pizza', 3.5)
self.assertTrue(database.valid_product('speedy', 'pizza'))
def test_not_valid_product(self):
database.create_menu_table('speedy')
database.add('speedy', 'pizza', 3.5)
self.assertFalse(database.valid_product('speedy', 'spaghetti'))
def test_delivery_tax_existing_district(self):
database.add_district('Lulin', 4)
self.assertEqual((True, 4), database.delivery_tax('Lulin'))
def test_delivery_tax_unexisting_district(self):
self.assertEqual((False, 0), database.delivery_tax('Lulin'))
def test_status_unexisting_restaurant(self):
self.assertEqual("There is no such restaurant",
database.status_restaurant('speedy'))
def test_status_existing_restaurant(self):
database.create_menu_table('speedy')
self.assertEqual("Status is Not busy",
database.status_restaurant('speedy'))
def tearDown(self):
database.cursor.execute('DROP TABLE users')
database.cursor.execute('DROP TABLE restaurants')
database.cursor.execute('DROP table orders')
database.cursor.execute('DROP table taxes')
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -4,299,222,095,564,951,000 | 34.956522 | 73 | 0.644498 | false |
italopaiva/propositional-logic | operations.py | 1 | 6549 | """Describe the possible operations."""
from lp.interpreter import Interpreter, TruthTable, SetTruthTable
class Operation:
"""Base class for operations."""
def perform(self, *args):
"""Perform the operation."""
raise NotImplementedError
def parse(self, line):
"""
Generic parser for operations.
A line generally comes like that:
OPERATION, formula1, formula2, ...
So it returns the comma separated values without the operation as list.
"""
args = line.split(',')
return args[1:]
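        # Illustrative trace (hypothetical input; not from the original source):
        #   parse("EQ,p&q,q&p") -> ['p&q', 'q&p']
        # Note that split(',') does not strip whitespace, so any spaces after
        # the commas are kept in the returned formulas.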
class SemanticStatus(Operation):
"""Verify the semantic status of a formula."""
SYMBOL = 'S'
def perform(self, formula):
"""Check a formula semantic status."""
truth_table = TruthTable(formula)
valuations = truth_table.get_formula_valuations()
formula_values = []
for line, valuation in valuations.items():
formula_values.append(valuation[1])
status = self.check_status(formula_values)
return '[%s, [%s]]' % (status, truth_table.str_representation())
def check_status(self, formula_values):
"""Get the formulas semantic status based on its valuations."""
tautology = True in formula_values and False not in formula_values
contradiction = False in formula_values and True not in formula_values
if tautology:
status = "TAUTOLOGIA"
elif contradiction:
status = "CONTRADICAO"
else:
status = "CONTINGENCIA"
return status
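        # Worked examples (illustrative only):
        #   check_status([True, True])   -> "TAUTOLOGIA"
        #   check_status([False, False]) -> "CONTRADICAO"
        #   check_status([True, False])  -> "CONTINGENCIA"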
class SemanticEquivalence(Operation):
"""Verify if two formulas are semantic equivalent."""
SYMBOL = 'EQ'
def perform(self, formula1, formula2):
"""Check if the two formulas are equivalent."""
quid_pro_quo, truth_table = self.check_equivalence(formula1, formula2)
equivalent = 'SIM' if quid_pro_quo else 'NAO'
return '[%s, [%s]]' % (
equivalent,
truth_table.str_representation()
)
def check_equivalence(self, formula1, formula2):
"""."""
truth_table = SetTruthTable([formula1, formula2])
formula1 = Interpreter.parse_expression(formula1)
formula2 = Interpreter.parse_expression(formula2)
models1 = truth_table.get_formula_models(formula1.str_representation())
models2 = truth_table.get_formula_models(formula2.str_representation())
equivalent = True
for valuation_index, valuation in models1.items():
if valuation_index not in models2:
equivalent = False
break
if equivalent:
# Check if the second formula models are in the first formula
for valuation_index, valuation in models2.items():
if valuation_index not in models1:
equivalent = False
break
return equivalent, truth_table
class Consistency(Operation):
"""Verify if a set of formulas is consistent."""
SYMBOL = 'C'
def perform(self, formulas):
"""Check if the set of formulas is consistent."""
truth_table = SetTruthTable(formulas)
formulas_models = truth_table.get_formulas_set_models()
consistent = 'SIM' if formulas_models else 'NAO'
return '[%s, [%s]]' % (
consistent,
truth_table.str_representation()
)
def parse(self, line):
"""Parse a bracketed, comma separated formulas into a list."""
# Remove the operation symbol from the line
line = line.replace(self.SYMBOL, '')
# Remove the whitespaces and the first character (that will be a comma)
line = "".join(line.split())[1:]
# Remove the brackets of the string
line = line.replace('[', '').replace(']', '')
# Split the line on comma to get all formulas of the set as list
args = line.split(',')
return [args]
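        # Illustrative trace (hypothetical input; not from the original source):
        #   parse("C, [p&q, ~p]") -> [['p&q', '~p']]
        # Whitespace and brackets are removed before splitting, and the result
        # is wrapped so that perform() receives the whole set as one argument.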
class LogicConsequence(Operation):
"""Verify if a formula is logic consequence of a set of formulas."""
SYMBOL = 'CL'
def perform(self, formulas_set, formula):
"""Check if the formula is logic consequence of the formulas_set."""
if '' in formulas_set and len(formulas_set) is 1:
return self.is_logic_consequence_of_empty_set(formula)
truth_table = SetTruthTable(formulas_set + [formula])
formula = Interpreter.parse_expression(formula)
formulas = {}
for f in formulas_set:
form = Interpreter.parse_expression(f)
formulas[form.str_representation()] = form
set_models = truth_table.get_formulas_set_models(formulas)
formula_models = truth_table.get_formula_models(
formula.str_representation()
)
logic_consequence = True
for valuation_index, valuation in set_models.items():
if valuation_index not in formula_models:
logic_consequence = False
break
consequence = 'SIM' if logic_consequence else 'NAO'
return '[%s, [%s]]' % (
consequence,
truth_table.str_representation()
)
def is_logic_consequence_of_empty_set(self, formula):
"""Check if a formula is logic consequence of the empty set."""
truth_table = TruthTable(formula)
valuations = truth_table.get_formula_valuations()
logic_consequence = True
for valuation_index, valuation in valuations.items():
if valuation[1] is False:
logic_consequence = False
break
consequence = 'SIM' if logic_consequence else 'NAO'
return '[%s, [%s]]' % (
consequence,
truth_table.str_representation()
)
def parse(self, line):
"""Parse a bracketed, comma separated formulas into a list."""
# Remove the operation symbol from the line
line = line.replace(self.SYMBOL, '')
# Remove the whitespaces and the first character (that will be a comma)
line = "".join(line.split())[1:]
# Remove the brackets of the string
line = line.replace('[', '').replace(']', '')
# Split the line on comma to get all formulas of the set as list
args = line.split(',')
# The set of formulas will be all the elements but the last one
formulas_set = args[:-1]
# The formula will be the last element in the list
formula = args[-1]
return [formulas_set, formula]
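    # Hedged example (input format inferred): for a line 'CL, [A, B, C]' the
    # steps above yield formulas_set == ['A', 'B'] and formula == 'C'; the last
    # element is always treated as the candidate logical consequence.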
| mit | 4,685,439,699,903,485,000 | 31.909548 | 79 | 0.602077 | false |
leiferikb/bitpop | src/chrome/common/extensions/docs/server2/servlet.py | 1 | 4143 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class RequestHeaders(object):
  '''A custom dictionary implementation for request headers which ignores the
  case of header names, since different HTTP libraries seem to mangle them.
'''
def __init__(self, dict_):
if isinstance(dict_, RequestHeaders):
self._dict = dict_
else:
self._dict = dict((k.lower(), v) for k, v in dict_.iteritems())
def get(self, key, default=None):
return self._dict.get(key.lower(), default)
def __repr__(self):
return repr(self._dict)
def __str__(self):
return repr(self._dict)
class Request(object):
'''Request data.
'''
def __init__(self, path, host, headers):
self.path = path.lstrip('/')
self.host = host.rstrip('/')
self.headers = RequestHeaders(headers)
@staticmethod
def ForTest(path, host='http://developer.chrome.com', headers=None):
return Request(path, host, headers or {})
def __repr__(self):
return 'Request(path=%s, host=%s, headers=%s)' % (
self.path, self.host, self.headers)
def __str__(self):
return repr(self)
class _ContentBuilder(object):
'''Builds the response content.
'''
def __init__(self):
self._buf = []
def Append(self, content):
if isinstance(content, unicode):
content = content.encode('utf-8', 'replace')
self._buf.append(content)
def ToString(self):
self._Collapse()
return self._buf[0]
def __str__(self):
return self.ToString()
def __len__(self):
return len(self.ToString())
def _Collapse(self):
self._buf = [''.join(self._buf)]
class Response(object):
'''The response from Get().
'''
def __init__(self, content=None, headers=None, status=None):
self.content = _ContentBuilder()
if content is not None:
self.content.Append(content)
self.headers = {}
if headers is not None:
self.headers.update(headers)
self.status = status
@staticmethod
def Ok(content, headers=None):
'''Returns an OK (200) response.
'''
return Response(content=content, headers=headers, status=200)
@staticmethod
def Redirect(url, permanent=False):
'''Returns a redirect (301 or 302) response.
'''
status = 301 if permanent else 302
return Response(headers={'Location': url}, status=status)
@staticmethod
def NotFound(content, headers=None):
'''Returns a not found (404) response.
'''
return Response(content=content, headers=headers, status=404)
@staticmethod
def NotModified(content, headers=None):
return Response(content=content, headers=headers, status=304)
@staticmethod
def InternalError(content, headers=None):
'''Returns an internal error (500) response.
'''
return Response(content=content, headers=headers, status=500)
def Append(self, content):
'''Appends |content| to the response content.
'''
    self.content.Append(content)
def AddHeader(self, key, value):
'''Adds a header to the response.
'''
self.headers[key] = value
def AddHeaders(self, headers):
'''Adds several headers to the response.
'''
self.headers.update(headers)
def SetStatus(self, status):
self.status = status
def GetRedirect(self):
if self.headers.get('Location') is None:
return (None, None)
return (self.headers.get('Location'), self.status == 301)
def IsNotFound(self):
return self.status == 404
def __eq__(self, other):
return (isinstance(other, self.__class__) and
str(other.content) == str(self.content) and
other.headers == self.headers and
other.status == self.status)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return 'Response(content=%s bytes, status=%s, headers=%s)' % (
len(self.content), self.status, self.headers)
def __str__(self):
return repr(self)
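def _ExampleResponseUsage():
  # A minimal usage sketch, not part of the original API; it only exercises the
  # helpers defined above and is safe to delete.
  ok = Response.Ok('hello world', headers={'Content-Type': 'text/plain'})
  ok.AddHeader('Cache-Control', 'no-cache')
  assert not ok.IsNotFound()
  moved = Response.Redirect('/new-location', permanent=True)
  assert moved.GetRedirect() == ('/new-location', True)
  return ok, moved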
class Servlet(object):
def __init__(self, request):
self._request = request
def Get(self):
'''Returns a Response.
'''
    raise NotImplementedError()
| gpl-3.0 | 2,190,137,133,376,314,000 | 25.056604 | 73 | 0.64253 | false |
lnmds/jorge | ext/gambling.py | 1 | 5013 | import asyncio
import decimal
import random
import discord
from discord.ext import commands
from .common import Cog
EMOJI_POOL = ':thinking: :snail: :shrug: :chestnut: :ok_hand: :eggplant:'.split()
BET_MULTIPLIER_EMOJI = ':thinking:'
X4_EMOJI = [':snail:', ':chestnut:', ':shrug:']
X6_EMOJI = [':eggplant:', ':ok_hand:']
class Gambling(Cog):
"""Gambling commands."""
def __init__(self, bot):
super().__init__(bot)
self.duels = {}
@commands.command()
async def duel(self, ctx, challenged_user: discord.User, amount: decimal.Decimal):
"""Duel a user for coins.
        The winner of the duel is the first person to send a message after
        josé says "GO".
"""
amount = round(amount, 2)
if amount > 3:
await ctx.send('Can\'t duel with more than 3JC.')
return
if amount <= 0:
await ctx.send('lul')
return
challenger_user = ctx.author
challenger = ctx.author.id
challenged = challenged_user.id
if challenger in self.duels:
await ctx.send('You are already in a duel.')
return
challenger_acc = await self.jcoin.get_account(challenger)
if challenger_acc is None:
await ctx.send('You don\'t have a wallet.')
return
challenged_acc = await self.jcoin.get_account(challenged)
if challenged_acc is None:
await ctx.send('Challenged person doesn\'t have a wallet.')
return
if amount > challenger_acc['amount'] or amount > challenged_acc['amount']:
            await ctx.send('One of you doesn\'t have enough funds for the duel.')
return
await ctx.send(f'{challenged_user}, you got challenged for a duel :gun: by {challenger_user} with a total of {amount}JC, accept it? (y/n)')
def yn_check(msg):
return msg.author.id == challenged and msg.channel == ctx.channel
try:
msg = await self.bot.wait_for('message', timeout=10, check=yn_check)
except asyncio.TimeoutError:
await ctx.send('timeout reached')
return
if msg.content != 'y':
await ctx.send('Challenged person didn\'t say a lowercase y.')
return
self.duels[challenger] = {
'challenged': challenged,
'amount': amount,
}
countdown = 3
countdown_msg = await ctx.send(f'First to send a message wins! {countdown}')
for i in reversed(range(1, 4)):
await countdown_msg.edit(content=f'{i}...')
await asyncio.sleep(1)
await asyncio.sleep(random.randint(2, 7))
await ctx.send('**GO!**')
duelists = [challenged, challenger]
def duel_check(msg):
return msg.channel == ctx.channel and msg.author.id in duelists
try:
msg = await self.bot.wait_for('message', timeout=5, check=duel_check)
except asyncio.TimeoutError:
await ctx.send('u guys suck')
return
winner = msg.author.id
duelists.remove(winner)
loser = duelists[0]
try:
await self.jcoin.transfer(loser, winner, amount)
except self.jcoin.TransferError as err:
            await ctx.send(f'Failed to transfer: {err!r}')
return
await ctx.send(f'<@{winner}> won {amount}JC.')
del self.duels[challenger]
@commands.command()
async def slots(self, ctx, amount: decimal.Decimal):
"""little slot machine"""
await self.jcoin.ensure_taxbank(ctx)
await self.jcoin.pricing(ctx, amount)
res = []
slots = [random.choice(EMOJI_POOL) for i in range(3)]
res.append(' '.join(slots))
bet_multiplier = slots.count(BET_MULTIPLIER_EMOJI) * 2
for emoji in slots:
if slots.count(emoji) == 3:
if emoji in X4_EMOJI:
bet_multiplier = 4
elif emoji in X6_EMOJI:
bet_multiplier = 6
applied_amount = amount * bet_multiplier
res.append(f'**Multiplier**: {bet_multiplier}x')
res.append(f'bet: {amount}, won: {applied_amount}')
if applied_amount > 0:
try:
await self.jcoin.transfer(ctx.guild.id, ctx.author.id, applied_amount)
except self.jcoin.TransferError as err:
await ctx.send(f'err: {err!r}')
else:
res.append(':peach:')
await ctx.send('\n'.join(res))
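    # Worked example of the payout rules above (a sketch, not a new feature):
    # three :ok_hand: rolls hit the X6_EMOJI branch, so a 2JC bet pays 12JC;
    # two :thinking: rolls give bet_multiplier = 4 (2 per :thinking:); any
    # other combination leaves the multiplier at 0 and the player gets :peach:.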
@commands.command()
async def flip(self, ctx):
"""Flip a coin. (49%, 49%, 2%)"""
p = random.random()
if p < .49:
await ctx.send('https://i.imgur.com/oEEkybO.png')
elif .49 < p < .98:
await ctx.send('https://i.imgur.com/c9smEW6.png')
else:
await ctx.send('https://i.imgur.com/yDPUp3P.png')
def setup(bot):
bot.add_cog(Gambling(bot))
| mit | -950,188,890,208,021,200 | 29.560976 | 147 | 0.56265 | false |
erinspace/osf.io | website/search_migration/__init__.py | 1 | 34093 | JSON_UPDATE_NODES_SQL = """
SELECT json_agg(
json_build_object(
'_type', CASE
WHEN N.type = 'osf.registration'
THEN 'registration'
WHEN PREPRINT.URL IS NOT NULL
THEN 'preprint'
WHEN PARENT_GUID._id IS NULL
THEN 'project'
ELSE 'component'
END
, '_index', '{index}'
, 'doc_as_upsert', TRUE
, '_id', NODE_GUID._id
, '_op_type', 'update'
, 'doc', json_build_object(
'contributors', (SELECT json_agg(json_build_object(
'url', CASE
WHEN U.is_active
THEN '/' || USER_GUID._id || '/'
ELSE NULL
END
, 'fullname', U.fullname
))
FROM osf_osfuser AS U
INNER JOIN osf_contributor AS CONTRIB
ON (U.id = CONTRIB.user_id)
LEFT OUTER JOIN osf_guid AS USER_GUID
ON (U.id = USER_GUID.object_id AND (USER_GUID.content_type_id = (SELECT id FROM django_content_type WHERE model = 'osfuser')))
WHERE (CONTRIB.node_id = N.id AND CONTRIB.visible = TRUE))
, 'extra_search_terms', CASE
WHEN strpos(N.title, '-') + strpos(N.title, '_') + strpos(N.title, '.') > 0
THEN translate(N.title, '-_.', ' ')
ELSE ''
END
, 'normalized_title', N.title
, 'registered_date', N.registered_date
, 'id', NODE_GUID._id
, 'category', CASE
WHEN N.type = 'osf.registration'
THEN 'registration'
WHEN PREPRINT.URL IS NOT NULL
THEN 'preprint'
WHEN PARENT_GUID._id IS NULL
THEN 'project'
ELSE 'component'
END
, 'title', N.title
, 'parent_id', PARENT_GUID._id
, 'embargo_end_date', EMBARGO.DATA ->> 'end_date'
, 'is_pending_registration', CASE
WHEN N.type = 'osf.registration'
THEN REGISTRATION_APPROVAL.PENDING
ELSE FALSE
END
, 'is_pending_embargo', EMBARGO.DATA ->> 'pending'
, 'is_registration', N.type = 'osf.registration'
, 'is_pending_retraction', RETRACTION.state = 'pending'
, 'is_retracted', RETRACTION.state = 'approved'
, 'preprint_url', PREPRINT.URL
, 'boost', CASE
WHEN N.type = 'osf.node'
THEN 2
ELSE 1
END
, 'public', N.is_public
, 'description', N.description
, 'tags', (CASE
WHEN TAGS.names IS NOT NULL
THEN TAGS.names
ELSE
'{{}}'::TEXT[]
END)
, 'affiliated_institutions', (SELECT array_agg(INST.name)
FROM osf_institution AS INST
INNER JOIN osf_abstractnode_affiliated_institutions
ON (INST.id = osf_abstractnode_affiliated_institutions.institution_id)
WHERE osf_abstractnode_affiliated_institutions.abstractnode_id = N.id)
, 'license', json_build_object(
'text', LICENSE.text
, 'name', LICENSE.name
, 'id', LICENSE.license_id
, 'copyright_holders', LICENSE.copyright_holders
, 'year', LICENSE.year
)
, 'url', '/' || NODE_GUID._id || '/'
, 'date_created', N.created
, 'wikis', CASE
WHEN RETRACTION.state != 'approved'
THEN
(SELECT json_agg(json_build_object(
translate(WP.page_name, '.', ' '), WV."content"
))
FROM addons_wiki_wikipage AS WP
LEFT JOIN LATERAL (
SELECT
content,
identifier
FROM addons_wiki_wikiversion
WHERE addons_wiki_wikiversion.wiki_page_id = WP.id
ORDER BY identifier DESC
LIMIT 1
) WV ON TRUE
WHERE WP.node_id = N.id
AND WP.deleted IS NULL)
ELSE
'{{}}'::JSON
END
)
)
)
FROM osf_abstractnode AS N
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = N.id
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'abstractnode')
LIMIT 1
) NODE_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = (
SELECT parent_id
FROM osf_noderelation
WHERE child_id = N.id
AND is_node_link = FALSE
LIMIT 1)
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'abstractnode')
LIMIT 1
) PARENT_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT array_agg(TAG.name) as names
FROM osf_tag AS TAG
INNER JOIN osf_abstractnode_tags ON (TAG.id = osf_abstractnode_tags.tag_id)
WHERE (TAG.system = FALSE AND osf_abstractnode_tags.abstractnode_id = N.id)
LIMIT 1
) TAGS ON TRUE
LEFT JOIN LATERAL (
SELECT
osf_nodelicense.license_id,
osf_nodelicense.name,
osf_nodelicense.text,
osf_nodelicenserecord.year,
osf_nodelicenserecord.copyright_holders
FROM osf_nodelicenserecord
INNER JOIN osf_abstractnode ON (osf_nodelicenserecord.id = osf_abstractnode.node_license_id)
LEFT OUTER JOIN osf_nodelicense ON (osf_nodelicenserecord.node_license_id = osf_nodelicense.id)
WHERE osf_abstractnode.id = N.id
) LICENSE ON TRUE
LEFT JOIN LATERAL (SELECT (
CASE WHEN N.type = 'osf.registration'
THEN
(CASE WHEN N.retraction_id IS NOT NULL
THEN
(SELECT state
FROM osf_retraction
WHERE id = N.retraction_id)
ELSE
(WITH RECURSIVE ascendants AS (
SELECT
parent_id,
child_id,
1 AS LEVEL,
ARRAY [child_id] AS cids,
'' :: VARCHAR AS state
FROM osf_noderelation
WHERE is_node_link IS FALSE AND child_id = N.id
UNION ALL
SELECT
S.parent_id,
D.child_id,
D.level + 1,
D.cids || S.child_id,
R.state
FROM ascendants AS D
INNER JOIN osf_noderelation AS S
ON D.parent_id = S.child_id
INNER JOIN osf_abstractnode AS A
ON D.child_id = A.id
INNER JOIN osf_retraction AS R
ON A.retraction_id = R.id
WHERE S.is_node_link IS FALSE
AND N.id = ANY (cids)
) SELECT state
FROM ascendants
WHERE child_id = N.id
AND state IS NOT NULL
ORDER BY LEVEL ASC
LIMIT 1)
END)
ELSE
(SELECT '' :: VARCHAR AS state)
END
)) RETRACTION ON TRUE
LEFT JOIN LATERAL (
SELECT (
CASE WHEN N.type = 'osf.registration'
THEN (
CASE WHEN N.embargo_id IS NOT NULL
THEN (
SELECT json_build_object(
'pending', state = 'unapproved',
'end_date',
CASE WHEN state = 'approved'
THEN
TO_CHAR(end_date, 'Day, Mon DD, YYYY')
ELSE
NULL
END
) AS DATA
FROM osf_retraction
WHERE id = N.retraction_id
)
ELSE (
WITH RECURSIVE ascendants AS (
SELECT
parent_id,
child_id,
1 AS LEVEL,
ARRAY [child_id] AS cids,
'' :: VARCHAR AS state,
NULL :: TIMESTAMP WITH TIME ZONE AS end_date
FROM osf_noderelation
WHERE is_node_link IS FALSE AND child_id = N.id
UNION ALL
SELECT
S.parent_id,
D.child_id,
D.level + 1,
D.cids || S.child_id,
E.state,
E.end_date
FROM ascendants AS D
JOIN osf_noderelation AS S
ON D.parent_id = S.child_id
JOIN osf_abstractnode AS A
ON D.child_id = A.id
JOIN osf_embargo AS E
ON A.retraction_id = E.id
WHERE S.is_node_link IS FALSE
AND N.id = ANY (cids)
) SELECT json_build_object(
'pending', state = 'unapproved',
'end_date',
CASE WHEN state = 'approved'
THEN
TO_CHAR(end_date, 'Day, Mon DD, YYYY')
ELSE
NULL
END
) AS DATA
FROM ascendants
WHERE child_id = N.id
AND state IS NOT NULL
ORDER BY LEVEL ASC
LIMIT 1
) END
)
ELSE (
SELECT json_build_object(
'pending', FALSE,
'end_date', NULL
) AS DATA
) END
)
) EMBARGO ON TRUE
LEFT JOIN LATERAL ( SELECT (
CASE WHEN N.type = 'osf.registration' AND N.registration_approval_id IS NOT NULL
THEN (
SELECT state = 'unapproved' AS PENDING
FROM osf_registrationapproval
WHERE id = N.retraction_id
)
ELSE (
SELECT FALSE AS PENDING
) END)
) REGISTRATION_APPROVAL ON TRUE
LEFT JOIN LATERAL (
SELECT
CASE WHEN ((osf_abstractprovider.domain_redirect_enabled AND osf_abstractprovider.domain IS NOT NULL) OR
osf_abstractprovider._id = 'osf')
THEN
'/' || (SELECT G._id
FROM osf_guid G
WHERE (G.object_id = P.id)
AND (G.content_type_id = (SELECT id FROM django_content_type WHERE model = 'preprintservice'))
ORDER BY created ASC, id ASC
LIMIT 1) || '/'
ELSE
'/preprints/' || osf_abstractprovider._id || '/' || (SELECT G._id
FROM osf_guid G
WHERE (G.object_id = P.id)
AND (G.content_type_id = (SELECT id FROM django_content_type WHERE model = 'preprintservice'))
ORDER BY created ASC, id ASC
LIMIT 1) || '/'
END AS URL
FROM osf_preprintservice P
INNER JOIN osf_abstractprovider ON P.provider_id = osf_abstractprovider.id
WHERE P.node_id = N.id
AND P.machine_state != 'initial' -- is_preprint
AND N.preprint_file_id IS NOT NULL
AND N.is_public = TRUE
AND N._is_preprint_orphan != TRUE
ORDER BY P.is_published DESC, P.created DESC
LIMIT 1
) PREPRINT ON TRUE
WHERE (TYPE = 'osf.node' OR TYPE = 'osf.registration' OR TYPE = 'osf.quickfilesnode')
AND is_public IS TRUE
AND is_deleted IS FALSE
AND (spam_status IS NULL OR NOT (spam_status = 2 or (spam_status = 1 AND {spam_flagged_removed_from_search})))
AND NOT (UPPER(N.title::text) LIKE UPPER('%Bulk stress 201%') OR UPPER(N.title::text) LIKE UPPER('%Bulk stress 202%') OR UPPER(N.title::text) LIKE UPPER('%OSF API Registration test%') -- is_qa_node
OR N.id IN -- Comes from website.settings.DO_NOT_INDEX_LIST
(SELECT THRUTAGS.abstractnode_id
FROM osf_abstractnode_tags THRUTAGS
INNER JOIN osf_tag TAGS ON (THRUTAGS.tag_id = TAGS.id)
WHERE (TAGS.name = 'qatest'
OR TAGS.name = 'qa test')))
AND NOT (N.id IN -- node.archiving
(SELECT AJ.dst_node_id -- May need to be made recursive as AJ table grows
FROM osf_archivejob AJ
WHERE (AJ.status != 'FAILURE' AND AJ.status != 'SUCCESS'
AND AJ.dst_node_id IS NOT NULL)))
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_UPDATE_FILES_SQL = """
SELECT json_agg(
json_build_object(
'_type', 'file'
, '_index', '{index}'
, 'doc_as_upsert', TRUE
, '_id', F._id
, '_op_type', 'update'
, 'doc', json_build_object(
'id', F._id
, 'deep_url', CASE WHEN F.provider = 'osfstorage'
THEN '/' || (NODE.DATA ->> 'guid') || '/files/' || F.provider || '/' || F._id
ELSE '/' || (NODE.DATA ->> 'guid') || '/files/' || F.provider || F._path
END
, 'guid_url', CASE WHEN FILE_GUID._id IS NOT NULL
THEN '/' || FILE_GUID._id || '/'
ELSE NULL
END
, 'tags', (CASE
WHEN TAGS.names IS NOT NULL
THEN TAGS.names
ELSE
'{{}}'::TEXT[]
END)
, 'name', F.name
, 'category', 'file'
, 'node_url', '/' || (NODE.DATA ->> 'guid') || '/'
, 'node_title', NODE.DATA ->> 'title'
, 'parent_id', NODE.DATA ->> 'parent_guid'
, 'is_registration', NODE.DATA ->> 'is_registration' = 'true' -- Weirdness from the lateral join causes this to be a string
, 'is_retracted', NODE.DATA ->> 'is_retracted' = 'true' -- Weirdness from the lateral join causes this to be a string
, 'extra_search_terms', CASE WHEN strpos(F.name, '-') + strpos(F.name, '_') + strpos(F.name, '.') > 0
THEN translate(F.name, '-_.', ' ')
ELSE ''
END
)
)
)
FROM osf_basefilenode AS F
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = F.id
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'basefilenode')
LIMIT 1
) FILE_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT array_agg(TAG.name) AS names
FROM osf_tag AS TAG
INNER JOIN osf_basefilenode_tags ON (TAG.id = osf_basefilenode_tags.tag_id)
WHERE (TAG.system = FALSE AND osf_basefilenode_tags.basefilenode_id = F.id)
) TAGS ON TRUE
LEFT JOIN LATERAL (
SELECT json_build_object(
'is_registration', (CASE WHEN N.type = 'osf.registration'
THEN TRUE
ELSE FALSE
END)
, 'title', N.title
, 'guid', (SELECT _id
FROM osf_guid
WHERE object_id = N.id
AND content_type_id = (SELECT id
FROM django_content_type
WHERE model = 'abstractnode')
LIMIT 1)
, 'parent_guid', (SELECT _id
FROM osf_guid
WHERE object_id = (
SELECT parent_id
FROM osf_noderelation
WHERE child_id = N.id
AND is_node_link = FALSE
LIMIT 1)
AND content_type_id = (SELECT id
FROM django_content_type
WHERE model = 'abstractnode')
LIMIT 1)
, 'is_retracted', (CASE WHEN N.type = 'osf.registration'
THEN
(CASE WHEN N.retraction_id IS NOT NULL
THEN
(SELECT state = 'approved'
FROM osf_retraction
WHERE id = N.retraction_id)
ELSE
(WITH RECURSIVE ascendants AS (
SELECT
parent_id,
child_id,
1 AS LEVEL,
ARRAY [child_id] AS cids,
FALSE AS is_retracted
FROM osf_noderelation
WHERE is_node_link IS FALSE AND child_id = N.id
UNION ALL
SELECT
S.parent_id,
D.child_id,
D.level + 1,
D.cids || S.child_id,
R.state = 'approved' AS is_retracted
FROM ascendants AS D
INNER JOIN osf_noderelation AS S
ON D.parent_id = S.child_id
INNER JOIN osf_abstractnode AS A
ON D.child_id = A.id
INNER JOIN osf_retraction AS R
ON A.retraction_id = R.id
WHERE S.is_node_link IS FALSE
AND N.id = ANY (cids)
) SELECT is_retracted
FROM ascendants
WHERE child_id = N.id
ORDER BY is_retracted DESC -- Put TRUE at the top
LIMIT 1)
END)
ELSE
FALSE
END)
) AS DATA
FROM osf_abstractnode N
WHERE (N.id = F.target_object_id AND (
SELECT id FROM "django_content_type" WHERE (
"django_content_type"."model" = 'abstractnode' AND "django_content_type"."app_label" = 'osf'
)) = F.target_content_type_id)
LIMIT 1
) NODE ON TRUE
WHERE name IS NOT NULL
AND name != ''
AND target_object_id = ANY (SELECT id
FROM osf_abstractnode
WHERE (TYPE = 'osf.node' OR TYPE = 'osf.registration' OR TYPE = 'osf.quickfilesnode')
AND is_public IS TRUE
AND is_deleted IS FALSE
AND (spam_status IS NULL OR NOT (spam_status = 2 or (spam_status = 1 AND {spam_flagged_removed_from_search})))
AND NOT (UPPER(osf_abstractnode.title::text) LIKE UPPER('%Bulk stress 201%') OR UPPER(osf_abstractnode.title::text) LIKE UPPER('%Bulk stress 202%') OR UPPER(osf_abstractnode.title::text) LIKE UPPER('%OSF API Registration test%') -- is_qa_node
OR "osf_abstractnode"."id" IN
(SELECT THRUTAGS.abstractnode_id
FROM osf_abstractnode_tags THRUTAGS
INNER JOIN osf_tag TAGS ON (THRUTAGS.tag_id = TAGS.id)
WHERE (TAGS.name = 'qatest'
OR TAGS.name = 'qa test')))
AND NOT (osf_abstractnode.id IN
(SELECT AJ.dst_node_id
FROM osf_archivejob AJ
WHERE (AJ.status != 'FAILURE' AND AJ.status != 'SUCCESS'
AND AJ.dst_node_id IS NOT NULL)))
)
AND target_content_type_id = (SELECT id FROM "django_content_type" WHERE ("django_content_type"."model" = 'abstractnode' AND "django_content_type"."app_label" = 'osf'))
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_UPDATE_USERS_SQL = """
SELECT json_agg(
json_build_object(
'_type', 'user'
, '_index', '{index}'
, 'doc_as_upsert', TRUE
, '_id', USER_GUID._id
, '_op_type', 'update'
, 'doc', json_build_object(
'id', USER_GUID._id
, 'user', U.fullname
, 'normalized_user', U.fullname
, 'normalized_names', json_build_object(
'fullname', U.fullname
, 'given_name', U.given_name
, 'family_name', U.family_name
, 'middle_names', U.middle_names
, 'suffix', U.suffix
)
, 'names', json_build_object(
'fullname', U.fullname
, 'given_name', U.given_name
, 'family_name', U.family_name
, 'middle_names', U.middle_names
, 'suffix', U.suffix
)
, 'job', CASE
WHEN U.jobs :: JSON -> 0 -> 'institution' IS NOT NULL
THEN
(U.jobs :: JSON -> 0 -> 'institution') :: TEXT
ELSE
''
END
, 'job_title', (CASE
WHEN U.jobs :: JSON -> 0 -> 'title' IS NOT NULL
THEN
(U.jobs :: JSON -> 0 -> 'title') :: TEXT
ELSE
''
END)
, 'all_jobs', (SELECT array_agg(DISTINCT (JOB :: JSON -> 'institution') :: TEXT)
FROM
(SELECT json_array_elements(jobs :: JSON) AS JOB
FROM osf_osfuser
WHERE id = U.id
) AS JOBS)
, 'school', (CASE
WHEN U.schools :: JSON -> 0 -> 'institution' IS NOT NULL
THEN
(U.schools :: JSON -> 0 -> 'institution') :: TEXT
ELSE
''
END)
, 'all_schools', (SELECT array_agg(DISTINCT (SCHOOL :: JSON -> 'institution') :: TEXT)
FROM
(SELECT json_array_elements(schools :: JSON) AS SCHOOL
FROM osf_osfuser
WHERE id = U.id
) AS SCHOOLS)
, 'category', 'user'
, 'degree', (CASE
WHEN U.schools :: JSON -> 0 -> 'degree' IS NOT NULL
THEN
(U.schools :: JSON -> 0 -> 'degree') :: TEXT
ELSE
''
END)
, 'social', (SELECT json_object_agg(
key,
(
CASE
WHEN key = 'orcid'
THEN 'http://orcid.org/' || value
WHEN key = 'github'
THEN 'http://github.com/' || value
WHEN key = 'scholar'
THEN 'http://scholar.google.com/citations?user=' || value
WHEN key = 'twitter'
THEN 'http://twitter.com/' || value
WHEN key = 'profileWebsites'
THEN value
WHEN key = 'linkedIn'
THEN 'https://www.linkedin.com/' || value
WHEN key = 'impactStory'
THEN 'https://impactstory.org/u/' || value
WHEN key = 'researcherId'
THEN 'http://researcherid.com/rid/' || value
WHEN key = 'researchGate'
THEN 'https://researchgate.net/profile/' || value
WHEN key = 'academiaInstitution'
THEN 'https://' || value
WHEN key = 'academiaProfileID'
THEN '.academia.edu/' || value
WHEN key = 'baiduScholar'
THEN 'http://xueshu.baidu.com/scholarID/' || value
WHEN key = 'ssrn'
THEN 'http://papers.ssrn.com/sol3/cf_dev/AbsByAuth.cfm?per_id=' || value
END
))
FROM jsonb_each_text(
(SELECT social
FROM osf_osfuser
WHERE id = U.id)
)
WHERE value IS NOT NULL
AND value != ''
AND value != '[]'
)
, 'boost', 2
)
)
)
FROM osf_osfuser AS U
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = U.id
AND content_type_id = ANY (SELECT id
FROM django_content_type
WHERE model = 'osfuser')
LIMIT 1
) USER_GUID ON TRUE
WHERE is_active = TRUE
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_DELETE_NODES_SQL = """
SELECT json_agg(
json_build_object(
'_type', CASE
WHEN N.type = 'osf.registration'
THEN 'registration'
WHEN PREPRINT.is_preprint > 0
THEN 'preprint'
WHEN PARENT_GUID._id IS NULL
THEN 'project'
ELSE 'component'
END
, '_index', '{index}'
, '_id', NODE_GUID._id
, '_op_type', 'delete'
)
)
FROM osf_abstractnode AS N
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = N.id
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'abstractnode')
LIMIT 1
) NODE_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = (
SELECT parent_id
FROM osf_noderelation
WHERE child_id = N.id
AND is_node_link = FALSE
LIMIT 1)
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'abstractnode')
LIMIT 1
) PARENT_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT COUNT(P.id) as is_preprint
FROM osf_preprintservice P
WHERE P.node_id = N.id
AND P.machine_state != 'initial'
AND N.preprint_file_id IS NOT NULL
AND N.is_public = TRUE
AND N._is_preprint_orphan != TRUE
LIMIT 1
) PREPRINT ON TRUE
WHERE NOT ((TYPE = 'osf.node' OR TYPE = 'osf.registration' OR TYPE = 'osf.quickfilesnode')
AND N.is_public IS TRUE
AND N.is_deleted IS FALSE
AND (spam_status IS NULL OR NOT (spam_status = 2 or (spam_status = 1 AND {spam_flagged_removed_from_search})))
AND NOT (UPPER(N.title::text) LIKE UPPER('%Bulk stress 201%') OR UPPER(N.title::text) LIKE UPPER('%Bulk stress 202%') OR UPPER(N.title::text) LIKE UPPER('%OSF API Registration test%') -- is_qa_node
OR N.id IN -- Comes from website.settings.DO_NOT_INDEX_LIST
(SELECT THRUTAGS.abstractnode_id
FROM osf_abstractnode_tags THRUTAGS
INNER JOIN osf_tag TAGS ON (THRUTAGS.tag_id = TAGS.id)
WHERE (TAGS.name = 'qatest'
OR TAGS.name = 'qa test')))
AND NOT (N.id IN -- node.archiving
(SELECT AJ.dst_node_id -- May need to be made recursive as AJ table grows
FROM osf_archivejob AJ
WHERE (AJ.status != 'FAILURE' AND AJ.status != 'SUCCESS'
AND AJ.dst_node_id IS NOT NULL)))
)
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_DELETE_FILES_SQL = """
SELECT json_agg(json_build_object(
'_type', 'file'
, '_index', '{index}'
, '_id', F._id
, '_op_type', 'delete'
))
FROM osf_basefilenode AS F
WHERE NOT (name IS NOT NULL
AND name != ''
AND target_object_id = ANY (SELECT id
FROM osf_abstractnode
WHERE (TYPE = 'osf.node' OR TYPE = 'osf.registration' OR TYPE = 'osf.quickfilesnode')
AND is_public IS TRUE
AND is_deleted IS FALSE
AND (spam_status IS NULL OR NOT (spam_status = 2 or (spam_status = 1 AND {spam_flagged_removed_from_search})))
-- settings.SPAM_FLAGGED_REMOVE_FROM_SEARCH
-- node.archiving or is_qa_node
AND NOT (UPPER(osf_abstractnode.title::text) LIKE UPPER('%Bulk stress 201%') OR UPPER(osf_abstractnode.title::text) LIKE UPPER('%Bulk stress 202%') OR UPPER(osf_abstractnode.title::text) LIKE UPPER('%OSF API Registration test%') -- is_qa_node
OR "osf_abstractnode"."id" IN
(SELECT THRUTAGS.abstractnode_id
FROM osf_abstractnode_tags THRUTAGS
INNER JOIN osf_tag TAGS ON (THRUTAGS.tag_id = TAGS.id)
WHERE (TAGS.name = 'qatest'
OR TAGS.name = 'qa test')))
AND NOT (osf_abstractnode.id IN
(SELECT AJ.dst_node_id
FROM osf_archivejob AJ
WHERE (AJ.status != 'FAILURE' AND AJ.status != 'SUCCESS'
AND AJ.dst_node_id IS NOT NULL)))
)
)
AND target_content_type_id = (SELECT id FROM "django_content_type" WHERE ("django_content_type"."model" = 'abstractnode' AND "django_content_type"."app_label" = 'osf'))
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_DELETE_USERS_SQL = """
SELECT json_agg(
json_build_object(
'_type', 'user'
, '_index', '{index}'
, '_id', USER_GUID._id
, '_op_type', 'delete'
)
)
FROM osf_osfuser AS U
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = U.id
AND content_type_id = ANY (SELECT id
FROM django_content_type
WHERE model = 'osfuser')
LIMIT 1
) USER_GUID ON TRUE
WHERE is_active != TRUE
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
| apache-2.0 | -1,947,196,518,070,275,300 | 44.885599 | 273 | 0.406506 | false |
duncanmmacleod/gwpy | gwpy/plot/axes.py | 1 | 21895 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extension of `~matplotlib.axes.Axes` for gwpy
"""
import warnings
from functools import wraps
from math import log
from numbers import Number
import numpy
from astropy.time import Time
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
from matplotlib.axes import Axes as _Axes
from matplotlib.axes._base import _process_plot_var_args
from matplotlib.collections import PolyCollection
from matplotlib.lines import Line2D
from matplotlib.projections import register_projection
from . import (Plot, colorbar as gcbar)
from .colors import format_norm
from .gps import GPS_SCALES
from .legend import HandlerLine2D
from ..time import to_gps
__author__ = 'Duncan Macleod <[email protected]>'
def log_norm(func):
"""Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring
"""
@wraps(func)
def decorated_func(*args, **kwargs):
norm, kwargs = format_norm(kwargs)
kwargs['norm'] = norm
return func(*args, **kwargs)
return decorated_func
def xlim_as_gps(func):
"""Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps`
"""
@wraps(func)
def wrapped_func(self, left=None, right=None, **kw):
if right is None and numpy.iterable(left):
left, right = left
kw['left'] = left
kw['right'] = right
gpsscale = self.get_xscale() in GPS_SCALES
for key in ('left', 'right'):
if gpsscale:
try:
kw[key] = numpy.longdouble(str(to_gps(kw[key])))
except TypeError:
pass
return func(self, **kw)
return wrapped_func
def restore_grid(func):
"""Wrap ``func`` to preserve the Axes current grid settings.
"""
@wraps(func)
def wrapped_func(self, *args, **kwargs):
try:
grid = (
self.xaxis._minor_tick_kw["gridOn"],
self.xaxis._major_tick_kw["gridOn"],
self.yaxis._minor_tick_kw["gridOn"],
self.yaxis._major_tick_kw["gridOn"],
)
except KeyError: # matplotlib < 3.3.3
grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor,
self.yaxis._gridOnMinor, self.yaxis._gridOnMajor)
try:
return func(self, *args, **kwargs)
finally:
# reset grid
self.xaxis.grid(grid[0], which="minor")
self.xaxis.grid(grid[1], which="major")
self.yaxis.grid(grid[2], which="minor")
self.yaxis.grid(grid[3], which="major")
return wrapped_func
# -- new Axes -----------------------------------------------------------------
class Axes(_Axes):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# handle Series in `ax.plot()`
self._get_lines = PlotArgsProcessor(self)
# reset data formatters (for interactive plots) to support
# GPS time display
self.fmt_xdata = self._fmt_xdata
self.fmt_ydata = self._fmt_ydata
@allow_rasterization
def draw(self, *args, **kwargs):
labels = {}
for ax in (self.xaxis, self.yaxis):
if ax.get_scale() in GPS_SCALES and ax.isDefault_label:
labels[ax] = ax.get_label_text()
trans = ax.get_transform()
epoch = float(trans.get_epoch())
unit = trans.get_unit_name()
iso = Time(epoch, format='gps', scale='utc').iso
utc = iso.rstrip('0').rstrip('.')
ax.set_label_text('Time [{0!s}] from {1!s} UTC ({2!r})'.format(
unit, utc, epoch))
try:
super().draw(*args, **kwargs)
finally:
for ax in labels: # reset labels
ax.isDefault_label = True
# -- auto-gps helpers -----------------------
def _fmt_xdata(self, x):
if self.get_xscale() in GPS_SCALES:
return str(to_gps(x))
return self.xaxis.get_major_formatter().format_data_short(x)
def _fmt_ydata(self, y):
if self.get_yscale() in GPS_SCALES:
return str(to_gps(y))
return self.yaxis.get_major_formatter().format_data_short(y)
set_xlim = xlim_as_gps(_Axes.set_xlim)
def set_epoch(self, epoch):
"""Set the epoch for the current GPS scale.
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
Parameters
----------
epoch : `float`, `str`
GPS-compatible time or date object, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
"""
scale = self.get_xscale()
return self.set_xscale(scale, epoch=epoch)
def get_epoch(self):
"""Return the epoch for the current GPS scale/
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
"""
return self.get_xaxis().get_transform().get_epoch()
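    # Hedged usage sketch: with the X-axis on one of the GPS scales registered
    # in .gps (e.g. ``ax.set_xscale('auto-gps')``), anything ``to_gps`` accepts
    # works as an epoch:
    #   >>> ax.set_epoch('2015-09-14 09:50:45')
    #   >>> ax.get_epoch()   # -> 1126259462.0 (value shown for illustration)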
# -- overloaded plotting methods ------------
def scatter(self, x, y, c=None, **kwargs):
# scatter with auto-sorting by colour
try:
if c is None:
raise ValueError
c_array = numpy.asanyarray(c, dtype=float)
except ValueError: # no colour array
pass
else:
c_sort = kwargs.pop('c_sort', True)
if c_sort:
sortidx = c_array.argsort()
x = numpy.asarray(x)[sortidx]
y = numpy.asarray(y)[sortidx]
c = numpy.asarray(c)[sortidx]
return super().scatter(x, y, c=c, **kwargs)
scatter.__doc__ = _Axes.scatter.__doc__.replace(
'marker :',
'c_sort : `bool`, optional, default: True\n'
' Sort scatter points by `c` array value, if given.\n\n'
'marker :',
)
@log_norm
def imshow(self, array, *args, **kwargs):
"""Display an image, i.e. data on a 2D regular raster.
If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a
:class:`~gwpy.spectrogram.Spectrogram`), then the defaults are
_different_ to those in the upstream
:meth:`~matplotlib.axes.Axes.imshow` method. Namely, the defaults are
- ``origin='lower'`` (coordinates start in lower-left corner)
- ``aspect='auto'`` (pixels are not forced to be square)
- ``interpolation='none'`` (no image interpolation is used)
In all other usage, the defaults from the upstream matplotlib method
are unchanged.
Parameters
----------
array : array-like or PIL image
The image data.
*args, **kwargs
All arguments and keywords are passed to the inherited
:meth:`~matplotlib.axes.Axes.imshow` method.
See also
--------
matplotlib.axes.Axes.imshow
for details of the image rendering
"""
if hasattr(array, "yspan"): # Array2D
return self._imshow_array2d(array, *args, **kwargs)
image = super().imshow(array, *args, **kwargs)
self.autoscale(enable=None, axis='both', tight=None)
return image
def _imshow_array2d(self, array, origin='lower', interpolation='none',
aspect='auto', **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.imshow`
"""
# NOTE: If you change the defaults for this method, please update
# the docstring for `imshow` above.
# calculate extent
extent = tuple(array.xspan) + tuple(array.yspan)
if self.get_xscale() == 'log' and extent[0] == 0.:
extent = (1e-300,) + extent[1:]
if self.get_yscale() == 'log' and extent[2] == 0.:
extent = extent[:2] + (1e-300,) + extent[3:]
kwargs.setdefault('extent', extent)
return self.imshow(array.value.T, origin=origin, aspect=aspect,
interpolation=interpolation, **kwargs)
@restore_grid
@log_norm
def pcolormesh(self, *args, **kwargs):
"""Create a pseudocolor plot with a non-regular rectangular grid.
When using GWpy, this method can be called with a single argument
that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y``
coordinate arrays will be determined from the indexing.
In all other usage, all ``args`` and ``kwargs`` are passed directly
to :meth:`~matplotlib.axes.Axes.pcolormesh`.
Notes
-----
Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`,
this method respects the current grid settings.
See also
--------
matplotlib.axes.Axes.pcolormesh
"""
if len(args) == 1 and hasattr(args[0], "yindex"): # Array2D
return self._pcolormesh_array2d(*args, **kwargs)
return super().pcolormesh(*args, **kwargs)
def _pcolormesh_array2d(self, array, *args, **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
"""
x = numpy.concatenate((array.xindex.value, array.xspan[-1:]))
y = numpy.concatenate((array.yindex.value, array.yspan[-1:]))
xcoord, ycoord = numpy.meshgrid(x, y, copy=False, sparse=True)
return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs)
def hist(self, x, *args, **kwargs):
x = numpy.asarray(x)
# re-format weights as array if given as float
weights = kwargs.get('weights', None)
if isinstance(weights, Number):
kwargs['weights'] = numpy.ones_like(x) * weights
# calculate log-spaced bins on-the-fly
if (kwargs.pop('logbins', False) and
not numpy.iterable(kwargs.get('bins', None))):
nbins = kwargs.get('bins', None) or rcParams.get('hist.bins', 30)
# get range
hrange = kwargs.pop('range', None)
if hrange is None:
try:
hrange = numpy.min(x), numpy.max(x)
except ValueError as exc:
if str(exc).startswith('zero-size array'): # no data
exc.args = ('cannot generate log-spaced histogram '
'bins for zero-size array, '
'please pass `bins` or `range` manually',)
raise
# log-scale the axis and extract the base
if kwargs.get('orientation') == 'horizontal':
self.set_yscale('log', nonposy='clip')
logbase = self.yaxis._scale.base
else:
self.set_xscale('log', nonposx='clip')
logbase = self.xaxis._scale.base
# generate the bins
kwargs['bins'] = numpy.logspace(
log(hrange[0], logbase), log(hrange[1], logbase),
nbins+1, endpoint=True)
return super().hist(x, *args, **kwargs)
hist.__doc__ = _Axes.hist.__doc__.replace(
'color :',
'logbins : boolean, optional\n'
' If ``True``, use logarithmically-spaced histogram bins.\n\n'
' Default is ``False``\n\n'
'color :')
# -- new plotting methods -------------------
def plot_mmm(self, data, lower=None, upper=None, **kwargs):
"""Plot a `Series` as a line, with a shaded region around it.
The ``data`` `Series` is drawn, while the ``lower`` and ``upper``
`Series` are plotted lightly below and above, with a fill
between them and the ``data``.
All three `Series` should have the same `~Series.index` array.
Parameters
----------
data : `~gwpy.types.Series`
Data to plot normally.
lower : `~gwpy.types.Series`
Lower boundary (on Y-axis) for shade.
upper : `~gwpy.types.Series`
Upper boundary (on Y-axis) for shade.
**kwargs
Any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`.
Returns
-------
artists : `tuple`
All of the drawn artists:
            - `~matplotlib.lines.Line2D` for ``data``,
            - `~matplotlib.lines.Line2D` for ``lower``, if given
            - `~matplotlib.lines.Line2D` for ``upper``, if given
            - `~matplotlib.collections.PolyCollection` for shading
See also
--------
matplotlib.axes.Axes.plot
for a full description of acceptable ``*args`` and ``**kwargs``
"""
alpha = kwargs.pop('alpha', .1)
# plot mean
line, = self.plot(data, **kwargs)
out = [line]
# modify keywords for shading
kwargs.update({
'label': '',
'linewidth': line.get_linewidth() / 2,
'color': line.get_color(),
'alpha': alpha * 2,
})
# plot lower and upper Series
fill = [data.xindex.value, data.value, data.value]
for i, bound in enumerate((lower, upper)):
if bound is not None:
out.extend(self.plot(bound, **kwargs))
fill[i+1] = bound.value
# fill between
out.append(self.fill_between(
*fill, alpha=alpha, color=kwargs['color'],
rasterized=kwargs.get('rasterized', True)))
return out
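    # Hedged usage sketch (names and colour are illustrative): given three
    # Series ``mean``, ``low``, ``high`` sharing one index,
    #   >>> ax.plot_mmm(mean, lower=low, upper=high, color='blue')
    # draws the mean as a full line, the bounds as thinner lines, and shades
    # the band between them.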
def tile(self, x, y, w, h, color=None,
anchor='center', edgecolors='face', linewidth=0.8,
**kwargs):
"""Plot rectanguler tiles based onto these `Axes`.
``x`` and ``y`` give the anchor point for each tile, with
``w`` and ``h`` giving the extent in the X and Y axis respectively.
Parameters
----------
x, y, w, h : `array_like`, shape (n, )
Input data
color : `array_like`, shape (n, )
Array of amplitudes for tile color
anchor : `str`, optional
Anchor point for tiles relative to ``(x, y)`` coordinates, one of
- ``'center'`` - center tile on ``(x, y)``
- ``'ll'`` - ``(x, y)`` defines lower-left corner of tile
- ``'lr'`` - ``(x, y)`` defines lower-right corner of tile
- ``'ul'`` - ``(x, y)`` defines upper-left corner of tile
- ``'ur'`` - ``(x, y)`` defines upper-right corner of tile
**kwargs
Other keywords are passed to
:meth:`~matplotlib.collections.PolyCollection`
Returns
-------
collection : `~matplotlib.collections.PolyCollection`
the collection of tiles drawn
Examples
--------
>>> import numpy
>>> from matplotlib import pyplot
>>> import gwpy.plot # to get gwpy's Axes
>>> x = numpy.arange(10)
>>> y = numpy.arange(x.size)
>>> w = numpy.ones_like(x) * .8
>>> h = numpy.ones_like(x) * .8
>>> fig = pyplot.figure()
>>> ax = fig.gca()
>>> ax.tile(x, y, w, h, anchor='ll')
>>> pyplot.show()
"""
# get color and sort
if color is not None and kwargs.get('c_sort', True):
sortidx = color.argsort()
x = x[sortidx]
y = y[sortidx]
w = w[sortidx]
h = h[sortidx]
color = color[sortidx]
# define how to make a polygon for each tile
if anchor == 'll':
def _poly(x, y, w, h):
return ((x, y), (x, y+h), (x+w, y+h), (x+w, y))
elif anchor == 'lr':
def _poly(x, y, w, h):
return ((x-w, y), (x-w, y+h), (x, y+h), (x, y))
elif anchor == 'ul':
def _poly(x, y, w, h):
return ((x, y-h), (x, y), (x+w, y), (x+w, y-h))
elif anchor == 'ur':
def _poly(x, y, w, h):
return ((x-w, y-h), (x-w, y), (x, y), (x, y-h))
elif anchor == 'center':
def _poly(x, y, w, h):
return ((x-w/2., y-h/2.), (x-w/2., y+h/2.),
(x+w/2., y+h/2.), (x+w/2., y-h/2.))
else:
raise ValueError("Unrecognised tile anchor {!r}".format(anchor))
# build collection
cmap = kwargs.pop('cmap', rcParams['image.cmap'])
coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)),
edgecolors=edgecolors, linewidth=linewidth,
**kwargs)
if color is not None:
coll.set_array(color)
coll.set_cmap(cmap)
out = self.add_collection(coll)
self.autoscale_view()
return out
# -- overloaded auxiliary methods -----------
def legend(self, *args, **kwargs):
# handle deprecated keywords
linewidth = kwargs.pop("linewidth", None)
if linewidth:
warnings.warn(
"the linewidth keyword to gwpy.plot.Axes.legend has been "
"deprecated and will be removed in a future release; "
"please update your code to use a custom legend handler, "
"e.g. gwpy.plot.legend.HandlerLine2D.",
DeprecationWarning,
)
alpha = kwargs.pop("alpha", None)
if alpha:
kwargs.setdefault("framealpha", alpha)
warnings.warn(
"the alpha keyword to gwpy.plot.Axes.legend has been "
"deprecated and will be removed in a future release; "
"use framealpha instead.",
DeprecationWarning,
)
# build custom handler
handler_map = kwargs.setdefault("handler_map", dict())
if isinstance(handler_map, dict):
handler_map.setdefault(Line2D, HandlerLine2D(linewidth or 6))
# create legend
return super().legend(*args, **kwargs)
legend.__doc__ = _Axes.legend.__doc__.replace(
"Call signatures",
""".. note::
This method uses a custom default legend handler for
`~matplotlib.lines.Line2D` objects, with increased linewidth relative
to the upstream :meth:`~matplotlib.axes.Axes.legend` method.
To disable this, pass ``handler_map=None``, or create and pass your
own handler class. See :ref:`gwpy-plot-legend` for more details.
Call signatures""",
)
def colorbar(self, mappable=None, **kwargs):
"""Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See also
--------
Plot.colorbar
"""
fig = self.get_figure()
if kwargs.get('use_axesgrid', True):
kwargs.setdefault('fraction', 0.)
if kwargs.get('fraction', 0.) == 0.:
kwargs.setdefault('use_axesgrid', True)
mappable, kwargs = gcbar.process_colorbar_kwargs(
fig, mappable=mappable, ax=self, **kwargs)
if isinstance(fig, Plot):
# either we have created colorbar Axes using axesgrid1, or
# the user already gave use_axesgrid=False, so we forcefully
# disable axesgrid here in case fraction == 0., which causes
# gridspec colorbars to fail.
kwargs['use_axesgrid'] = False
return fig.colorbar(mappable, **kwargs)
# override default Axes with this one by registering a projection with the
# same name
register_projection(Axes)
# -- overload Axes.plot() to handle Series ------------------------------------
class PlotArgsProcessor(_process_plot_var_args):
"""This class controls how ax.plot() works
"""
def __call__(self, *args, **kwargs):
"""Find `Series` data in `plot()` args and unwrap
"""
newargs = []
while args:
# strip first argument
this, args = args[:1], args[1:]
# it its a 1-D Series, then parse it as (xindex, value)
if hasattr(this[0], "xindex") and this[0].ndim == 1:
this = (this[0].xindex.value, this[0].value)
# otherwise treat as normal (must be a second argument)
else:
this += args[:1]
args = args[1:]
# allow colour specs
if args and isinstance(args[0], str):
this += args[0],
args = args[1:]
newargs.extend(this)
return super().__call__(*newargs, **kwargs)
| gpl-3.0 | 2,707,461,000,808,235,500 | 34.543831 | 79 | 0.546837 | false |
glehmann/uptodate | uptodate/plugins/display.py | 1 | 1674 | #!/usr/bin/env python
#coding: iso-8859-15
#
# Copyright (C) 2005 Gaëtan Lehmann <[email protected]>
#
# this file is part of uptodate
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from uptodate import *
usage = _("uptodate [options] display [name] ...")
summary = _("Display informations about modules")
description = _("""Display is used in order to display informations about modules.
Example:
uptodate display itk-app""")
names = ['display']
options = [Option("-a", "--all", action="store_true", dest="all", help=_("display all modules")),
]
def runCommand(opts, args, conf, out) :
if len(args) == 0 and not opts.all :
raise InvalidNbOfArgsException(usage)
if opts.all :
modules = conf.sections()
else :
modules = set(args)
wrongModules = modules - set(conf.sections())
if wrongModules :
if opts.force :
modules -= wrongModules
else :
raise ModuleNotFoundException(wrongModules)
for module in sorted(modules) :
printModule(conf, module, out, opts.verbose)
| gpl-2.0 | -8,111,562,675,753,110,000 | 28.892857 | 97 | 0.719833 | false |
google/makani | lib/python/pack2/backend_py.py | 1 | 9016 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python code generation backend."""
import re
import textwrap
from makani.lib.python import c_helpers
from makani.lib.python.pack2 import backend
class BackendPy(backend.Backend):
"""Python code generation backend."""
_primary_type_map = {
'uint8': 'py_types.UInt8',
'int8': 'py_types.Int8',
'uint16': 'py_types.UInt16',
'int16': 'py_types.Int16',
'uint32': 'py_types.UInt32',
'int32': 'py_types.Int32',
'float32': 'py_types.Float32',
'date': 'py_types.Date',
}
# Maps enum width to ctypes._SimpleCData _type_.
_enum_type_map = {
1: 'b',
2: 'h',
4: 'l',
}
def __init__(self, c_header_path):
super(self.__class__, self).__init__()
self.c_header_path = c_header_path
self.path_re = re.compile(r'/')
self._Start()
def _PathToModulePath(self, path):
module_path = self.path_re.sub('.', path)
return 'makani.' + module_path
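  # Illustrative mapping (the path value is hypothetical): a pack2 path such as
  # 'avionics/network/aio_node' becomes 'makani.avionics.network.aio_node';
  # every '/' is replaced by '.' and the 'makani.' prefix is prepended.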
def _Start(self):
self.source_string = textwrap.dedent("""\
# This file is automatically generated. Do not edit.
import ctypes
import yaml
from makani.lib.python import pack2
from makani.lib.python.pack2 import py_types
""")
def _Finalize(self):
pass
def _AddStruct(self, struct, include_extra):
fields_str = ''
offsets_str = ''
# In order to produce source that we can exec(), we need to explicitly
# declare our type references as being in the global scope.
globals_set = set()
for field in struct.body.fields:
if field.type_obj.path:
type_name = (self._PathToModulePath(field.type_obj.path)
+ '.' + field.type_obj.name)
else:
type_name = field.type_obj.name
if type_name in self._primary_type_map:
type_name = self._primary_type_map[type_name]
if type_name == 'string':
fields_str += " ('{name}', ctypes.c_char * {size}),\n".format(
name=field.name,
size=field.type_obj.width)
globals_set.add('ctypes')
elif field.extent == 1:
fields_str += " ('{name}', {type_name}),\n".format(
type_name=type_name,
name=field.name)
parts = re.split(r'\.', type_name)
globals_set.add(parts[0])
else:
fields_str += " ('{name}', {type_name} * {extent}),\n".format(
type_name=type_name,
name=field.name,
extent=field.extent)
parts = re.split(r'\.', type_name)
globals_set.add(parts[0])
offsets_str += " '{name}': {offset},\n".format(
name=field.name, offset=field.offset)
globals_str = ''
for type_name in globals_set:
globals_str += ' global %s\n' % type_name
self.source_string += textwrap.dedent("""\
class {type_name} (py_types.Structure):
{globals_str}
_fields_ = [
{fields}
]
_offsets_ = {{
{offsets}
}}
size = {size}
alignment = {alignment}
""").format(type_name=struct.name,
size=struct.width,
alignment=struct.alignment,
globals_str=globals_str,
fields=fields_str,
offsets=offsets_str)
if include_extra:
self.source_string += (
' crc = 0x{crc:08x}\n'
' source = "{source}"\n'
) .format(crc=struct.Crc32(),
source=struct.Source())
if not struct.forced_crc:
self.source_string += 'pack2.RegisterParam({type_name})\n\n'.format(
type_name=struct.name)
self.source_string += '\n\n'
def AddInclude(self, path):
self.source_string += 'import %s\n' % self._PathToModulePath(path)
def AddBitfield(self, bitfield):
raise NotImplementedError('Bitfields not implemented for %s'
% self.__class__.__name__)
def AddEnum(self, enum):
value_map_str = ''
c_value_map_str = ''
name_map_str = ''
constants_str = ''
for value in sorted(enum.body.value_map.keys()):
name = enum.body.value_map[value]
const_name = c_helpers.CamelToSnake(name).upper()
c_name = 'k' + enum.name + name
value_map_str += " {value}: '{name}',\n".format(name=name, value=value)
c_value_map_str += " {value}: '{c_name}',\n".format(
c_name=c_name, value=value)
name_map_str += " '{name}': {value},\n".format(name=name, value=value)
constants_str += (
'{type_name}.{const_name} = {type_name}({value})\n'.format(
const_name=const_name, value=value, type_name=enum.name))
max_value = max(enum.body.value_map.keys())
min_value = min(enum.body.value_map.keys())
# Strip trailing newline from above generated code.
value_map_str = value_map_str[:-1]
name_map_str = name_map_str[:-1]
constants_str = constants_str[:-1]
header_path = self.c_header_path
self.source_string += textwrap.dedent("""\
class {type_name}(ctypes._SimpleCData, py_types.PackableCType):
_type_ = '{type_code}'
_value_map = {{
{value_map}
}}
_c_value_map = {{
{c_value_map}
}}
_name_map = {{
{name_map}
}}
max_value = {max_value}
min_value = {min_value}
def __init__(self, value=0):
super(self.__class__, self).__init__()
self.__setstate__(value)
def __setstate__(self, state):
if isinstance(state, basestring):
self.value = self._name_map[state]
elif isinstance(state, self.__class__):
self.value = state.value
else:
self.value = state
def __repr__(self):
return self._value_map[self.value]
def __hash__(self):
return self.value
def __eq__(self, other):
if isinstance(other, basestring):
return self.value == self._name_map[other]
elif isinstance(other, self.__class__):
return self.value == other.value
else:
return self.value == other
def __ne__(self, other):
return not self.__eq__(other)
def CName(self):
return self._c_value_map[self.value]
@classmethod
def Names(cls):
return [{type_name}(v) for v in cls._value_map.keys()]
@classmethod
def iteritems(cls):
return cls._name_map.iteritems()
@classmethod
def HeaderFile(cls):
return "{output_c_header}"
@classmethod
def TypeName(cls):
return "{type_name}"
{constants}
""").format(type_name=enum.name,
type_code=self._enum_type_map[enum.width],
value_map=value_map_str,
c_value_map=c_value_map_str,
name_map=name_map_str,
max_value=max_value,
min_value=min_value,
output_c_header=header_path,
constants=constants_str)
def AddScaled(self, bitfield):
raise NotImplementedError('Scaleds not implemented for %s'
% self.__class__.__name__)
def AddStruct(self, struct):
self._AddStruct(struct, False)
def _AddYamlLoader(self, obj):
self.source_string += textwrap.dedent("""\
class {type_name}YamlLoader(yaml.YAMLObject):
global {type_name}
global yaml
yaml_tag = '!{type_name}'
yaml_loader = yaml.SafeLoader
data_type = {type_name}
@classmethod
def from_yaml(cls, loader, node):
# Clear object cache to ensure all objects are deep.
loader.constructed_objects = {{}}
state = loader.construct_mapping(node, deep=True)
return {type_name}(state=state)
""").format(type_name=obj.name)
def AddHeader(self, header):
self.AddStruct(header)
def AddParam(self, param):
self._AddStruct(param, True)
self._AddYamlLoader(param)
def Finalize(self):
self._Finalize()
def GetSourceString(self, name):
if name == 'source':
return self.source_string
else:
raise ValueError('Unknown source %s.' % name)
| apache-2.0 | -2,375,850,912,057,371,600 | 29.153846 | 80 | 0.551797 | false |
umkcdcrg01/ryu_openflow | ryu/app/my_arp_v3_copy.py | 1 | 7774 | # Simple Arp Handler v2
# Jack Zhao
# [email protected]
from operator import attrgetter
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.arp import arp
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ether
from ryu.ofproto import inet
import time
import os
# config logging
# LOG = logging.getLogger('SimpleArp')
# LOG.setLevel(logging.DEBUG)
# logging.basicConfig()
OFP_SWITCHES_LIST = \
    './network-data/ofp_switches_list.db'  # assumed path; _hostname_Check below needs it
OFP_SWITCHES_LIST_PREVIOUS = \
    './network-data/ofp_switches_list_prev.db'
OFP_SWITCHES_LIST_SCRIPT = \
    './scripts/remote_ovs_operation/get_switch_ofpbr_datapath_id.sh'
class MySimpleArp(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(MySimpleArp, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.arp_learning = {} # self.arp_learning = {srcMAC:[dst_ip,in_port,time]}
self.packetToport = {}
self.hostname_list = {}
self.dpset = kwargs['dpset']
def _get_hwaddr(self, dpid, port_no):
return self.dpset.get_port(dpid, port_no).hw_addr
def _hostname_Check(self, datapath):
# Given decimal datapath ID, return hostname
if os.path.exists(os.path.abspath(OFP_SWITCHES_LIST_PREVIOUS)):
f = os.path.abspath(OFP_SWITCHES_LIST_PREVIOUS)
else:
f = os.path.abspath(OFP_SWITCHES_LIST)
with open(f, 'r') as iff:
for line in iff:
hostname, dpid = line.split()
self.hostname_list[int(dpid, 16)] = hostname
# print self.hostname_list
# NEED add some datapath check later
if datapath not in self.hostname_list.keys():
return datapath
else:
return self.hostname_list[datapath]
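    # The *.db files above are expected to hold one "hostname dpid" pair per
    # line with the dpid in hex (e.g. "s1 0000000000000001", an illustrative
    # entry); the lookup is keyed by the decimal dpid, matching datapath.id.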
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
""" install table-miss flow entry """
self.logger.debug("my_arp: switch_features_handler:")
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# self.logger.info("################### datapath in decimal %s", datapath.id)
# self.logger.info("################### datapath in hex %s", hex(int(datapath.id)))
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
self.logger.debug("my_arp:add_flow")
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
self.logger.debug("my_arp: _packet_in_handler:")
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
# ofproto = datapath.ofproto
inPort = msg.match['in_port']
packets = Packet(msg.data)
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
eth = packets.get_protocols(ethernet)[0]
src = eth.src
dst = eth.dst
self.mac_to_port[dpid][src] = inPort
data = msg.data
self.arp_learning.setdefault(src, [])
self.packetToport.setdefault(src, [])
etherFrame = packets.get_protocol(ethernet)
# if dst == LLDP_MAC_NEAREST_BRIDGE:
# return
# print "packets: ", packets
# print "packets.get_protocols(ethernet): ", packets.get_protocols(ethernet)
# print "etherFrame######", etherFrame
if etherFrame.ethertype == ether.ETH_TYPE_ARP:
# print "packets: ", packets
# print "packets.get_protocols(ethernet): ", packets.get_protocols(ethernet)
arpArriveTime = time.time()
srcMac = etherFrame.src
arpPacket = packets.get_protocol(arp)
arp_dstIP = arpPacket.dst_ip
self.packetToport[srcMac] = [arp_dstIP, inPort, arpArriveTime]
self.receive_arp(datapath, packets, etherFrame, inPort, data)
return 0
else:
self.logger.debug("Drop packet")
return 1
def receive_arp(self, datapath, packets, etherFrame, inPort, data):
arpPacket = packets.get_protocol(arp)
if arpPacket.opcode == 1:
arp_dstIP = arpPacket.dst_ip
self.logger.debug("receive ARP request %s => %s (port%d)"
% (etherFrame.src, etherFrame.dst, inPort))
if self.anti_arp_brodcast(datapath, etherFrame, inPort, arp_dstIP):
self.reply_arp(datapath, etherFrame, arpPacket, arp_dstIP, inPort, data)
elif arpPacket.opcode == 2:
pass
def reply_arp(self, datapath, etherFrame, arpPacket, arp_dstIp, inPort, data):
"""flood the arp """
dst = arp_dstIp
dpid = datapath.id
if dst in self.mac_to_port[datapath.id]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = datapath.ofproto.OFPP_FLOOD
actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]
out = datapath.ofproto_parser.OFPPacketOut(datapath=datapath, buffer_id=0xffffffff,
in_port=inPort, actions=actions, data=data)
datapath.send_msg(out)
def anti_arp_brodcast(self, datapath, etherFrame, inPort, arp_dstIP):
if (etherFrame.src in self.packetToport) and (arp_dstIP == self.packetToport[etherFrame.src][0]):
if (inPort != self.packetToport[etherFrame.src][1]):
return False
else:
                print("Another multicast packet from %s at %i port in %s " % (etherFrame.src, inPort, self._hostname_Check(datapath.id)))
return True
else:
arpArriveTime = time.time()
srcMac = etherFrame.src
self.packetToport[srcMac] = [arp_dstIP, inPort, arpArriveTime]
self.arp_learning[srcMac] = [inPort, arpArriveTime]
# print "new arp_learning: ", self.arp_learning
return True
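# Illustrative note (added; not part of the original file): a Ryu app like this
# one is normally launched through the application manager, e.g.
#   ryu-manager my_arp_v3_copy.py
# and __init__ above expects a 'dpset' (datapath set) context object in kwargs.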
| apache-2.0 | 6,158,059,715,359,937,000 | 40.572193 | 136 | 0.605866 | false |
phronesis-mnemosyne/census-schema-alignment | wit/wit/dev/authorship-embedding.py | 1 | 4685 | import pandas as pd
import urllib2
from pprint import pprint
from matplotlib import pyplot as plt
from bs4 import BeautifulSoup
from hashlib import md5
import sys
sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
from wit import *
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 120)
np.set_printoptions(linewidth=250)
# May need to add things here to make this run the same way each time
np.random.seed(123)
# --
num_features = 10000 # Words
max_len = 100 # Words
formatter = KerasFormatter(num_features, max_len)
# --
# Load data
orig = pd.read_csv('/Users/BenJohnson/projects/laundering/sec/edward/analysis/crowdsar/crowdsar_user.csv', sep = '|', header = None)
orig.columns = ('hash', 'obj')
orig['id'] = 0
# Get
frequent_posters = orig.hash.value_counts().head(100).index
nfrequent_posters = orig.hash.value_counts().head(100).tail(25).index
sub = orig[orig.hash.isin(frequent_posters)]
sel = np.random.uniform(0, 1, sub.shape[0]) > .9
sub = sub[sel].drop_duplicates()
sel2 = np.random.uniform(0, 1, sub.shape[0]) > .5
df = sub[sel2]
tdf = sub[~sel2]
tdf2 = orig[orig.hash.isin(nfrequent_posters)].drop_duplicates()
sel3 = np.random.uniform(0, 1, tdf2.shape[0]) > .9
tdf2 = tdf2[sel3]
# --
train = make_triplet_train(df, N = 500)
trn, trn_levs = formatter.format(train, ['obj'], 'hash')
awl, awl_levs = formatter.format(train.drop_duplicates(), ['obj'], 'hash')
# tst, tst_levs = formatter.format(tdf, ['obj'], 'hash')
out, out_levs = formatter.format(tdf2, ['obj'], 'hash')
# --
# Define model
recurrent_size = 64
dense_size = 16
model = Sequential()
model.add(Embedding(num_features, recurrent_size))
model.add(LSTM(recurrent_size, return_sequences = True))
model.add(LSTM(recurrent_size))
model.add(Dense(dense_size))
model.add(Activation('unit_norm'))
model.compile(loss = 'triplet_cosine', optimizer = 'adam')
# --
# Train model
for i in range(60):
ms = modsel(train.shape[0], N = 3)
fitting = model.fit(
trn['x'][0][ms], trn['x'][0][ms],
nb_epoch = 3,
batch_size = 3 * 250,
shuffle = False
)
json_string = model.to_json()
open('author2_architecture.json', 'w').write(json_string)
model.save_weights('author2_weights.h5')
tr_preds = model.predict(awl['x'][0], verbose = True, batch_size = 250)
colors = awl['y'].argmax(1)
plt.scatter(tr_preds[:,0], tr_preds[:,1], c = colors)
plt.show()
# ------------------------------------------------
# Load pretrained model
#
# from keras.models import model_from_json
# model = model_from_json(open('author_architecture.json').read())
# model.load_weights('author_weights.h5')
# <<
shp = awl['y'].shape[1]
amax = awl['y'].argmax(1)
sims = np.zeros( (awl['y'].shape[1], awl['y'].shape[1]) )
tmps = [tr_preds[amax == i] for i in range(shp)]
for i in range(shp):
print i
a = tmps[i]
for j in range(shp):
b = tmps[j]
mn = np.mean(np.dot(a, b.T) > .8)
sims[i][j] = mn
np.mean(np.max(sims, 0) - np.diag(sims))
np.mean(np.max(sims, 0) - sims)
np.mean(sims.argmax(1) == np.arange(sims.shape[0]))
# >>
ts_preds = model.predict(tst['x'][0], verbose = True, batch_size = 250)
tmpsel = np.random.choice(ts_preds.shape[0], 5000)
sim = np.dot(ts_preds[tmpsel], tr_preds.T)
np.mean(tst['y'].argmax(1)[tmpsel] == awl['y'].argmax(1)[sim.argmax(1)])
# --
out_preds = model.predict(out['x'][0], verbose = True, batch_size = 250)
outsims = np.dot(out_preds, out_preds.T)
shp = out['y'].shape[1]
amax = out['y'].argmax(1)
sims = np.zeros( (out['y'].shape[1], out['y'].shape[1]) )
tmps = [out_preds[amax == i] for i in range(shp)]
for i in range(shp):
print i
a = tmps[i]
for j in range(shp):
b = tmps[j]
mn = np.mean(np.dot(a, b.T) > .8)
sims[i][j] = mn
sims.argmax(1) == np.arange(sims.shape[0])
np.fill_diagonal(outsims, 0)
rowmax = outsims.argmax(1)
by_user = map(lambda K: np.mean(amax[rowmax[amax == K]] == K), range(out['y'].shape[1]))
pprint(by_user)
# >>
from sklearn.cluster import KMeans
lens = np.array(tdf2.obj.apply(lambda x: len(str(x))))
km = KMeans(n_clusters = 26)
cl = km.fit_predict(out_preds[lens > 100])
amax = out['y'][lens > 100].argmax(1)
pd.crosstab(cl, amax)
# <<
# --
out_preds = model.predict(out['x'][0], verbose = True, batch_size = 250)
sel = np.random.uniform(0, 1, out_preds.shape[0]) > .5
outsims = np.dot(out_preds[sel], out_preds[~sel].T)
amax1 = out['y'].argmax(1)[sel]
amax2 = out['y'].argmax(1)[~sel]
conf = pd.crosstab(amax1, amax2[outsims.argmax(1)])
np.mean(np.array(conf).argmax(1) == range(conf.shape[0]))
| apache-2.0 | -2,159,664,508,630,088,000 | 22.661616 | 140 | 0.631163 | false |
quantmind/jflib | boostbuilder/build_ext.py | 1 | 7401 | import os
import sys
from types import *
from distutils.errors import *
from distutils.dep_util import newer_group
from distutils.core import Extension as old_Extension
from distutils.command.build_ext import build_ext as _build_ext
from distutils import log
from utils import fullsplit
class external_object_filenames_hack(object):
def __init__(self, builder, ext):
self.builder = builder
self.compiler = builder.compiler
self.ext = ext
def __call__(self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
sd = fullsplit(self.ext.source_directory)
NB = len(sd)
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
base = tuple(fullsplit(base)[NB:])
base = os.path.join(self.ext.name,*base)
if ext not in (self.compiler.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.compiler.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.compiler.obj_extension))
return obj_names
class Extension(old_Extension):
def __init__ (self, *args, **kwargs):
self.external_library = kwargs.pop('external_library',False)
self.source_directory = kwargs.pop('source_directory',False)
self.depends_on = kwargs.pop('depends_on',[])
old_Extension.__init__(self, *args, **kwargs)
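# A hypothetical setup.py fragment showing how this Extension/build_ext pair
# might be wired together (the module name, sources and libraries below are
# illustrative assumptions, not taken from this repository):
#
#   from distutils.core import setup
#   setup(name='example',
#         ext_modules=[Extension('exampleboost',
#                                sources=['src/core.cpp'],
#                                source_directory='src',
#                                external_library=True,
#                                libraries=['boost_python'])],
#         cmdclass={'build_ext': build_ext})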
class build_ext(_build_ext):
"""
Specialized extension source builder for boost libraries
"""
def build_extension(self, ext):
if ext.depends_on:
if self.package:
pd = self.package.split('.')
dir = os.path.join(self.build_lib,*pd)
else:
dir = self.build_lib
if dir not in ext.library_dirs:
ext.library_dirs.append(dir)
if ext.external_library:
objf = self.compiler.object_filenames
self.compiler.object_filenames = external_object_filenames_hack(self,ext)
self.external_library = True
_build_ext.build_extension(self, ext)
self.compiler.object_filenames = objf
else:
self.external_library = False
_build_ext.build_extension(self, ext)
def get_export_symbols(self, ext):
if ext.external_library:
return None
else:
return _build_ext.get_export_symbols(self, ext)
def get_ext_filename(self, ext_name):
from distutils.sysconfig import get_config_var
ext_path = ext_name.split('.')
if os.name == "os2":
ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8]
so_ext = get_config_var('SO')
prefix = ''
debug = ''
if self.debug:
debug = '_debug'
if self.external_library:
if os.name == 'nt':
so_ext = '.dll'
elif os.name == 'posix':
prefix = 'lib'
ext_path[-1] = prefix + ext_path[-1] + debug
return apply(os.path.join, ext_path) + so_ext
def run (self):
from distutils.ccompiler import new_compiler
# 'self.extensions', as supplied by setup.py, is a list of
# Extension instances. See the documentation for Extension (in
# distutils.extension) for details.
#
# For backwards compatibility with Distutils 0.8.2 and earlier, we
# also allow the 'extensions' list to be a list of tuples:
# (ext_name, build_info)
# where build_info is a dictionary containing everything that
# Extension instances do except the name, with a few things being
# differently named. We convert these 2-tuples to Extension
# instances as needed.
if not self.extensions:
return
# If we were asked to build any C/C++ libraries, make sure that the
# directory where we put them is in the library search path for
# linking extensions.
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.libraries.extend(build_clib.get_library_names() or [])
self.library_dirs.append(build_clib.build_clib)
# Setup the CCompiler object that we'll use to do all the
# compiling and linking
self.compiler = new_compiler(compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
hascpp = False
for ext in self.extensions:
l = ext.language or self.compiler.detect_language(ext.sources)
if l == 'c++':
hascpp = True
break
self.compiler.customize(self.distribution, self.debug, hascpp)
# If we are cross-compiling, init the compiler now (if we are not
# cross-compiling, init would not hurt, but people may rely on
# late initialization of compiler even if they shouldn't...)
#if os.name == 'nt' and self.plat_name != get_platform():
# self.compiler.initialize(self.plat_name)
# And make sure that any compile/link-related options (which might
# come from the command-line or from the setup script) are set in
# that CCompiler object -- that way, they automatically apply to
# all compiling and linking done here.
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
if self.libraries is not None:
self.compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
self.compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
self.compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
self.compiler.set_link_objects(self.link_objects)
# Now actually compile and link everything.
self.build_extensions() | bsd-3-clause | -4,701,284,351,250,491,000 | 38.233696 | 90 | 0.547764 | false |
manub686/atomix | r2cmplr/_scrape_b_i.py | 1 | 4226 | #!/usr/bin/python
'''
Atomix project, _scrape_b_i.py, (TODO: summary)
Copyright (c) 2015 Stanford University
Released under the Apache License v2.0. See the LICENSE file for details.
Author(s): Manu Bansal
'''
import csv
import os
import sys
import re
from _util import *
class BlockScraper:
def which(self, blockname, ilib_include_path):
inc_list = []
if blockname == "BlockNOP":
return ""
for path in ilib_include_path:
fname = '%s/%s_i.h' % (path, blockname)
if os.path.exists(fname):
self.logger.debug('%s exists' % fname)
inc_list.append(fname)
else:
pass
if len(set(inc_list)) > 1:
print_line()
self.logger.error("Multiple block include files found for %s" % blockname)
print_line()
exit(17)
elif len(inc_list) == 0:
print_line()
self.logger.error("No block include file found for %s" % blockname)
print_line()
exit(18)
return inc_list[0]
def parse_block_i_h_file(self, blockname, block_i_h_file):
if blockname == "BlockNOP":
return ([], [], [], [], [], [], [])
ifile = open(block_i_h_file,'r')
text = ifile.read()
ifile.close()
(inp_list, out_list, cf_list, inp_list2, out_list2) = self.find_declaration_i(blockname, text)
if cf_list:
cf_par_list, cf_par_dict = self.find_declaration_i_conf(blockname, text)
else:
cf_par_list = []
cf_par_dict = {}
return (inp_list, out_list, cf_list, inp_list2, out_list2, cf_par_list, cf_par_dict)
def find_declaration_i(self, blockname, text):
pattern = re.compile(r'%s_i\s*\(.*?\;' % blockname, re.VERBOSE | re.DOTALL)
count = 0
for match in pattern.finditer(text):
count += 1
if count > 1:
print_line()
self.logger.error('More than one implementation prototype declarations found for %s' % blockname)
print_line()
exit(1)
text = match.group(0)
if count == 0:
print_line()
self.logger.error('No implementation prototype declaration found for %s' % blockname)
print_line()
exit(1)
subpattern = re.compile(r'(IN|OUT|CF)\s+([a-zA-Z_\d]+)\s*(?:const)?\s*\*\s*(?:const)?\s*(?:restrict)?\s*([a-zA-Z_\d]+)', re.VERBOSE)
inp_list = []
out_list = []
cf_list = []
inp_list2 = []
out_list2 = []
for line in text.split('\n'):
for match in subpattern.finditer(line):
iodir = match.group(1)
iotype = match.group(2)
ioname = match.group(3)
if (iodir == 'IN'):
inp_list.append(iotype)
inp_list2.append((iotype, ioname))
elif (iodir == 'OUT'):
out_list.append(iotype)
out_list2.append((iotype, ioname))
elif (iodir == 'CF'):
cf_list.append(iotype)
else:
print_line()
self.logger.error('Header file parse error')
print_line()
exit(1)
return (inp_list, out_list, cf_list, inp_list2, out_list2)
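  # Illustration (hypothetical prototype, not taken from any real block header):
  # a declaration such as
  #   void BlockFoo_i(IN Cplx16 *inSamples, OUT Uint8 *outBits, CF BlockFooConf *conf);
  # would be parsed into inp_list = ['Cplx16'], out_list = ['Uint8'],
  # cf_list = ['BlockFooConf'], inp_list2 = [('Cplx16', 'inSamples')] and
  # out_list2 = [('Uint8', 'outBits')].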
def find_declaration_i_conf(self, blockname, text):
pattern = re.compile(r'%s_i_conf\s*\(.*?\;' % blockname, re.VERBOSE | re.DOTALL)
count = 0
for match in pattern.finditer(text):
count += 1
if count > 1:
self.logger.error('More than one configuration implementation prototype declarations found for %s' % blockname)
exit(1)
text = match.group(0)
if count == 0:
print_line()
self.logger.error('No configuration implementation prototype declaration found for %s' % blockname)
print_line()
exit(1)
subpattern_cf = re.compile(r'(CF)\s+([a-zA-Z_\d]+)\s*(?:const)?\s*\*\s*(?:const)?\s*(?:restrict)?\s*([a-zA-Z_\d]+)', re.VERBOSE)
subpattern_par = re.compile(r'([a-zA-Z_\d]+)\s*(?:const)?\s*(?:const)?\s*([a-zA-Z_\d]+)', re.VERBOSE)
par_list = []
par_dict = {}
lines = text.split('\n')
line = lines[1]
count = 0
for match in subpattern_cf.finditer(line):
count += 1
if count == 0:
print_line()
self.logger.error('Configuration implementation prototype must have CF as the first parameter')
print_line()
exit(2)
for line in lines[2:]:
for match in subpattern_par.finditer(line):
iotype = match.group(1)
ioname = match.group(2)
par_dict[ioname] = iotype
par_list.append(ioname)
return (par_list, par_dict)
def __init__(self, logger):
self.logger = logger
| apache-2.0 | -1,635,830,698,253,406,700 | 26.986755 | 137 | 0.612636 | false |
leifos/treconomics | treconomics_project/treconomics/test_views.py | 1 | 1408 | from django.core.urlresolvers import reverse
__author__ = 'mickeypash'
from django.test import TestCase
class LandingViewTests(TestCase):
def test_home_view_status_code(self):
"""
Testing the response status codes
Status expected - 200 OK
"""
response = self.client.get(reverse("home"))
self.assertEqual(response.status_code, 200)
def test_login_view_status_code(self):
"""
Testing the response status codes
Status expected - 200 OK
"""
response = self.client.get(reverse("login"))
self.assertEqual(response.status_code, 200)
def test_logout_view_status_code(self):
"""
Testing the response status codes
Status expected - 200 OK
"""
response = self.client.get(reverse("logout"))
self.assertEqual(response.status_code, 200)
def test_start_exp_view_status_code(self):
"""
Testing the response status codes
Status expected - 200 OK
"""
response = self.client.get(reverse("start-experiment"))
self.assertEqual(response.status_code, 200)
    def test_pre_experiment_view_status_code(self):
"""
Testing the response status codes
Status expected - 200 OK
"""
response = self.client.get(reverse("pre-experiment"))
self.assertEqual(response.status_code, 200) | mit | 660,491,089,278,089,300 | 28.978723 | 63 | 0.618608 | false |
Xeralux/tensorflow | tensorflow/contrib/distributions/python/ops/statistical_testing.py | 1 | 32457 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Statistical test assertions calibrated for their error rates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
__all__ = [
"true_mean_confidence_interval_by_dkwm",
"assert_true_mean_equal_by_dkwm",
"min_discrepancy_of_true_means_detectable_by_dkwm",
"min_num_samples_for_dkwm_mean_test",
"assert_true_mean_equal_by_dkwm_two_sample",
"min_discrepancy_of_true_means_detectable_by_dkwm_two_sample",
"min_num_samples_for_dkwm_mean_two_sample_test",
]
def _batch_sort_vector(x, ascending=True, name=None):
with ops.name_scope(name, "sort_each_row", [x]):
x = ops.convert_to_tensor(x, name="x")
n = array_ops.shape(x)[-1]
if ascending:
y, _ = nn_ops.top_k(-x, k=n, sorted=True)
y = -y
else:
y, _ = nn_ops.top_k(x, k=n, sorted=True)
y.set_shape(x.shape)
return y
def _do_maximum_mean(samples, envelope, high, name=None):
"""Common code between maximum_mean and minimum_mean."""
with ops.name_scope(name, "do_maximum_mean", [samples, envelope, high]):
n = array_ops.rank(samples)
# Move the batch dimension of `samples` to the rightmost position,
# where the _batch_sort_vector function wants it.
perm = array_ops.concat([math_ops.range(1, n), [0]], axis=0)
samples = array_ops.transpose(samples, perm)
samples = _batch_sort_vector(samples)
batch_shape = array_ops.shape(samples)[:-1]
n = array_ops.shape(samples)[-1]
step = 1. / math_ops.cast(n, dtype=samples.dtype.base_dtype)
def _loop_body(iter_, total, to_skip):
total = array_ops.where(
step <= to_skip,
total,
array_ops.where(
to_skip > 0.,
total + (step - to_skip) * samples[..., iter_],
total + step * samples[..., iter_]))
to_skip = array_ops.where(step <= to_skip, to_skip - step, 0.)
return [iter_ + 1, total, to_skip]
_, total, _ = control_flow_ops.while_loop(
cond=lambda iter_, *args: iter_ < n,
body=_loop_body,
loop_vars=[
0,
array_ops.zeros(batch_shape, dtype=samples.dtype.base_dtype),
envelope, # to_skip
])
return total + envelope * high
def _maximum_mean(samples, envelope, high, name=None):
"""Returns a stochastic upper bound on the mean of a scalar distribution.
The idea is that if the true CDF is within an `eps`-envelope of the
empirical CDF of the samples, and the support is bounded above, then
the mean is bounded above as well. In symbols,
```none
sup_x(|F_n(x) - F(x)|) < eps
```
The 0th dimension of `samples` is interpreted as independent and
identically distributed samples. The remaining dimensions are
broadcast together with `envelope` and `high`, and operated on
separately.
Args:
samples: Floating-point tensor of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `envelope` and `high`.
envelope: Floating-point tensor of sizes of admissible CDF
envelopes (i.e., the `eps` above).
high: Floating-point tensor of upper bounds on the distributions'
supports.
name: A name for this operation (optional).
Returns:
bound: Floating-point tensor of upper bounds on the true means.
Raises:
InvalidArgumentError: If some `sample` is found to be larger than
the corresponding `high`.
"""
with ops.name_scope(name, "maximum_mean", [samples, envelope, high]):
samples = ops.convert_to_tensor(samples, name="samples")
envelope = ops.convert_to_tensor(envelope, name="envelope")
high = ops.convert_to_tensor(high, name="high")
xmax = math_ops.reduce_max(samples, axis=[-1])
msg = "Given sample maximum value exceeds expectations"
check_op = check_ops.assert_less_equal(xmax, high, message=msg)
with ops.control_dependencies([check_op]):
return array_ops.identity(_do_maximum_mean(samples, envelope, high))
def _minimum_mean(samples, envelope, low, name=None):
"""Returns a stochastic lower bound on the mean of a scalar distribution.
The idea is that if the true CDF is within an `eps`-envelope of the
empirical CDF of the samples, and the support is bounded below, then
the mean is bounded below as well. In symbols,
```none
sup_x(|F_n(x) - F(x)|) < eps
```
The 0th dimension of `samples` is interpreted as independent and
identically distributed samples. The remaining dimensions are
broadcast together with `envelope` and `low`, and operated on
separately.
Args:
samples: Floating-point tensor of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `envelope` and `low`.
envelope: Floating-point tensor of sizes of admissible CDF
envelopes (i.e., the `eps` above).
low: Floating-point tensor of lower bounds on the distributions'
supports.
name: A name for this operation (optional).
Returns:
bound: Floating-point tensor of lower bounds on the true means.
Raises:
InvalidArgumentError: If some `sample` is found to be smaller than
the corresponding `low`.
"""
with ops.name_scope(name, "minimum_mean", [samples, envelope, low]):
samples = ops.convert_to_tensor(samples, name="samples")
envelope = ops.convert_to_tensor(envelope, name="envelope")
low = ops.convert_to_tensor(low, name="low")
xmin = math_ops.reduce_min(samples, axis=[-1])
msg = "Given sample minimum value falls below expectations"
check_op = check_ops.assert_greater_equal(xmin, low, message=msg)
with ops.control_dependencies([check_op]):
return - _do_maximum_mean(-samples, envelope, -low)
def _dkwm_cdf_envelope(n, error_rate, name=None):
"""Computes the CDF envelope that the DKWM inequality licenses.
The [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
gives a stochastic bound on the distance between the true cumulative
distribution function (CDF) of any distribution and its empirical
CDF. To wit, for `n` iid samples from any distribution with CDF F,
```none
P(sup_x |F_n(x) - F(x)| > eps) < 2exp(-2n eps^2)
```
This function computes the envelope size `eps` as a function of the
number of samples `n` and the desired limit on the left-hand
probability above.
Args:
n: Tensor of numbers of samples drawn.
error_rate: Floating-point tensor of admissible rates of mistakes.
name: A name for this operation (optional).
Returns:
eps: Tensor of maximum distances the true CDF can be from the
empirical CDF. This scales as `O(sqrt(-log(error_rate)))` and
as `O(1 / sqrt(n))`. The shape is the broadcast of `n` and
`error_rate`.
"""
with ops.name_scope(name, "dkwm_cdf_envelope", [n, error_rate]):
n = math_ops.cast(n, dtype=error_rate.dtype)
return math_ops.sqrt(-gen_math_ops.log(error_rate / 2.) / (2. * n))
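# As a numeric illustration (added note): with n = 1000 samples and
# error_rate = 1e-6, the envelope evaluates to
# eps = sqrt(-log(5e-7) / 2000) ~= sqrt(14.5 / 2000) ~= 0.085.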
def _check_shape_dominates(tensor, tensors):
"""Check that broadcasting `tensor` against `tensors` does not expand it.
Why? Because I want to be very sure that the samples tensor is not
accidentally enlarged by broadcasting against tensors that are
supposed to be describing the distribution(s) sampled from, lest the
sample counts end up inflated.
Args:
tensor: A Tensor whose shape is to be protected against broadcasting.
tensors: A list of Tensors to check
Returns:
tensor: `tf.identity(tensor)` with control dependencies attached;
be sure to use that downstream.
"""
def check(t):
target = array_ops.shape(tensor)[1:]
result = array_ops.broadcast_dynamic_shape(target, array_ops.shape(t))
# This rank check ensures that I don't get a wrong answer from the
# _shapes_ broadcasting against each other.
gt = check_ops.assert_greater(array_ops.rank(target), array_ops.rank(t))
eq = check_ops.assert_equal(target, result)
return gt, eq
checks = list(itertools.chain(*[check(t) for t in tensors]))
with ops.control_dependencies(checks):
return array_ops.identity(array_ops.identity(tensor))
def true_mean_confidence_interval_by_dkwm(
samples, low, high, error_rate=1e-6, name=None):
"""Computes a confidence interval for the mean of a scalar distribution.
In batch mode, computes confidence intervals for all distributions
in the batch (which need not be identically distributed).
Relies on the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
The probability (over the randomness of drawing the given samples)
that any true mean is outside the corresponding returned interval is
no more than the given `error_rate`. The size of the intervals
scale as
`O(1 / sqrt(#samples))`, as `O(high - low)`, and as `O(-log(error_rate))`.
Note that `error_rate` is a total error rate for all the confidence
intervals in the batch. As such, if the batch is nontrivial, the
error rate is not broadcast but divided (evenly) among the batch
members.
Args:
samples: Floating-point tensor of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
low: Floating-point tensor of lower bounds on the distributions'
supports.
high: Floating-point tensor of upper bounds on the distributions'
supports.
error_rate: *Scalar* admissible total rate of mistakes.
name: A name for this operation (optional).
Returns:
low: A floating-point tensor of stochastic lower bounds on the true means.
high: A floating-point tensor of stochastic upper bounds on the true means.
"""
with ops.name_scope(
name, "true_mean_confidence_interval_by_dkwm",
[samples, low, high, error_rate]):
samples = ops.convert_to_tensor(samples, name="samples")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
error_rate = ops.convert_to_tensor(error_rate, name="error_rate")
samples = _check_shape_dominates(samples, [low, high])
check_ops.assert_scalar(error_rate) # Static shape
error_rate = _itemwise_error_rate(error_rate, [low, high], samples)
n = array_ops.shape(samples)[0]
envelope = _dkwm_cdf_envelope(n, error_rate)
min_mean = _minimum_mean(samples, envelope, low)
max_mean = _maximum_mean(samples, envelope, high)
return min_mean, max_mean
def _itemwise_error_rate(
total_error_rate, param_tensors, sample_tensor=None, name=None):
with ops.name_scope(
name, "itemwise_error_rate",
[total_error_rate, param_tensors, sample_tensor]):
result_shape = [1]
for p_tensor in param_tensors:
result_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(p_tensor), result_shape)
if sample_tensor is not None:
result_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(sample_tensor)[1:], result_shape)
num_items = math_ops.reduce_prod(result_shape)
return total_error_rate / math_ops.cast(
num_items, dtype=total_error_rate.dtype)
def assert_true_mean_equal_by_dkwm(
samples, low, high, expected, false_fail_rate=1e-6, name=None):
"""Asserts the mean of the given distribution is as expected.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the true mean of some distribution from which the given samples are
drawn is _not_ the given expected mean with statistical significance
`false_fail_rate` or stronger, otherwise passes. If you also want to
check that you are gathering enough evidence that a pass is not
spurious, see `min_num_samples_for_dkwm_mean_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples: Floating-point tensor of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
low: Floating-point tensor of lower bounds on the distributions'
supports.
high: Floating-point tensor of upper bounds on the distributions'
supports.
expected: Floating-point tensor of expected true means.
false_fail_rate: *Scalar* admissible total rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if any expected mean is
outside the corresponding confidence interval.
"""
with ops.name_scope(
name, "assert_true_mean_equal_by_dkwm",
[samples, low, high, expected, false_fail_rate]):
samples = ops.convert_to_tensor(samples, name="samples")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
expected = ops.convert_to_tensor(expected, name="expected")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
samples = _check_shape_dominates(samples, [low, high, expected])
min_mean, max_mean = true_mean_confidence_interval_by_dkwm(
samples, low, high, error_rate=false_fail_rate)
less_op = check_ops.assert_less(
min_mean, expected, message="Mean confidence interval too high")
with ops.control_dependencies([less_op]):
return check_ops.assert_greater(
max_mean, expected, message="Mean confidence interval too low")
def min_discrepancy_of_true_means_detectable_by_dkwm(
n, low, high, false_fail_rate, false_pass_rate, name=None):
"""Returns the minimum mean discrepancy that a DKWM-based test can detect.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Note that `false_fail_rate` is a total false failure rate for all
the tests in the batch. As such, if the batch is nontrivial, each
member will demand more samples. The `false_pass_rate` is also
interpreted as a total, but is treated asymmetrically: If each test
in the batch detects its corresponding discrepancy with probability
at least `1 - false_pass_rate`, then running all those tests and
failing if any one fails will jointly detect all those discrepancies
with the same `false_pass_rate`.
Args:
n: Tensor of numbers of samples to be drawn from the distributions
of interest.
low: Floating-point tensor of lower bounds on the distributions'
supports.
high: Floating-point tensor of upper bounds on the distributions'
supports.
false_fail_rate: *Scalar* admissible total rate of false failures.
false_pass_rate: *Scalar* admissible rate of false passes.
name: A name for this operation (optional).
Returns:
discr: Tensor of lower bounds on the distances between true
means detectable by a DKWM-based test.
For each batch member `i`, of `K` total, drawing `n[i]` samples from
some scalar distribution supported on `[low[i], high[i]]` is enough
to detect a difference in means of size `discr[i]` or more.
Specifically, we guarantee that (a) if the true mean is the expected
mean, `assert_true_mean_equal_by_dkwm` will fail with probability at
most `false_fail_rate / K` (which amounts to `false_fail_rate` if
applied to the whole batch at once), and (b) if the true mean
differs from the expected mean by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm` will pass with probability at most
`false_pass_rate`.
The detectable discrepancy scales as
- `O(high[i] - low[i])`,
- `O(1 / sqrt(n[i]))`,
- `O(-log(false_fail_rate/K))`, and
- `O(-log(false_pass_rate))`.
"""
with ops.name_scope(
name, "min_discrepancy_of_true_means_detectable_by_dkwm",
[n, low, high, false_fail_rate, false_pass_rate]):
n = ops.convert_to_tensor(n, name="n")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Algorithm: Assume a true CDF F. The DKWM inequality gives a
# stochastic bound on how far the observed empirical CDF F_n can be.
# Then, using the DKWM inequality again gives a stochastic bound on
# the farthest candidate true CDF F' that
# true_mean_confidence_interval_by_dkwm might consider. At worst, these
# errors may go in the same direction, so the distance between F and
# F' is bounded by the sum.
# On batching: false fail rates sum, so I need to reduce
# the input to account for the batching. False pass rates
# max, so I don't.
sampling_envelope = _dkwm_cdf_envelope(n, false_pass_rate)
false_fail_rate = _itemwise_error_rate(false_fail_rate, [n, low, high])
analysis_envelope = _dkwm_cdf_envelope(n, false_fail_rate)
return (high - low) * (sampling_envelope + analysis_envelope)
def min_num_samples_for_dkwm_mean_test(
discrepancy, low, high,
false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
"""Returns how many samples suffice for a one-sample DKWM mean test.
To wit, returns an upper bound on the number of samples necessary to
guarantee detecting a mean difference of at least the given
`discrepancy`, with the given `false_fail_rate` and `false_pass_rate`,
using the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
on a scalar distribution supported on `[low, high]`.
Args:
discrepancy: Floating-point tensor of desired upper limits on mean
differences that may go undetected with probability higher than
`1 - false_pass_rate`.
low: Tensor of lower bounds on the distributions' support.
high: Tensor of upper bounds on the distributions' support.
false_fail_rate: *Scalar* admissible total rate of false failures.
false_pass_rate: *Scalar* admissible rate of false passes.
name: A name for this operation (optional).
Returns:
n: Tensor of numbers of samples to be drawn from the distributions
of interest.
The `discrepancy`, `low`, and `high` tensors must have
broadcast-compatible shapes.
For each batch member `i`, of `K` total, drawing `n[i]` samples from
some scalar distribution supported on `[low[i], high[i]]` is enough
to detect a difference in means of size `discrepancy[i]` or more.
Specifically, we guarantee that (a) if the true mean is the expected
mean, `assert_true_mean_equal_by_dkwm` will fail with probability at
most `false_fail_rate / K` (which amounts to `false_fail_rate` if
applied to the whole batch at once), and (b) if the true mean
differs from the expected mean by at least `discrepancy[i]`,
`assert_true_mean_equal_by_dkwm` will pass with probability at most
`false_pass_rate`.
The required number of samples scales
as `O((high[i] - low[i])**2)`, `O(-log(false_fail_rate/K))`,
`O(-log(false_pass_rate))`, and `O(1 / discrepancy[i]**2)`.
"""
with ops.name_scope(
name, "min_num_samples_for_dkwm_mean_test",
[low, high, false_fail_rate, false_pass_rate, discrepancy]):
discrepancy = ops.convert_to_tensor(
discrepancy, name="discrepancy")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Could choose to cleverly allocate envelopes, but this is sound.
envelope1 = discrepancy / (2. * (high - low))
envelope2 = envelope1
false_fail_rate = _itemwise_error_rate(
false_fail_rate, [low, high, discrepancy])
n1 = -math_ops.log(false_fail_rate / 2.) / (2. * envelope1**2)
n2 = -math_ops.log(false_pass_rate / 2.) / (2. * envelope2**2)
return math_ops.maximum(n1, n2)
def assert_true_mean_equal_by_dkwm_two_sample(
samples1, low1, high1, samples2, low2, high2,
false_fail_rate=1e-6, name=None):
"""Asserts the means of the given distributions are equal.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the means of the distributions from which the given samples are
drawn are _not_ equal with statistical significance `false_fail_rate`
or stronger, otherwise passes. If you also want to check that you
are gathering enough evidence that a pass is not spurious, see
`min_num_samples_for_dkwm_mean_two_sample_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm_two_sample`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples1: Floating-point tensor of samples from the
distribution(s) A. Entries are assumed IID across the 0th
dimension. The other dimensions must broadcast with `low1`,
`high1`, `low2`, and `high2`.
low1: Floating-point tensor of lower bounds on the supports of the
distributions A.
high1: Floating-point tensor of upper bounds on the supports of
the distributions A.
samples2: Floating-point tensor of samples from the
distribution(s) B. Entries are assumed IID across the 0th
dimension. The other dimensions must broadcast with `low1`,
`high1`, `low2`, and `high2`.
low2: Floating-point tensor of lower bounds on the supports of the
distributions B.
high2: Floating-point tensor of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* admissible total rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if any pair of confidence
intervals true for corresponding true means do not overlap.
"""
with ops.name_scope(
name, "assert_true_mean_equal_by_dkwm_two_sample",
[samples1, low1, high1, samples2, low2, high2, false_fail_rate]):
samples1 = ops.convert_to_tensor(samples1, name="samples1")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
samples2 = ops.convert_to_tensor(samples2, name="samples2")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
samples1 = _check_shape_dominates(samples1, [low1, high1])
samples2 = _check_shape_dominates(samples2, [low2, high2])
compatible_samples = check_ops.assert_equal(
array_ops.shape(samples1)[1:], array_ops.shape(samples2)[1:])
with ops.control_dependencies([compatible_samples]):
# Could in principle play games with cleverly allocating
# significance instead of the even split below. It may be possible
# to get tighter intervals, in order to obtain a higher power test.
# Any allocation strategy that depends only on the support bounds
# and sample counts should be valid; however, because the intervals
# scale as O(-log(false_fail_rate)), there doesn't seem to be much
# room to win.
min_mean_1, max_mean_1 = true_mean_confidence_interval_by_dkwm(
samples1, low1, high1, false_fail_rate / 2.)
min_mean_2, max_mean_2 = true_mean_confidence_interval_by_dkwm(
samples2, low2, high2, false_fail_rate / 2.)
# I want to assert
# not (max_mean_1 < min_mean_2 or min_mean_1 > max_mean_2),
# but I think I only have and-combination of asserts, so use DeMorgan.
clause1_op = check_ops.assert_greater_equal(max_mean_1, min_mean_2)
with ops.control_dependencies([clause1_op]):
return check_ops.assert_less_equal(min_mean_1, max_mean_2)
def min_discrepancy_of_true_means_detectable_by_dkwm_two_sample(
n1, low1, high1, n2, low2, high2,
false_fail_rate, false_pass_rate, name=None):
"""Returns the minimum mean discrepancy for a two-sample DKWM-based test.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Note that `false_fail_rate` is a total false failure rate for all
the tests in the batch. As such, if the batch is nontrivial, each
member will demand more samples. The `false_pass_rate` is also
interpreted as a total, but is treated asymmetrically: If each test
in the batch detects its corresponding discrepancy with probability
at least `1 - false_pass_rate`, then running all those tests and
failing if any one fails will jointly detect all those discrepancies
with the same `false_pass_rate`.
Args:
n1: Tensor of numbers of samples to be drawn from the distributions A.
low1: Floating-point tensor of lower bounds on the supports of the
distributions A.
high1: Floating-point tensor of upper bounds on the supports of
the distributions A.
n2: Tensor of numbers of samples to be drawn from the distributions B.
low2: Floating-point tensor of lower bounds on the supports of the
distributions B.
high2: Floating-point tensor of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* admissible total rate of false failures.
false_pass_rate: *Scalar* admissible rate of false passes.
name: A name for this operation (optional).
Returns:
discr: Tensor of lower bounds on the distances between true means
detectable by a two-sample DKWM-based test.
For each batch member `i`, of `K` total, drawing `n1[i]` samples
from scalar distribution A supported on `[low1[i], high1[i]]` and `n2[i]`
samples from scalar distribution B supported on `[low2[i], high2[i]]`
is enough to detect a difference in their true means of size
`discr[i]` or more. Specifically, we guarantee that (a) if their
true means are equal, `assert_true_mean_equal_by_dkwm_two_sample`
will fail with probability at most `false_fail_rate/K` (which
amounts to `false_fail_rate` if applied to the whole batch at once),
and (b) if their true means differ by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm_two_sample` will pass with
probability at most `false_pass_rate`.
  The detectable discrepancy scales as
- `O(high1[i] - low1[i])`, `O(high2[i] - low2[i])`,
- `O(1 / sqrt(n1[i]))`, `O(1 / sqrt(n2[i]))`,
- `O(-log(false_fail_rate/K))`, and
- `O(-log(false_pass_rate))`.
"""
with ops.name_scope(
name, "min_discrepancy_of_true_means_detectable_by_dkwm_two_sample",
[n1, low1, high1, n2, low2, high2, false_fail_rate, false_pass_rate]):
n1 = ops.convert_to_tensor(n1, name="n1")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
n2 = ops.convert_to_tensor(n2, name="n2")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
det_disc1 = min_discrepancy_of_true_means_detectable_by_dkwm(
n1, low1, high1, false_fail_rate / 2., false_pass_rate / 2.)
det_disc2 = min_discrepancy_of_true_means_detectable_by_dkwm(
n2, low2, high2, false_fail_rate / 2., false_pass_rate / 2.)
return det_disc1 + det_disc2
def min_num_samples_for_dkwm_mean_two_sample_test(
discrepancy, low1, high1, low2, high2,
false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
"""Returns how many samples suffice for a two-sample DKWM mean test.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Args:
discrepancy: Floating-point tensor of desired upper limits on mean
differences that may go undetected with probability higher than
`1 - false_pass_rate`.
low1: Floating-point tensor of lower bounds on the supports of the
distributions A.
high1: Floating-point tensor of upper bounds on the supports of
the distributions A.
low2: Floating-point tensor of lower bounds on the supports of the
distributions B.
high2: Floating-point tensor of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* admissible total rate of false failures.
false_pass_rate: *Scalar* admissible rate of false passes.
name: A name for this operation (optional).
Returns:
n1: Tensor of numbers of samples to be drawn from the distributions A.
n2: Tensor of numbers of samples to be drawn from the distributions B.
For each batch member `i`, of `K` total, drawing `n1[i]` samples
from scalar distribution A supported on `[low1[i], high1[i]]` and `n2[i]`
samples from scalar distribution B supported on `[low2[i], high2[i]]`
is enough to detect a difference in their true means of size
`discr[i]` or more. Specifically, we guarantee that (a) if their
true means are equal, `assert_true_mean_equal_by_dkwm_two_sample`
will fail with probability at most `false_fail_rate/K` (which
amounts to `false_fail_rate` if applied to the whole batch at once),
and (b) if their true means differ by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm_two_sample` will pass with
probability at most `false_pass_rate`.
The required number of samples scales as
- `O((high1[i] - low1[i])**2)`, `O((high2[i] - low2[i])**2)`,
- `O(-log(false_fail_rate/K))`,
- `O(-log(false_pass_rate))`, and
- `O(1 / discrepancy[i]**2)`.
"""
with ops.name_scope(
name, "min_num_samples_for_dkwm_mean_two_sample_test",
[low1, high1, low2, high2,
false_fail_rate, false_pass_rate, discrepancy]):
discrepancy = ops.convert_to_tensor(discrepancy, name="discrepancy")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Could choose to cleverly allocate discrepancy tolerances and
# failure probabilities, but this is sound.
n1 = min_num_samples_for_dkwm_mean_test(
discrepancy / 2., low1, high1,
false_fail_rate / 2., false_pass_rate / 2.)
n2 = min_num_samples_for_dkwm_mean_test(
discrepancy / 2., low2, high2,
false_fail_rate / 2., false_pass_rate / 2.)
return n1, n2
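# ----------------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the original
# module): the bounds, rates and sample counts below are arbitrary choices, and
# a TF1-style Session workflow is assumed.
if __name__ == "__main__":
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.ops import random_ops

  low, high, expected_mean = 0., 1., 0.5
  # How many samples would be needed to resolve a mean discrepancy of 0.1?
  n_req = min_num_samples_for_dkwm_mean_test(
      discrepancy=0.1, low=low, high=high,
      false_fail_rate=1e-6, false_pass_rate=1e-6)
  # Check that the true mean of a uniform sample on [0, 1) is 0.5.
  samples = random_ops.random_uniform([4000], minval=low, maxval=high)
  check_op = assert_true_mean_equal_by_dkwm(
      samples, low=low, high=high, expected=expected_mean,
      false_fail_rate=1e-6)
  with session_lib.Session() as sess:
    print("samples required:", sess.run(n_req))
    sess.run(check_op)  # raises InvalidArgumentError if the check fails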
| apache-2.0 | 7,352,941,959,383,396,000 | 43.583791 | 80 | 0.697045 | false |
Marcello-Sega/pytim | pytim/interface.py | 1 | 27725 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
from abc import ABCMeta, abstractmethod
import numpy as np
from .properties import _create_property
from .writepdb import _writepdb
from . import messages
from . import utilities
from scipy.spatial import cKDTree
class Interface(object):
""" The Interface metaclass. Classes for interfacial determination
(ITIM, GITIM,...) are derived from this one
"""
__metaclass__ = ABCMeta
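    # An illustrative (assumed) usage of a concrete subclass; ITIM is one of
    # the derived classes, 'water.gro' is a hypothetical topology and 'OW'
    # selects the water oxygens:
    #
    #   import MDAnalysis as mda
    #   import pytim
    #   u = mda.Universe('water.gro')
    #   g = u.select_atoms('name OW')
    #   inter = pytim.ITIM(u, group=g, alpha=2.0, max_layers=1)
    #   first_layer_upper = inter.layers[0, 0]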
directions_dict = {
0: 'x',
1: 'y',
2: 'z',
'x': 'x',
'y': 'y',
'z': 'z',
'X': 'x',
'Y': 'y',
'Z:': 'z'
}
symmetry_dict = {
'generic': 'generic',
'cylindrical': 'cylindrical',
'spherical': 'spherical',
'planar': 'planar'
}
# main properties shared by all implementations of the class
# When required=True is passed, the implementation of the class *must*
# override the method when instantiating the class (i.e., before __init__)
# By default required=False, and the name is set to None
# interface *must* be created first.
alpha, _alpha =\
_create_property('alpha', "(float) real space cutoff")
layers, _layers =\
_create_property('layers', "AtomGroups of atoms in layers")
analysis_group, _analysis_group =\
_create_property('analysis_group', "(AtomGroup) the group, "
"the surface of which should be computed")
cluster_cut, _cluster_cut =\
_create_property('cluster_cut', "(real) cutoff for phase "
"identification")
molecular, _molecular =\
_create_property('molecular', "(bool) whether to compute "
"surface atoms or surface molecules")
surfaces, _surfaces =\
_create_property('surfaces', "Surfaces associated to the interface",
readonly=True)
info, _info =\
_create_property('info', "(bool) print additional information")
multiproc, _multiproc =\
_create_property('multiproc', "(bool) use parallel implementation")
extra_cluster_groups, _extra_cluster_groups =\
_create_property('extra_cluster_groups',
"(ndarray) additional cluster groups")
radii_dict, _radii_dict =\
_create_property('radii_dict', "(dict) custom atomic radii")
max_layers, _max_layers =\
_create_property('max_layers',
"(int) maximum number of layers to be identified")
autoassign, _autoassign =\
_create_property('autoassign',
"(bool) assign layers every time a frame changes")
cluster_threshold_density, _cluster_threshold_density =\
_create_property('cluster_threshold_density',
"(float) threshold for the density-based filtering")
# TODO: does this belong here ?
_interpolator, __interpolator =\
_create_property('_interpolator', "(dict) custom atomic radii")
@abstractmethod
def __init__(self):
pass
@abstractmethod
def _assign_layers(self):
pass
@property
def atoms(self):
return self._layers[:].sum()
@property
def method(self):
return self.__class__.__name__
def label_planar_sides(self):
""" Assign to all layers a label (the beta tempfactor)
that can be used in pdb files. Additionally, set
the new layers and sides.
"""
for uplow in [0, 1]:
for nlayer, layer in enumerate(self._layers[uplow]):
if layer is None:
self._layers[uplow][nlayer] = self.universe.atoms[:0]
else:
self.label_group(
layer, beta=nlayer + 1.0, layer=nlayer + 1, side=uplow)
def label_group(self,
group,
beta=None,
layer=None,
cluster=None,
side=None):
if group is None:
raise RuntimeError(
'one of the groups, possibly a layer one, is None.' +
' Something is wrong...')
if len(group) == 0:
return
if self.molecular is True:
_group = group.residues.atoms
else:
_group = group
if beta is not None:
_group.tempfactors = float(beta)
if layer is not None:
_group.layers = layer
if side is not None:
_group.sides = side
if cluster is not None:
_group.clusters = cluster
def _assign_symmetry(self, symmetry):
if self.analysis_group is None:
raise TypeError(messages.UNDEFINED_ANALYSIS_GROUP)
if symmetry == 'guess':
raise ValueError("symmetry 'guess' To be implemented")
else:
if not (symmetry in self.symmetry_dict):
raise ValueError(messages.WRONG_DIRECTION)
self.symmetry = symmetry
def _define_cluster_group(self):
self.universe.atoms.pack_into_box()
self.cluster_group = self.universe.atoms[:0] # empty
if (self.cluster_cut is not None):
cluster_cut = float(self.cluster_cut[0])
# we start by adding the atoms in the smaller clusters
            # of the opposite phase, if extra_cluster_groups are provided
if (self.extra_cluster_groups is not None):
for extra in self.extra_cluster_groups:
x_labels, x_counts, _ = utilities.do_cluster_analysis_dbscan(
extra, cluster_cut, self.cluster_threshold_density,
self.molecular)
x_labels = np.array(x_labels)
x_label_max = np.argmax(x_counts)
x_ids_other = np.where(x_labels != x_label_max)[0]
self.cluster_group += extra[x_ids_other]
# next, we add the atoms belonging to the main phase
self.cluster_group += self.analysis_group
# groups have been checked already in _sanity_checks()
# self.cluster_group at this stage is composed of analysis_group +
# the smaller clusters of the other phase
labels, counts, neighbors = utilities.do_cluster_analysis_dbscan(
self.cluster_group, cluster_cut,
self.cluster_threshold_density, self.molecular)
labels = np.array(labels)
# counts is not necessarily ordered by size of cluster.
sorting = np.argsort(counts)[::-1]
# labels for atoms in each cluster starting from the largest
unique_labels = np.sort(np.unique(labels[labels > -1]))
# by default, all elements of the cluster_group are in
# single-molecule/atom clusters. We will update them right after.
self.label_group(self.cluster_group, cluster=-1)
# we go in reverse order to let smaller labels (bigger clusters)
# overwrite larger labels (smaller cluster) when the molecular
# option is used.
for el in unique_labels[::-1]:
# select a label
cond = np.where(labels == el)
if self.molecular is True:
g_ = self.cluster_group[cond].residues.atoms
else:
g_ = self.cluster_group[cond]
# probably we need an example here, say:
# counts = [ 61, 1230, 34, 0, ... 0 ,0 ]
# labels = [ 0, 1, 2, 1, -1 .... -1 ]
# we have three clusters, of 61, 1230 and 34 atoms.
# There are 61 labels '0'
# 1230 labels '1'
# 34 labels '2'
# the remaining are '-1'
#
# sorting = [1,0,2,3,....] i.e. the largest element is in
# (1230) position 1, the next (61) is in position 0, ...
# Say, g_ is now the group with label '1' (the biggest cluster)
# Using argwhere(sorting==1) returns exactly 0 -> the right
# ordered label for the largest cluster.
self.label_group(g_, cluster=np.argwhere(sorting == el)[0, 0])
# now that labels are assigned for each of the clusters,
            # we can restrict the cluster group to the largest cluster.
if self.biggest_cluster_only:
label_max = np.argmax(counts)
ids_max = np.where(labels == label_max)[0]
self.cluster_group = self.cluster_group[ids_max]
else: # we still filter out molecules which do not belong to any cluster
ids = np.where(labels != -1)[0]
self.cluster_group = self.cluster_group[ids]
self.n_neighbors = neighbors
else:
self.cluster_group = self.analysis_group
self.label_group(self.cluster_group, cluster=0)
def is_buried(self, pos):
""" Checks wether an array of positions are located below
the first interfacial layer """
inter = self
box = inter.universe.dimensions[:3]
nonsurface = inter.cluster_group - inter.atoms[inter.atoms.layers == 1]
# there are no inner atoms, distance is always > 0
if len(nonsurface) == 0:
return np.asarray([True] * len(pos))
tree = cKDTree(nonsurface.positions, boxsize=box)
neighs = tree.query_ball_point(pos, inter.alpha)
condition = np.array([len(el) != 0 for el in neighs])
return condition
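    # Illustrative sketch (not from the original source): how is_buried()
    # might be called once an interface has been computed. The universe and
    # group names below are assumptions made for this example only.
    #
    #     inter = pytim.ITIM(u, group=oxygens, alpha=2.0)
    #     buried_mask = inter.is_buried(oxygens.positions)
    #     buried_oxygens = oxygens[buried_mask]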
def reset_labels(self):
""" Reset labels before interfacial analysis"""
self.label_group(
self.universe.atoms, beta=0.0, layer=-1, cluster=-1, side=-1)
@staticmethod
def _attempt_shift(group, _pos_group, direction, halfbox_shift, _dir):
if _dir == 'x':
utilities.centerbox(
group.universe,
x=_pos_group,
center_direction=direction,
halfbox_shift=halfbox_shift)
if _dir == 'y':
utilities.centerbox(
group.universe,
y=_pos_group,
center_direction=direction,
halfbox_shift=halfbox_shift)
if _dir == 'z':
utilities.centerbox(
group.universe,
z=_pos_group,
center_direction=direction,
halfbox_shift=halfbox_shift)
def prepare_box(self):
""" Before the analysis, pack every molecule into the box.
            Keep the original positions for later use.
"""
self.original_positions = np.copy(self.universe.atoms.positions[:])
self.universe.atoms.pack_into_box()
@staticmethod
def _center(group, direction, halfbox_shift=False):
"""
Centers the liquid slab in the simulation box.
The algorithm tries to avoid problems with the definition
of the center of mass. First, a rough density profile
(10 bins) is computed. Then, the group is shifted
and reboxed until the bins at the box boundaries have a
density lower than a threshold delta
In ITIM, the system along the normal direction is always
centered at 0 (halfbox_shift==True). To center to the middle
of the box along all directions, set halfbox_shift=False
"""
dim = group.universe.coord.dimensions
total_shift = 0
if not (direction in Interface.directions_dict):
raise ValueError(messages.WRONG_DIRECTION)
_dir = Interface.directions_dict[direction]
_xyz = {
'x': (0, utilities.get_x),
'y': (1, utilities.get_y),
'z': (2, utilities.get_z)
}
if _dir in _xyz.keys():
direction = _xyz[_dir][0]
_pos_group = (_xyz[_dir][1])(group)
shift = dim[direction] / 100.
_x = utilities.get_x(group.universe.atoms)
_y = utilities.get_y(group.universe.atoms)
_z = utilities.get_z(group.universe.atoms)
_range = (0., dim[direction])
if (halfbox_shift is True):
_range = (-dim[direction] / 2., dim[direction] / 2.)
histo, _ = np.histogram(
_pos_group, bins=10, range=_range, density=True)
max_val, min_val = np.amax(histo), np.amin(histo)
# NOTE maybe allow user to set different values
delta = min_val + (max_val - min_val) / 3.
# let's first avoid crossing pbc with the liquid phase. This can fail:
while (histo[0] > delta or histo[-1] > delta):
total_shift += shift
if total_shift >= dim[direction]:
raise ValueError(messages.CENTERING_FAILURE)
_pos_group += shift
Interface._attempt_shift(group, _pos_group, direction,
halfbox_shift, _dir)
histo, _ = np.histogram(
_pos_group, bins=10, range=_range, density=True)
_center_ = np.average(_pos_group)
if (halfbox_shift is False):
box_half = dim[direction] / 2.
else:
box_half = 0.
_pos = {'x': _x, 'y': _y, 'z': _z}
if _dir in _pos.keys():
_pos[_dir] += total_shift - _center_ + box_half
# finally, we copy everything back
group.universe.atoms.positions = np.column_stack((_x, _y, _z))
@staticmethod
def shift_positions_to_middle(universe, normal):
box = universe.dimensions[normal]
translation = [0, 0, 0]
translation[normal] = box / 2.
universe.atoms.positions += np.array(translation)
universe.atoms.pack_into_box()
def _shift_positions_to_middle(self):
Interface.shift_positions_to_middle(self.universe, self.normal)
@staticmethod
def center_system(symmetry, group, direction, planar_to_origin=False):
if symmetry == 'planar':
utilities.centerbox(group.universe, center_direction=direction)
Interface._center(group, direction, halfbox_shift=True)
utilities.centerbox(group.universe, center_direction=direction)
if planar_to_origin is False:
Interface.shift_positions_to_middle(group.universe, direction)
else:
for xyz in [0, 1, 2]:
try:
Interface._center(group, xyz, halfbox_shift=False)
except ValueError:
pass
group.universe.atoms.pack_into_box()
def center(self, planar_to_origin=False):
Interface.center_system(
self.symmetry,
self.cluster_group,
self.normal,
planar_to_origin=planar_to_origin)
self.centered_positions = np.copy(self.universe.atoms.positions[:])
def writepdb(self,
filename='layers.pdb',
centered='no',
group='all',
multiframe=True,
tempfactors=None):
""" Write the frame to a pdb file, marking the atoms belonging
to the layers with different beta factors.
:param str filename: the output file name
:param str centered: 'origin', 'middle', or 'no'
:param AtomGroup group: if 'all' is passed, use universe
:param bool multiframe: append to pdb file if True
:param ndarray tempfactors: use this array as temp (beta) factors
Example: save the positions (centering the interface in the cell)
without appending
>>> import pytim
>>> import pytim.datafiles
>>> import MDAnalysis as mda
>>> from pytim.datafiles import WATER_GRO
>>> u = mda.Universe(WATER_GRO)
>>> interface = pytim.ITIM(u)
>>> interface.writepdb('layers.pdb',multiframe=False)
Example: save the positions without centering the interface. This
will not shift the atoms from the original position
(still, they will be put into the basic cell).
The :obj:`multiframe` option set to :obj:`False` will
overwrite the file
>>> interface.writepdb('layers.pdb',centered='no')
Note that if :mod:`~pytim.gitim.GITIM` is used, and the
:obj:`symmetry` option is different from :obj:`planar`,
the :obj:`centered='origin'` option is equivalent to
:obj:`centered='middle'`.
"""
_writepdb(
self,
filename=filename,
centered=centered,
group=group,
multiframe=multiframe,
tempfactors=tempfactors)
@staticmethod
def _():
"""
This is a collection of basic tests to check
that code is running -- no test on the correctness
of the output is performed here.
>>> # TEST:0 loading the module
>>> import pytim
>>> pytim.ITIM._() ; # coverage
>>> # TEST:1 basic functionality
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import *
>>> u = mda.Universe(WATER_GRO)
>>> oxygens = u.select_atoms("name OW")
>>> interface = pytim.ITIM(u, alpha=1.5, max_layers=4)
>>> print (len(interface.layers[0,0]))
786
>>> del interface
>>> interface = pytim.ITIM(u, alpha=1.5, max_layers=4, multiproc=False)
>>> print (len(interface.layers[0,0]))
786
>>> del interface
>>> # TEST:2 basic functionality
>>> u=None
>>> interface = pytim.GITIM(u)
Traceback (most recent call last):
...
Exception: Wrong Universe
>>> interface = pytim.ITIM(u)
Traceback (most recent call last):
...
Exception: Wrong Universe
>>> # TEST:3 large probe sphere radius
>>> u = mda.Universe(WATER_GRO)
>>> interface = pytim.ITIM(u, alpha=100000.0, max_layers=1,multiproc=False)
Traceback (most recent call last):
...
ValueError: parameter alpha must be smaller than the smaller box side
>>> # TEST:3b no surface atoms
>>> u = mda.Universe(GLUCOSE_PDB)
>>> g = u.select_atoms('type C or name OW')
>>> interface = pytim.GITIM(u,group=g, alpha=4.0)
>>> print(interface.atoms)
<AtomGroup []>
>>> # TEST:4 interchangeability of Universe/AtomGroup
>>> u = mda.Universe(WATER_GRO)
>>> oxygens = u.select_atoms("name OW")
>>> interface = pytim.ITIM(u, alpha=1.5,group=oxygens, max_layers=1,multiproc=False,molecular=False)
>>> print (len(interface.layers[0,0]))
262
>>> interface = pytim.ITIM(oxygens, alpha=1.5,max_layers=1,multiproc=False,molecular=False)
>>> print (len(interface.layers[0,0]))
262
>>> # PDB FILE FORMAT
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import WATER_GRO
>>> u = mda.Universe(WATER_GRO)
>>> oxygens = u.select_atoms("name OW")
>>> interface = pytim.ITIM(u, alpha=1.5, max_layers=4,molecular=True)
>>> interface.writepdb('test.pdb',centered=False)
>>> PDB =open('test.pdb','r').readlines()
>>> line = list(filter(lambda l: 'ATOM 19 ' in l, PDB))[0]
>>> beta = line[62:66] # PDB file format is fixed
>>> print(beta)
4.00
>>> # correct behaviour of biggest_cluster_only option
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import ANTAGONISTIC_GRO
>>> u = mda.Universe(ANTAGONISTIC_GRO)
>>> g = u.atoms.select_atoms('resname bph4')
>>> # Define the interface
>>> inter = pytim.SASA( g, alpha=2.5, max_layers=2, cluster_cut=3.5, biggest_cluster_only=False, molecular=True)
>>> print(repr(inter.atoms))
<AtomGroup with 2025 atoms>
>>> inter = pytim.SASA( g, alpha=2.5, max_layers=2, cluster_cut=3.5, biggest_cluster_only=True, molecular=True)
>>> print(repr(inter.atoms))
<AtomGroup with 855 atoms>
>>> # mdtraj
>>> try:
... import mdtraj
... try:
... import numpy as np
... import MDAnalysis as mda
... import pytim
... from pytim.datafiles import WATER_GRO,WATER_XTC
... from pytim.datafiles import pytim_data,G43A1_TOP
... # MDAnalysis
... u = mda.Universe(WATER_GRO,WATER_XTC)
... ref = pytim.ITIM(u)
... # mdtraj
... t = mdtraj.load_xtc(WATER_XTC,top=WATER_GRO)
... # mdtraj manipulates the name of atoms, we need to set the
... # radii by hand
... _dict = { 'O':pytim_data.vdwradii(G43A1_TOP)['OW'],'H':0.0}
... inter = pytim.ITIM(t, radii_dict=_dict)
... ids_mda = []
... ids_mdtraj = []
... for ts in u.trajectory[0:2]:
... ids_mda.append(ref.atoms.ids)
... for ts in t[0:2]:
... ids_mdtraj.append(inter.atoms.ids)
... for fr in [0,1]:
... if not np.all(ids_mda[fr] == ids_mdtraj[fr]):
... print ("MDAnalysis and mdtraj surface atoms do not coincide")
... _a = u.trajectory[1] # we make sure we load the second frame
... _b = t[1]
... if not np.all(np.isclose(inter.atoms.positions[0], ref.atoms.positions[0])):
... print("MDAnalysis and mdtraj atomic positions do not coincide")
... except:
... raise RuntimeError("mdtraj available, but a general exception happened")
... except:
... pass
>>> # check that using the biggest_cluster_only option without setting cluster_cut
>>> # throws a warning and resets to biggest_cluster_only == False
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import GLUCOSE_PDB
>>>
>>> u = mda.Universe(GLUCOSE_PDB)
>>> solvent = u.select_atoms('name OW')
>>> inter = pytim.GITIM(u, group=solvent, biggest_cluster_only=True)
Warning: the option biggest_cluster_only has no effect without setting cluster_cut, ignoring it
>>> print (inter.biggest_cluster_only)
False
>>> import pytim
>>> import pytest
>>> import MDAnalysis as mda
>>> u = mda.Universe(pytim.datafiles.WATER_GRO)
>>>
>>> with pytest.raises(Exception):
... pytim.ITIM(u,alpha=-1.0)
>>> with pytest.raises(Exception):
... pytim.ITIM(u,alpha=1000000)
>>> pytim.ITIM(u,mesh=-1)
Traceback (most recent call last):
...
ValueError: parameter mesh must be positive
>>> # check that it is possible to use two trajectories
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import WATER_GRO, WATER_XTC
>>> u = mda.Universe(WATER_GRO,WATER_XTC)
>>> u2 = mda.Universe(WATER_GRO,WATER_XTC)
>>> inter = pytim.ITIM(u,group=u.select_atoms('resname SOL'))
>>> inter2 = pytim.ITIM(u2,group=u2.select_atoms('resname SOL'))
>>> for ts in u.trajectory[::50]:
... ts2 = u2.trajectory[ts.frame]
"""
pass
@staticmethod
def __():
"""
This is a collection of test to check
that the algorithms are behaving properly if
the interface is rotated in space.
>>> # TEST:1, ITIM+GITIM, flat interface
>>> import MDAnalysis as mda
>>> import pytim
>>> import numpy as np
>>> from pytim.datafiles import WATER_GRO
>>> pytim.ITIM.__() ; # coverage
>>>
>>> for method in [pytim.ITIM , pytim.GITIM] :
... u = mda.Universe(WATER_GRO)
... positions = np.copy(u.atoms.positions)
... oxygens = u.select_atoms('name OW')
... interface = method(u,group=oxygens,molecular=False,alpha=2.5,_noextrapoints=True)
... #interface.writepdb(method.__name__+'.ref.pdb') ; # debug
... ref_box = np.copy(u.dimensions)
... ref_ind = np.sort(np.copy(interface.atoms.indices))
... ref_pos = np.copy(interface.atoms.positions)
...
... u.atoms.positions = np.copy(np.roll(positions,1,axis=1))
... box = np.roll(ref_box[:3],1)
... ref_box[:3] = box
... u.dimensions = ref_box
... interface = method(u,group=oxygens,molecular=False,alpha=2.5,_noextrapoints=True)
... ind = np.sort(interface.atoms.indices)
... #interface.writepdb(method.__name__+'.pdb') ; # debug
... cond = (ref_ind == ind )
... if np.all(cond) == False:
... miss1 = (np.in1d(ref_ind,ind)==False).sum()
... miss2 = (np.in1d(ind,ref_ind)==False).sum()
... percent = (miss1 + miss2)*0.5/len(ref_ind) * 100.
... if percent > 2: # this should be 0 for ITIM, and < 5
... # for GITIM, with this config+alpha
... print (miss1+miss2)
... print ( " differences in indices in method",)
... print ( method.__name__, " == ",percent," %")
>>> del interface
>>> del u
>>> # TEST:2, GITIM, micelle
>>> import MDAnalysis as mda
>>> import pytim
>>> import numpy as np
>>> from pytim.datafiles import MICELLE_PDB
>>>
>>> for method in [pytim.GITIM] :
... u = mda.Universe(MICELLE_PDB)
... positions = np.copy(u.atoms.positions)
... DPC = u.select_atoms('resname DPC')
... interface = method(u,group=DPC,molecular=False,alpha=2.5,_noextrapoints=True)
... #interface.writepdb(method.__name__+'.ref.pdb') ; # debug
... ref_box = np.copy(u.dimensions)
... ref_ind = np.sort(np.copy(interface.atoms.indices))
... ref_pos = np.copy(interface.atoms.positions)
...
... u.atoms.positions = np.copy(np.roll(positions,1,axis=1))
... box = np.roll(ref_box[:3],1)
... ref_box[:3] = box
... u.dimensions = ref_box
... interface = method(u,group=DPC,molecular=False,alpha=2.5,_noextrapoints=True)
... ind = np.sort(interface.atoms.indices)
... #interface.writepdb(method.__name__+'.pdb') ; # debug
... cond = (ref_ind == ind )
... if np.all(cond) == False:
... miss1 = (np.in1d(ref_ind,ind)==False).sum()
... miss2 = (np.in1d(ind,ref_ind)==False).sum()
... percent = (miss1 + miss2)*0.5/len(ref_ind) * 100.
... if percent > 4 : # should be ~ 4 % for this system
... print (miss1+miss2)
... print ( " differences in indices in method",)
... print ( method.__name__, " == ",percent," %")
>>> del interface
>>> del u
"""
pass
#
| gpl-3.0 | -8,665,581,660,487,102,000 | 38.949568 | 120 | 0.541064 | false |
iNecas/katello | cli/src/katello/client/api/repo.py | 1 | 5198 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
from katello.client.api.base import KatelloAPI
from katello.client.core.utils import update_dict_unless_none
# pylint: disable=R0904
class RepoAPI(KatelloAPI):
"""
Connection class to access repositories
"""
def create(self, orgName, prod_id, name, url, gpgkey, nogpgkey):
repodata = {
"organization_id": orgName,
"product_id": prod_id,
"name": name,
"url": url}
update_dict_unless_none(repodata, "gpg_key_name", gpgkey)
if nogpgkey:
repodata["gpg_key_name"] = ""
path = "/api/repositories/"
return self.server.POST(path, repodata)[1]
def update(self, repo_id, gpgkey, nogpgkey):
repodata = {}
update_dict_unless_none(repodata, "gpg_key_name", gpgkey)
if nogpgkey:
repodata["gpg_key_name"] = ""
path = "/api/repositories/%s/" % repo_id
return self.server.PUT(path, {"repository": repodata })[1]
def repos_by_org_env(self, orgName, envId, includeDisabled=False):
data = {
"include_disabled": includeDisabled
}
path = "/api/organizations/%s/environments/%s/repositories" % (orgName, envId)
result_list = self.server.GET(path, data)[1]
return result_list
def repos_by_env_product(self, envId, productId, name=None, includeDisabled=False):
path = "/api/environments/%s/products/%s/repositories" % (envId, productId)
search_params = {
"include_disabled": includeDisabled
}
        if name is not None:
search_params['name'] = name
result_list = self.server.GET(path, search_params)[1]
return result_list
def repos_by_product(self, orgName, productId, includeDisabled=False):
path = "/api/organizations/%s/products/%s/repositories" % (orgName, productId)
data = {
"include_disabled": includeDisabled
}
result_list = self.server.GET(path, data)[1]
return result_list
def repo(self, repo_id):
path = "/api/repositories/%s/" % repo_id
data = self.server.GET(path)[1]
return data
def enable(self, repo_id, enable=True):
data = {"enable": enable}
path = "/api/repositories/%s/enable/" % repo_id
return self.server.POST(path, data)[1]
def delete(self, repoId):
path = "/api/repositories/%s/" % repoId
return self.server.DELETE(path)[1]
def sync(self, repo_id):
path = "/api/repositories/%s/sync" % repo_id
data = self.server.POST(path)[1]
return data
def cancel_sync(self, repo_id):
path = "/api/repositories/%s/sync" % repo_id
data = self.server.DELETE(path)[1]
return data
def last_sync_status(self, repo_id):
path = "/api/repositories/%s/sync" % repo_id
data = self.server.GET(path)[1]
return data
def repo_discovery(self, org_name, url, repotype):
discoverydata = {"url": url, "type": repotype}
path = "/api/organizations/%s/repositories/discovery" % org_name
return self.server.POST(path, discoverydata)[1]
def repo_discovery_status(self, discoveryTaskId):
path = "/api/repositories/discovery/%s" % discoveryTaskId
return self.server.GET(path)[1]
def packagegroups(self, repoid):
path = "/api/repositories/%s/package_groups" % repoid
return self.server.GET(path)[1]
def packagegroup_by_id(self, repoid, groupId):
path = "/api/repositories/%s/package_groups/" % repoid
groups = self.server.GET(path, {"group_id": groupId})[1]
if len(groups) == 0:
return None
else:
return groups[0]
def packagegroupcategories(self, repoid):
path = "/api/repositories/%s/package_group_categories/" % repoid
return self.server.GET(path)[1]
def packagegroupcategory_by_id(self, repoid, categoryId):
path = "/api/repositories/%s/package_group_categories/" % repoid
categories = self.server.GET(path, {"category_id": categoryId})[1]
if len(categories) == 0:
return None
else:
return categories[0]
def update_filters(self, repo_id, filters):
path = "/api/repositories/%s/filters" % repo_id
return self.server.PUT(path, {"filters": filters})[1]
def filters(self, repo_id, inherit=False):
path = "/api/repositories/%s/filters" % repo_id
return self.server.GET(path, {"inherit": inherit})[1]
| gpl-2.0 | 7,461,500,698,733,066,000 | 35.598592 | 87 | 0.617279 | false |
cyrillg/ros-playground | src/deedee_driver/tests/test_motors_driver.py | 1 | 2314 | #! /usr/bin/env python
PKG='deedee_driver'
import sys
import unittest
from mock import Mock
from deedee_driver.motors_driver_lib import *
class TestMotorsDriver(unittest.TestCase):
def setUp(self):
robot_name = "test_bot"
config = {"max_wheel_speed": 0.55}
serial_socket = Mock()
self.md = MotorsDriver(robot_name, config, serial_socket)
def test_speed_to_PWM_correct_range(self):
'''
'''
max_wheel_speed = self.md.max_wheel_speed
for i in range(-10,11):
ratio = i / 10.
expected_uniform_speed = ratio*100.
uniform_speed = self.md.map_speed(ratio*max_wheel_speed)
self.assertEqual(uniform_speed,
expected_uniform_speed,
"%s != %s" % (uniform_speed,
expected_uniform_speed))
def test_speed_to_PWM_over_range(self):
'''
'''
max_wheel_speed = self.md.max_wheel_speed
for i in range(10,101):
ratio = i/10.
expected_uniform_speed = 100.
uniform_speed = self.md.map_speed(ratio*max_wheel_speed)
self.assertEqual(uniform_speed,
expected_uniform_speed,
"%s != %s" % (uniform_speed,
expected_uniform_speed))
for i in range(-100,-9):
ratio = i/10.
expected_uniform_speed = -100.
uniform_speed = self.md.map_speed(ratio*max_wheel_speed)
self.assertEqual(uniform_speed,
expected_uniform_speed,
"%s != %s" % (uniform_speed,
expected_uniform_speed))
def test_build_packet(self):
'''
'''
speed_left = 23
speed_right = -100
expected_packet = "$0,23,-100\n"
packet = self.md.build_packet(speed_left, speed_right)
self.assertEqual(packet,
expected_packet,
"%s != %s" % (packet,
expected_packet))
if __name__ == '__main__':
import rosunit
rosunit.unitrun(PKG, 'test_MotorsDriver', TestMotorsDriver)
| mit | -295,165,355,079,123,500 | 31.138889 | 68 | 0.492653 | false |
openstack/networking-sfc | networking_sfc/db/migration/alembic_migrations/versions/newton/contract/06382790fb2c_fix_foreign_constraints.py | 1 | 1647 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""fix foreign constraints
Revision ID: 06382790fb2c
Create Date: 2016-08-11 14:45:34.416120
"""
from alembic import op
from sqlalchemy.engine import reflection
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '06382790fb2c'
down_revision = '010308b06b49'
def upgrade():
inspector = reflection.Inspector.from_engine(op.get_bind())
fks_to_cascade = {
'sfc_flow_classifier_l7_parameters': 'classifier_id',
'sfc_chain_group_associations': 'portchain_id',
'sfc_port_chain_parameters': 'chain_id',
'sfc_service_function_params': 'pair_id',
'sfc_chain_classifier_associations': 'portchain_id'
}
for table, column in fks_to_cascade.items():
fk_constraints = inspector.get_foreign_keys(table)
for fk in fk_constraints:
if column in fk['constrained_columns']:
fk['options']['ondelete'] = 'CASCADE'
migration.remove_foreign_keys(table, fk_constraints)
migration.create_foreign_keys(table, fk_constraints)
| apache-2.0 | -3,674,974,696,152,133,600 | 33.3125 | 78 | 0.686096 | false |
cwaldbieser/txcas | sample.py | 1 | 22229 | #! /usr/bin/env python
from __future__ import print_function
import argparse
import cgi
from textwrap import dedent
from urllib import urlencode
import sys
from txcas.constants import (
VIEW_LOGIN,
VIEW_LOGIN_SUCCESS,
VIEW_LOGOUT,
VIEW_INVALID_SERVICE,
VIEW_ERROR_5XX,
VIEW_NOT_FOUND)
from txcas.interface import (
IRealmFactory,
IServiceManagerFactory,
ITicketStoreFactory)
from txcas.server import escape_html
import txcas.settings
from klein import Klein
from twisted.python import log
from twisted.web import microdom
from twisted.web.client import getPage
def custom_login(ticket, service, failed, request):
service_lookup = {
'http://127.0.0.1:9801/landing': 'Cool App #1',
'http://127.0.0.1:9802/landing': 'Awesome App #2',
'http://127.0.0.1:9803/landing': 'Super Secure App #3',
'http://127.0.0.1:9804/landing': 'Just Another App #4',
}
top = dedent('''\
<!DOCTYPE html>
<html>
<body>
<h1>CAS Login - %(service_name)s</h1>
''')
failblurb = dedent('''\
<p>Login Failed. Try again.</p>
''')
formtop = dedent('''\
<form method="post" action="">
Username: <input type="text" name="username" />
<br />Password: <input type="password" name="password" />
<input type="hidden" name="lt" value="%(lt)s" />
''')
middle = ' <input type="hidden" name="service" value="%(service)s" />'
bottom = dedent('''\
<input type="submit" value="Sign in" />
</form>
</body>
</html>
''')
parts = [top]
if failed:
parts.append(failblurb)
parts.append(formtop)
if service != "":
parts.append(middle)
parts.append(bottom)
template = '\n'.join(parts)
return template % {
'lt': cgi.escape(ticket),
'service': cgi.escape(service),
'service_name': cgi.escape(service_lookup.get(service, "SSO Login"))
}
class MyApp(object):
app = Klein()
def __init__(
self, color, cas_root, allow_sso=True,
act_as_proxy=None, act_as_link_in_proxy_chain=None):
self.color = color
self.cas_root = cas_root
self.allow_sso = allow_sso
self.act_as_proxy = act_as_proxy
self._ious = {}
self.act_as_link_in_proxy_chain = act_as_link_in_proxy_chain
@app.route('/')
def index(self, request):
session = request.getSession()
me = request.URLPath().child('landing')
service = request.URLPath().path
if self.act_as_proxy is not None:
parts = ["""<li><a href="/pgtinfo">Click here to see your current PGT.</a>.</li>"""]
parts.append("""<li><a href="/proxy-a-service">This service will proxy another service.</a>.</li>""")
parts.append("""<li><a href="/proxy-a-service-mismatch">This service will proxy another, but the PT will be requested (erroneously) with a different URL than the service for which it is presented.</a>.</li>""")
parts.append("""<li><a href="/badproxyticket">Make a bad request for a proxy ticket.</a>.</li>""")
pgt_markup = '\n'.join(parts)
else:
pgt_markup = ""
return '''<html>
<body style="background: %(color)s">
Welcome to the app.
<br />You are logged in as: %(user)s
<ul>
<li><a href="%(cas_root)s/login?service=%(service)s">Click here to login</a>.</li>
<li><a href="%(cas_root)s/login?service=%(service)s&renew=true">Click here to login, forcing the login page.</a>.</li>
<li><a href="%(cas_root)s/login?service=%(service)s&gateway=true">Click here to login, using SSO or failing.</a>.</li>
<li><a href="%(cas_root)s/login">Click here to login to an SSO session (no service)</a>.</li>
%(pgt_markup)s
<li><a href="%(cas_root)s/logout?service=%(logout_service)s">Click here to logout of your SSO session.</a>.</li>
</ul>
</body>
</html>''' % {
'cas_root': self.cas_root,
'service': str(request.URLPath().child('landing')),
'logout_service': request.URLPath().here(),
'user': getattr(request.getSession(), 'username', '(nobody)'),
'color': self.color,
'pgt_markup': pgt_markup,
}
@app.route('/landing', methods=['GET'])
def landing_GET(self, request):
try:
ticket = request.args['ticket'][0]
except (KeyError, IndexError):
return 'Invalid login attempt'
if not ticket:
return 'Invalid login attempt'
url = self.cas_root + '/serviceValidate'
q = {
'service': str(request.URLPath()),
'ticket': ticket,
}
if not self.allow_sso:
q['renew'] = True
if self.act_as_proxy is not None:
if request.isSecure():
scheme = "https://"
else:
scheme = "http://"
host = request.getHost()
netloc = "%s:%d" % (host.host, host.port)
q['pgtUrl'] = scheme + netloc + '/proxycb'
params = urlencode(q)
url += '?' + params
d = getPage(url)
def gotResponse(response):
log.msg(response)
doc = microdom.parseString(response)
elms = doc.getElementsByTagName("cas:authenticationSuccess")
valid = False
pgt = None
if len(elms) > 0:
valid = True
elms = doc.getElementsByTagName("cas:user")
if len(elms) > 0:
elm = elms[0]
username = elm.childNodes[0].value
elms = doc.getElementsByTagName("cas:proxyGrantingTicket")
if len(elms) > 0:
elm = elms[0]
iou = elm.childNodes[0].value
pgt = None
if iou in self._ious:
pgt = self._ious[iou]
del self._ious[iou]
else:
log.msg("[WARNING] Could not corrolate PGTIOU '%s'." % iou)
if not valid:
raise Exception('Invalid login')
session = request.getSession()
session.username = username
if pgt is not None:
session.pgt = pgt
log.msg("PGT added to session '%s'." % pgt)
request.redirect(request.URLPath().sibling('').path)
def eb(err):
log.err(err)
return 'Invalid login attempt'
return d.addCallback(gotResponse).addErrback(eb)
@app.route('/landing', methods=['POST'])
def landing_POST(self, request):
doc = microdom.parseString(request.content.read())
elms = doc.getElementsByTagName("samlp:SessionIndex")
if len(elms) > 0:
elm = elms[0]
st = elm.childNodes[0].value
log.msg("Received POST SLO with Session Index '%s'." % st)
return "ACK"
@app.route('/proxycb', methods=['GET'])
def proxycb_GET(self, request):
pgtId = request.args.get('pgtId', [None])[0]
pgtIou = request.args.get('pgtIou', [None])[0]
if (pgtId is not None) and (pgtIou is not None):
self._ious[pgtIou] = pgtId
return "OK"
@app.route('/pgtinfo', methods=['GET'])
def pgtinfo_GET(self, request):
session = request.getSession()
if hasattr(session, 'pgt'):
return "PGT == %s" % escape_html(session.pgt)
else:
return "No PGT"
@app.route('/proxy-a-service', methods=['GET'])
def proxy_a_service_GET(self, request):
return self._proxy_a_service(request)
@app.route('/proxy-a-service-mismatch', methods=['GET'])
def proxy_a_service_mismatch_GET(self, request):
return self._proxy_a_service(request, service_mismatch="http://fail.example.com/")
def _proxy_a_service(self, request, service_mismatch=None):
act_as_proxy = self.act_as_proxy
proxied_service = act_as_proxy['service']
request_service_endpoint = act_as_proxy['request_service_endpoint']
if proxied_service is None:
return dedent("""\
<html>
<head><title>Not Configured to Proxy a Service</title></head>
<body style="background: %(color)s">
<h1>Not Configured to Proxy a Service</h1>
<p>
This service is not configured to proxy a service.
</p>
<p>
<a href="/">Back</a>
</p>
</body>
</html>
""") % {'color': self.color}
session = request.getSession()
if hasattr(session, 'pgt'):
def parsePT(result):
log.msg(result)
doc = microdom.parseString(result)
elms = doc.getElementsByTagName("cas:proxySuccess")
if len(elms) == 0:
raise Exception("Error parsing PT")
elms = doc.getElementsByTagName("cas:proxyTicket")
if len(elms) == 0:
raise Exception("Error parsing PT")
elm = elms[0]
pt = elm.childNodes[0].value
return pt
def requestService(pt, proxied_service, request_service_endpoint):
q = {
'ticket': pt,
}
url = request_service_endpoint + '?' + urlencode(q)
d = getPage(url)
return d
def printResult(result):
return dedent("""\
<html>
<head><title>Proxy a Service</title></head>
<body style="background: %(color)s">
<h1>Proxy a Service</h1>
<p>
Proxying service at: %(proxied_service)s
</p>
<pre>
%(result)s
</pre>
<p>
<a href="/">Back</a>
</p>
</body>
</html>
""") % {
'color': self.color,
'result': escape_html(result),
'proxied_service': escape_html(proxied_service)}
def printError(err):
log.err(err)
return dedent("""\
<html>
<head><title>Proxy a Service - Error</title></head>
<body style="background: %(color)s">
<h1>Proxy a Service - Error</h1>
<p>
Errors occured while proxying service at: %(proxied_service)s
</p>
<pre>
%(result)s
</pre>
<p>
<a href="/">Back</a>
</p>
</body>
</html>
""") % {
'color': self.color,
'result': escape_html(str(err.getErrorMessage())),
'proxied_service': escape_html(proxied_service)}
url = self.cas_root + '/proxy'
q = {
'targetService': service_mismatch or proxied_service,
'pgt': session.pgt,
}
url += '?' + urlencode(q)
d = getPage(url)
d.addCallback(parsePT)
d.addCallback(
requestService,
proxied_service,
request_service_endpoint)
d.addCallback(printResult)
d.addErrback(printError)
return d
else:
return dedent("""\
<html>
<head><title>No PGT</title></head>
<body style="background: %(color)s">
<h1>No PGT</h1>
<p>
<a href="/">Back</a>
</p>
</body>
</html>
""") % {
'color': self.color,}
@app.route('/badproxyticket', methods=['GET'])
def badproxyticket_GET(self, request):
pgt = 'PGT-bogus'
def printResult(result):
return dedent("""\
<html>
<head><title>/proxy Result</title></head>
<body style="background: %(color)s">
<h1>/proxy Result</h1>
<pre>
%(result)s
</pre>
<p>
<a href="/">Back</a>
</p>
</body>
</html>
""") % {
'color': self.color,
'result': escape_html(result)}
url = self.cas_root + '/proxy'
q = {
'targetService': 'foo',
'pgt': pgt,
}
url += '?' + urlencode(q)
d = getPage(url)
d.addCallback(printResult)
return d
@app.route('/acceptproxyticket', methods=['GET'])
def acceptproxyticket_GET(self, request):
act_as_link_in_proxy_chain = self.act_as_link_in_proxy_chain
if act_as_link_in_proxy_chain is not None:
proxied_service = act_as_link_in_proxy_chain['service']
request_service_endpoint = act_as_link_in_proxy_chain['request_service_endpoint']
try:
ticket = request.args['ticket'][0]
except (KeyError, IndexError):
request.setResponseCode(400)
return 'Bad request'
if not ticket:
request.setResponseCode(400)
return 'Bad request'
url = self.cas_root + '/proxyValidate'
q = {
'service': str(request.URLPath().sibling("landing")),
'ticket': ticket,
}
if act_as_link_in_proxy_chain is not None:
q['pgtUrl'] = str(request.URLPath().sibling("proxycb"))
params = urlencode(q)
url += '?' + params
def requestPT(result, proxied_service):
doc = microdom.parseString(result)
elms = doc.getElementsByTagName("cas:authenticationSuccess")
valid = False
pgt = None
if len(elms) == 0:
log.msg("[WARNING] CAS authentication failed. Result was:\n%s" % str(result))
raise Exception("CAS authentication failed.")
elms = doc.getElementsByTagName("cas:proxyGrantingTicket")
if len(elms) == 0:
log.msg("[WARNING] No PGT IOU was supplied. Result was:\n%s" % str(result))
raise Exception("No PGT IOU was supplied.")
elm = elms[0]
iou = elm.childNodes[0].value
pgt = None
if iou in self._ious:
pgt = self._ious[iou]
del self._ious[iou]
else:
log.msg("[WARNING] Could not corrolate PGTIOU '%s'." % iou)
raise Exception("Could not corrolate PGTIOU.")
# Request the PT.
url = self.cas_root + '/proxy'
q = {
'targetService': proxied_service,
'pgt': pgt,
}
url += '?' + urlencode(q)
d = getPage(url)
return d
def proxyService(result, request_service_endpoint, proxied_service):
#Parse the PT.
doc = microdom.parseString(result)
elms = doc.getElementsByTagName("cas:proxySuccess")
if len(elms) == 0:
raise Exception("Error parsing PT")
elms = doc.getElementsByTagName("cas:proxyTicket")
if len(elms) == 0:
raise Exception("Error parsing PT")
elm = elms[0]
pt = elm.childNodes[0].value
# Make the request
q = {
'service': proxied_service,
'ticket': pt,
}
url = request_service_endpoint + '?' + urlencode(q)
d = getPage(url)
return d
d = getPage(url)
if act_as_link_in_proxy_chain is not None:
d.addCallback(requestPT, proxied_service)
d.addCallback(proxyService, request_service_endpoint, proxied_service)
return d
def main(args):
run_cas_server = not args.no_cas
cas_base_url = args.cas_base_url
if run_cas_server:
# server
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.strcred import ICheckerFactory
from txcas.server import ServerApp
page_views = {VIEW_LOGIN: custom_login}
# Load the config.
scp = txcas.settings.load_settings('cas', syspath='/etc/cas', defaults={
'PLUGINS': {
'cred_checker': 'DemoChecker',
'realm': 'demo_realm',
'ticket_store': 'memory_ticket_store',}})
# Choose plugin that implements IServiceManager.
service_manager = None
if scp.has_option('PLUGINS', 'service_manager'):
tag_args = scp.get('PLUGINS', 'service_manager')
parts = tag_args.split(':')
tag = parts[0]
args = ':'.join(parts[1:])
factory = txcas.settings.get_plugin_factory(tag, IServiceManagerFactory)
if factory is None:
sys.stderr.write("[ERROR] Service manager type '%s' is not available.\n" % tag)
sys.exit(1)
service_manager = factory.generateServiceManager(args)
# Choose plugin that implements ITicketStore.
tag_args = scp.get('PLUGINS', 'ticket_store')
parts = tag_args.split(':')
tag = parts[0]
args = ':'.join(parts[1:])
factory = txcas.settings.get_plugin_factory(tag, ITicketStoreFactory)
if factory is None:
sys.stderr.write("[ERROR] Ticket store type '%s' is not available.\n" % tag)
sys.exit(1)
ticket_store = factory.generateTicketStore(args)
if service_manager is not None:
ticket_store.service_manager = service_manager
# Choose the plugin that implements IRealm.
tag_args = scp.get('PLUGINS', 'realm')
parts = tag_args.split(':')
tag = parts[0]
args = ':'.join(parts[1:])
factory = txcas.settings.get_plugin_factory(tag, IRealmFactory)
if factory is None:
sys.stderr.write("[ERROR] Realm type '%s' is not available.\n" % tag)
sys.exit(1)
realm = factory.generateRealm(args)
assert realm is not None, "User Realm has not been configured!"
# Choose plugin(s) that implement ICredentialChecker
try:
tag_args = scp.get('PLUGINS', 'cred_checker')
except Exception:
print("[ERROR] No valid credential checker was configured.")
sys.exit(1)
parts = tag_args.split(':')
tag = parts[0]
args = ':'.join(parts[1:])
factories = txcas.settings.get_plugins_by_predicate(
ICheckerFactory,
lambda x: x.authType == tag)
if len(factories) == 0:
checkers= [InMemoryUsernamePasswordDatabaseDontUse(foo='password')]
else:
checkers=[f.generateChecker(args) for f in factories]
# Service validation func.
if service_manager is None:
validService = lambda x:True
else:
validService = service_manager.isValidService
#Create the CAS server app.
server_app = ServerApp(
ticket_store,
realm,
checkers,
validService,
requireSSL=False,
page_views=page_views,
validate_pgturl=False)
# combines server/app
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.internet import reactor
from twisted.python import log
import sys
log.startLogging(sys.stdout)
if run_cas_server:
# cas server
reactor.listenTCP(9800, Site(server_app.app.resource()))
# app 1
app1 = MyApp(
'#acf', cas_base_url,
act_as_link_in_proxy_chain={
'request_service_endpoint': 'http://127.0.0.1:9804/acceptproxyticket',
'service': 'http://127.0.0.1:9804/landing',})
reactor.listenTCP(9801, Site(app1.app.resource()))
# app 2
app2 = MyApp(
'#cfc', cas_base_url,
act_as_proxy={
'request_service_endpoint': 'http://127.0.0.1:9801/acceptproxyticket',
'service': 'http://127.0.0.1:9801/landing'})
reactor.listenTCP(9802, Site(app2.app.resource()))
# app 3
app3 = MyApp('#abc', cas_base_url, allow_sso=False)
reactor.listenTCP(9803, Site(app3.app.resource()))
# app 4
app4 = MyApp('#9932CC', cas_base_url)
reactor.listenTCP(9804, Site(app4.app.resource()))
reactor.run()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--no-cas",
action='store_true',
help='Only run the service providers and *not* the CAS service.')
parser.add_argument(
"--cas-base-url",
action="store",
default='http://127.0.0.1:9800',
help="The base CAS service URL (default http://127.0.0.1:9800).")
args = parser.parse_args()
main(args)
| gpl-3.0 | 1,190,900,562,462,385,700 | 37.861888 | 222 | 0.500157 | false |
nathanielvarona/airflow | tests/providers/apache/pinot/hooks/test_pinot.py | 1 | 9827 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import os
import subprocess
import unittest
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.apache.pinot.hooks.pinot import PinotAdminHook, PinotDbApiHook
class TestPinotAdminHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.conn = conn = mock.MagicMock()
self.conn.host = 'host'
self.conn.port = '1000'
self.conn.extra_dejson = {'cmd_path': './pinot-admin.sh'}
class PinotAdminHookTest(PinotAdminHook):
def get_connection(self, conn_id):
return conn
self.db_hook = PinotAdminHookTest()
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_add_schema(self, mock_run_cli):
params = ["schema_file", False]
self.db_hook.add_schema(*params)
mock_run_cli.assert_called_once_with(
[
'AddSchema',
'-controllerHost',
self.conn.host,
'-controllerPort',
self.conn.port,
'-schemaFile',
params[0],
]
)
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_add_table(self, mock_run_cli):
params = ["config_file", False]
self.db_hook.add_table(*params)
mock_run_cli.assert_called_once_with(
[
'AddTable',
'-controllerHost',
self.conn.host,
'-controllerPort',
self.conn.port,
'-filePath',
params[0],
]
)
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_create_segment(self, mock_run_cli):
params = {
"generator_config_file": "a",
"data_dir": "b",
"segment_format": "c",
"out_dir": "d",
"overwrite": True,
"table_name": "e",
"segment_name": "f",
"time_column_name": "g",
"schema_file": "h",
"reader_config_file": "i",
"enable_star_tree_index": False,
"star_tree_index_spec_file": "j",
"hll_size": 9,
"hll_columns": "k",
"hll_suffix": "l",
"num_threads": 8,
"post_creation_verification": True,
"retry": 7,
}
self.db_hook.create_segment(**params)
mock_run_cli.assert_called_once_with(
[
'CreateSegment',
'-generatorConfigFile',
params["generator_config_file"],
'-dataDir',
params["data_dir"],
'-format',
params["segment_format"],
'-outDir',
params["out_dir"],
'-overwrite',
params["overwrite"],
'-tableName',
params["table_name"],
'-segmentName',
params["segment_name"],
'-timeColumnName',
params["time_column_name"],
'-schemaFile',
params["schema_file"],
'-readerConfigFile',
params["reader_config_file"],
'-starTreeIndexSpecFile',
params["star_tree_index_spec_file"],
'-hllSize',
params["hll_size"],
'-hllColumns',
params["hll_columns"],
'-hllSuffix',
params["hll_suffix"],
'-numThreads',
params["num_threads"],
'-postCreationVerification',
params["post_creation_verification"],
'-retry',
params["retry"],
]
)
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_upload_segment(self, mock_run_cli):
params = ["segment_dir", False]
self.db_hook.upload_segment(*params)
mock_run_cli.assert_called_once_with(
[
'UploadSegment',
'-controllerHost',
self.conn.host,
'-controllerPort',
self.conn.port,
'-segmentDir',
params[0],
]
)
@mock.patch('subprocess.Popen')
def test_run_cli_success(self, mock_popen):
mock_proc = mock.MagicMock()
mock_proc.returncode = 0
mock_proc.stdout = io.BytesIO(b'')
mock_popen.return_value.__enter__.return_value = mock_proc
params = ["foo", "bar", "baz"]
self.db_hook.run_cli(params)
params.insert(0, self.conn.extra_dejson.get('cmd_path'))
mock_popen.assert_called_once_with(
params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None
)
@mock.patch('subprocess.Popen')
def test_run_cli_failure_error_message(self, mock_popen):
msg = b"Exception caught"
mock_proc = mock.MagicMock()
mock_proc.returncode = 0
mock_proc.stdout = io.BytesIO(msg)
mock_popen.return_value.__enter__.return_value = mock_proc
params = ["foo", "bar", "baz"]
with pytest.raises(AirflowException):
self.db_hook.run_cli(params)
params.insert(0, self.conn.extra_dejson.get('cmd_path'))
mock_popen.assert_called_once_with(
params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None
)
@mock.patch('subprocess.Popen')
def test_run_cli_failure_status_code(self, mock_popen):
mock_proc = mock.MagicMock()
mock_proc.returncode = 1
mock_proc.stdout = io.BytesIO(b'')
mock_popen.return_value.__enter__.return_value = mock_proc
self.db_hook.pinot_admin_system_exit = True
params = ["foo", "bar", "baz"]
with pytest.raises(AirflowException):
self.db_hook.run_cli(params)
params.insert(0, self.conn.extra_dejson.get('cmd_path'))
env = os.environ.copy()
env.update({"JAVA_OPTS": "-Dpinot.admin.system.exit=true "})
mock_popen.assert_called_once_with(
params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=env
)
class TestPinotDbApiHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.conn = conn = mock.MagicMock()
self.conn.host = 'host'
self.conn.port = '1000'
self.conn.conn_type = 'http'
self.conn.extra_dejson = {'endpoint': 'query/sql'}
self.cur = mock.MagicMock(rowcount=0)
self.conn.cursor.return_value = self.cur
self.conn.__enter__.return_value = self.cur
self.conn.__exit__.return_value = None
class TestPinotDBApiHook(PinotDbApiHook):
def get_conn(self):
return conn
def get_connection(self, conn_id):
return conn
self.db_hook = TestPinotDBApiHook
def test_get_uri(self):
"""
Test on getting a pinot connection uri
"""
db_hook = self.db_hook()
assert db_hook.get_uri() == 'http://host:1000/query/sql'
def test_get_conn(self):
"""
        Test getting a Pinot connection
"""
conn = self.db_hook().get_conn()
assert conn.host == 'host'
assert conn.port == '1000'
assert conn.conn_type == 'http'
assert conn.extra_dejson.get('endpoint') == 'query/sql'
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
assert result_sets == self.db_hook().get_records(statement)
def test_get_first(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
assert result_sets[0] == self.db_hook().get_first(statement)
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook().get_pandas_df(statement)
assert column == df.columns[0]
for i in range(len(result_sets)): # pylint: disable=consider-using-enumerate
assert result_sets[i][0] == df.values.tolist()[i][0]
class TestPinotDbApiHookIntegration(unittest.TestCase):
@pytest.mark.integration("pinot")
@mock.patch.dict('os.environ', AIRFLOW_CONN_PINOT_BROKER_DEFAULT="pinot://pinot:8000/")
def test_should_return_records(self):
hook = PinotDbApiHook()
sql = "select playerName from baseballStats ORDER BY playerName limit 5"
records = hook.get_records(sql)
assert [["A. Harry"], ["A. Harry"], ["Aaron"], ["Aaron Albert"], ["Aaron Albert"]] == records
| apache-2.0 | -1,238,631,437,813,165,000 | 34.476534 | 101 | 0.559377 | false |
jbarlow83/OCRmyPDF | tests/test_stdio.py | 1 | 3399 | # © 2019 James R. Barlow: github.com/jbarlow83
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
from pathlib import Path
from subprocess import DEVNULL, PIPE, Popen, run
import pytest
from ocrmypdf.exceptions import ExitCode
from ocrmypdf.helpers import check_pdf
from .conftest import run_ocrmypdf
def test_stdin(ocrmypdf_exec, resources, outpdf):
input_file = str(resources / 'francais.pdf')
output_file = str(outpdf)
# Runs: ocrmypdf - output.pdf < testfile.pdf
with open(input_file, 'rb') as input_stream:
p_args = ocrmypdf_exec + [
'-',
output_file,
'--plugin',
'tests/plugins/tesseract_noop.py',
]
run(p_args, stdout=PIPE, stderr=PIPE, stdin=input_stream, check=True)
def test_stdout(ocrmypdf_exec, resources, outpdf):
if 'COV_CORE_DATAFILE' in os.environ:
pytest.skip(msg="Coverage uses stdout")
input_file = str(resources / 'francais.pdf')
output_file = str(outpdf)
# Runs: ocrmypdf francais.pdf - > test_stdout.pdf
with open(output_file, 'wb') as output_stream:
p_args = ocrmypdf_exec + [
input_file,
'-',
'--plugin',
'tests/plugins/tesseract_noop.py',
]
run(p_args, stdout=output_stream, stderr=PIPE, stdin=DEVNULL, check=True)
assert check_pdf(output_file)
@pytest.mark.skipif(
sys.version_info[0:3] >= (3, 6, 4), reason="issue fixed in Python 3.6.4"
)
@pytest.mark.skipif(os.name == 'nt', reason="POSIX problem")
def test_closed_streams(ocrmypdf_exec, resources, outpdf):
input_file = str(resources / 'francais.pdf')
output_file = str(outpdf)
def evil_closer():
os.close(0)
os.close(1)
p_args = ocrmypdf_exec + [
input_file,
output_file,
'--plugin',
'tests/plugins/tesseract_noop.py',
]
p = Popen( # pylint: disable=subprocess-popen-preexec-fn
p_args,
close_fds=True,
stdout=None,
stderr=PIPE,
stdin=None,
preexec_fn=evil_closer,
)
_out, err = p.communicate()
print(err.decode())
assert p.returncode == ExitCode.ok
@pytest.mark.skipif(sys.version_info >= (3, 7, 0), reason='better utf-8')
@pytest.mark.skipif(
Path('/etc/alpine-release').exists(), reason="invalid test on alpine"
)
@pytest.mark.skipif(os.name == 'nt', reason="invalid test on Windows")
def test_bad_locale(monkeypatch):
monkeypatch.setenv('LC_ALL', 'C')
p, out, err = run_ocrmypdf('a', 'b')
assert out == '', "stdout not clean"
assert p.returncode != 0
assert 'configured to use ASCII as encoding' in err, "should whine"
@pytest.mark.xfail(
os.name == 'nt' and sys.version_info < (3, 8),
reason="Windows does not like this; not sure how to fix",
)
def test_dev_null(resources):
if 'COV_CORE_DATAFILE' in os.environ:
pytest.skip(msg="Coverage uses stdout")
p, out, _err = run_ocrmypdf(
resources / 'trivial.pdf',
os.devnull,
'--force-ocr',
'--plugin',
'tests/plugins/tesseract_noop.py',
)
assert p.returncode == 0, "could not send output to /dev/null"
assert len(out) == 0, "wrote to stdout"
| gpl-3.0 | -968,572,428,516,915,500 | 28.293103 | 81 | 0.622131 | false |
ryandub/simpl | simpl/rest.py | 1 | 13761 | # Copyright (c) 2011-2015 Rackspace US, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""REST-ful API Utilites."""
import functools
import itertools
import json
import logging
import bottle
try:
import yaml
except ImportError:
yaml = None
LOG = logging.getLogger(__name__)
MAX_PAGE_SIZE = 10000000
STANDARD_QUERY_PARAMS = ('offset', 'limit', 'sort', 'q', 'facets')
class HTTPError(Exception):
"""Include HTTP Code, description and reason in exception."""
def __init__(self, message, http_code=400, reason=None):
"""Initialize normal error, but save http code and reason."""
super(HTTPError, self).__init__(message)
self.http_code = http_code
self.reason = reason
def body(schema=None, types=None, required=False, default=None):
"""Decorator to parse and validate API body.
:keyword schema: callable that accepts raw data and returns the coerced (or
unchanged) data if it is valid. It should raise an error if the data is
not valid.
:keyword types: supported content types (default is ['application/json'])
:keyword required: if true and no body specified will raise an error.
:keyword default: default value to return if no body supplied
Note: only json types are supported.
"""
if not types:
types = ['application/json']
if not all('json' in t for t in types):
raise NotImplementedError("Only 'json' body supported.")
def wrap(fxn):
"""Return a decorated callable."""
def wrapped(*args, **kwargs):
"""Callable to called when the decorated function is called."""
data = bottle.request.json
if required and not data:
bottle.abort(400, "Call body cannot be empty")
if data is None:
data = default
if schema:
try:
data = schema(data)
except (KeyboardInterrupt, SystemExit):
raise # don't catch and ignore attempts to end the app
except Exception as exc:
bottle.abort(400, str(exc))
return fxn(data, *args, **kwargs)
return wrapped
return wrap
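# Illustrative usage of the ``body`` decorator (``widget_schema`` and the
# route below are hypothetical, shown only to sketch the intended pattern):
#
#     @bottle.post('/widgets')
#     @body(schema=widget_schema, required=True)
#     def create_widget(data):
#         # ``data`` arrives already parsed from JSON and validated
#         return {'created': data}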
def paginated(resource_name=None):
"""Decorator that handles pagination headers, params, and links.
This accepts, parses, validates, and handles `limit` and `offset` optional
query params according to common Rackspace APIs and passes them as kwargs
to the decorated function.
offset: The pagination offset.
limit: The pagination limit (or page size). The response is paginated
with a default limit of 100 and a maximum limit of 1000.
Headers returned include:
Content-Range: ... per HTTP RFC and 206 response
Link: ... one for each page (first, last, next, previous)
It adds Content-Range headers to the response according to RFC 7233
section 4.2 - https://tools.ietf.org/html/rfc7233#section-4.2. The resource
name is supplied in the decorator call as resource_name or deduced from
uripath if not supplied.
It adds link headers based on RFC 5988 (by ex-Racker Mark Nottingham) -
https://tools.ietf.org/html/rfc5988#section-5.5.
Future changes to the RFCs or common API concepts, such as adding links to
the response body, may be implemented here to consistently handle
pagination across all projects using simpl pagination for their APIs.
Opinionated assumptions:
- body has data items under a `data` or `results` key.
- the `len()` of that data represents the number of records being returned
in this page.
- body has a `collection-count` value with the total number of records in
the underlying collection.
- bottle is being used and this is decorating a route function.
Responses:
- paginated responses are returned as `206 Partial Content` unless all the
    data fits in one page, in which case the response is `200 OK`.
Example response for **GET** `/widgets[&limit=2&offset=3]`
> HTTP/1.0 206 Partial Content
> Content-Range: widget 3-4/6
> Content-Length: 217
> Content-Type: application/json
> Link: </widgets?limit=2&offset=1>; rel="previous"; title="Previous page"
> Link: </widgets?limit=2>; rel="first"; title="First page"
> Link: </widgets?offset=4>; rel="last"; title="Last page"
```json
{
"collection-count": 6,
"data": [
{
"id": 1,
"name": "foo"
},
{
"id": 2,
"name": "bar"
}
]
}
```
"""
def _paginated(fxn):
"""Add pagination (optional) and headers to response."""
def _decorator(*args, **kwargs):
"""Internal function wrapped as a decorator."""
try:
validate_range_values(bottle.request, 'offset', kwargs)
validate_range_values(bottle.request, 'limit', kwargs)
except ValueError:
bottle.response.status = 416
                bottle.response.set_header(
                    'Content-Range', '%s */*' % (
                        resource_name or bottle.request.path.split('/')[-1]))
return
data = fxn(*args, **kwargs)
write_pagination_headers(
data,
int(kwargs.get('offset') or 0),
int(kwargs.get('limit') or 100),
bottle.response,
bottle.request.path,
resource_name)
return data
return functools.wraps(fxn)(_decorator)
return _paginated
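# Sketch of applying ``paginated`` to a bottle route (``load_widgets`` is a
# hypothetical helper returning a page of items plus the collection total):
#
#     @bottle.get('/widgets')
#     @paginated(resource_name='widget')
#     def list_widgets(offset=0, limit=100):
#         items, total = load_widgets(offset=offset, limit=limit)
#         return {'collection-count': total, 'data': items}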
def validate_range_values(request, label, kwargs):
"""Ensure value contained in label is a positive integer."""
value = kwargs.get(label, request.query.get(label))
if value:
kwargs[label] = int(value)
if kwargs[label] < 0 or kwargs[label] > MAX_PAGE_SIZE:
raise ValueError
def write_pagination_headers(data, offset, limit, response, uripath,
resource_name):
"""Add pagination headers to the bottle response.
See docs in :func:`paginated`.
"""
items = data.get('results') or data.get('data') or {}
count = len(items)
try:
total = int(data['collection-count'])
except (ValueError, TypeError, KeyError):
total = None
if total is None and offset == 0 and (limit is None or limit > len(items)):
total = count
# Set 'content-range' header
response.set_header(
'Content-Range',
"%s %d-%d/%s" % (resource_name, offset, offset + max(count - 1, 0),
total if total is not None else '*')
)
partial = False
if offset:
partial = True # Any offset automatically means we've skipped data
elif total is None and count == limit:
# Unknown total, but first page is full (so there may be more)
partial = True # maybe more records in next page
elif total > count:
# Known total and not all records returned
partial = True
if partial:
uripath = uripath.strip('/')
response.status = 206 # Partial
# Add Next page link to http header
if total is None or (offset + limit) < total - 1:
nextfmt = (
'</%s?limit=%d&offset=%d>; rel="next"; title="Next page"')
response.add_header(
"Link", nextfmt % (uripath, limit, offset + limit)
)
# Add Previous page link to http header
if offset > 0 and (offset - limit) >= 0:
prevfmt = ('</%s?limit=%d&offset=%d>; rel="previous"; '
'title="Previous page"')
response.add_header(
"Link", prevfmt % (uripath, limit, offset - limit)
)
# Add first page link to http header
if offset > 0:
firstfmt = '</%s?limit=%d>; rel="first"; title="First page"'
response.add_header(
"Link", firstfmt % (uripath, limit))
# Add last page link to http header
if (total is not None and # can't calculate last page if unknown total
limit and # if no limit, then any page is the last page!
limit < total):
lastfmt = '</%s?offset=%d>; rel="last"; title="Last page"'
if limit and total % limit:
last_offset = total - (total % limit)
else:
last_offset = total - limit
response.add_header(
"Link", lastfmt % (uripath, last_offset))
def process_params(request, standard_params=STANDARD_QUERY_PARAMS,
filter_fields=None, defaults=None):
"""Parse query params.
Parses, validates, and converts query into a consistent format.
:keyword request: the bottle request
:keyword standard_params: query params that are present in most of our
(opinionated) APIs (ex. limit, offset, sort, q, and facets)
:keyword filter_fields: list of field names to allow filtering on
:keyword defaults: dict of params and their default values
    :returns: dict of query params with supplied values (string or list)
"""
if not filter_fields:
filter_fields = []
unfilterable = (set(request.query.keys()) - set(filter_fields) -
set(standard_params))
if unfilterable:
bottle.abort(400,
"The following query params were invalid: %s. "
"Try one (or more) of %s." %
(", ".join(unfilterable),
", ".join(filter_fields)))
query_fields = defaults or {}
for key in request.query:
if key in filter_fields:
# turns ?netloc=this.com&netloc=that.com,what.net into
# {'netloc': ['this.com', 'that.com', 'what.net']}
matches = request.query.getall(key)
matches = list(itertools.chain(*(k.split(',') for k in matches)))
if len(matches) > 1:
query_fields[key] = matches
else:
query_fields[key] = matches[0]
if 'sort' in request.query:
sort = request.query.getall('sort')
sort = list(itertools.chain(*(
comma_separated_strings(str(k)) for k in sort)))
query_fields['sort'] = sort
if 'q' in request.query:
search = request.query.getall('q')
search = list(itertools.chain(*(
comma_separated_strings(k) for k in search
if k)))
query_fields['q'] = search
return query_fields
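# Illustrative example of the parsing behaviour documented above (the field
# name 'netloc' and the URL are assumptions):
#
#   GET /resources?netloc=this.com&netloc=that.com,what.net&sort=-name&q=foo
#   process_params(bottle.request, filter_fields=['netloc'])
#   # -> {'netloc': ['this.com', 'that.com', 'what.net'],
#   #     'sort': ['-name'],
#   #     'q': ['foo']}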
def comma_separated_strings(value):
"""Parse comma-separated string into list."""
return [str(k).strip() for k in value.split(",")]
def error_formatter(error):
"""Bottle error formatter.
This will take caught errors and output them in our opinionated format and
the requested media-type. We default to json if we don't recognize or
support the content.
The content format is:
error: - this is the wrapper for the returned error object
code: - the HTTP error code (ex. 404)
message: - the HTTP error code message (ex. Not Found)
        description: - the plain English, user-friendly description. Use
                       this to surface a non-technical message in a UI/CLI.
reason: - (optional) any additional technical information to
help a technical user with troubleshooting
Usage as a default handler:
import bottle
from simple import rest
app = bottle.default_app()
app.default_error_handler = rest.error_formatter
# Meanwhile, elsewhere in a module nearby
raise rest.HTTPError("Ouch!", http_code=500, reason="Lapse of reason")
"""
output = {}
accept = bottle.request.get_header("Accept") or ""
if "application/x-yaml" in accept:
error.headers.update({"content-type": "application/x-yaml"})
writer = functools.partial(yaml.safe_dump, default_flow_style=False)
else: # default to JSON
error.headers.update({"content-type": "application/json"})
writer = json.dumps
description = error.body or error.exception
if isinstance(error.exception, AssertionError):
error.status = 400
description = str(error.exception)
LOG.error(error.exception)
elif isinstance(error.exception, HTTPError):
error.status = error.exception.http_code
description = str(error.exception)
if error.exception.reason:
output['reason'] = error.exception.reason
LOG.error(error.exception)
elif error.exception:
error.status = 500
description = "Unexpected error"
# Log unexpected args
if hasattr(error.exception, 'args'):
if len(error.exception.args) > 1:
LOG.warning('HTTPError: %s', error.exception.args)
output['description'] = description
output['code'] = error.status_code
output['message'] = error.status_line.split(' ', 1)[1]
error.apply(bottle.response)
return writer({'error': output})
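# Illustrative shape of the serialized error body (values are made up; the
# 'reason' key only appears when an HTTPError supplies one):
#
#   {"error": {"code": 404,
#              "message": "Not Found",
#              "description": "No record with id 42",
#              "reason": "lookup failed in backing store"}}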
| apache-2.0 | 3,635,419,209,916,187,600 | 36.394022 | 79 | 0.600538 | false |
b3j0f/debate | django/core/views.py | 1 | 3280 | """View module."""
from .models import (
Account, User, Topic, Event, Space, Vote, Tag, Media,
Stat
)
from .serializers import (
AccountSerializer, UserSerializer, TopicSerializer, MediaSerializer,
EventSerializer, VoteSerializer, TagSerializer,
SpaceSerializer, StatSerializer
)
from .permissions import IsVoter, IsAdminOrReadOnly
from rest_framework.viewsets import ModelViewSet
class AccountViewSet(ModelViewSet):
"""Account view set."""
queryset = Account.objects.all()
serializer_class = AccountSerializer
filter_fields = {
'id': ['exact'],
'user': ['exact'],
'avatar': ['exact'],
'relationships': ['exact'],
'address': ['iexact'],
'languages': ['iexact']
}
class UserViewSet(ModelViewSet):
"""User view set."""
queryset = User.objects.all()
serializer_class = UserSerializer
filter_fields = {
'id': ['exact'],
'account': ['exact'],
}
class TopicViewSet(ModelViewSet):
"""Topic view set."""
queryset = Topic.objects.all()
serializer_class = TopicSerializer
filter_fields = {
'id': ['exact'],
'contact': ['exact'],
'name': ['exact', 'icontains'],
'public': ['exact'],
'score': ['lte', 'gte', 'exact'],
'description': ['icontains'],
'medias': ['exact']
}
permission_classes = (IsAdminOrReadOnly,)
class MediaViewSet(ModelViewSet):
"""Media view set."""
queryset = Media.objects.all()
serializer_class = MediaSerializer
filter_fields = {
'id': ['exact'],
'file': ['exact'],
'url': ['icontains'],
'topic': ['exact']
}
class EventViewSet(ModelViewSet):
"""Event view set."""
queryset = Event.objects.all()
serializer_class = EventSerializer
filter_fields = {
'id': ['exact'],
'date': ['exact', 'lte', 'gte'],
'mduration': ['exact', 'lte', 'gte'],
'Space': ['exact']
}
class SpaceViewSet(ModelViewSet):
"""Space view set."""
queryset = Space.objects.all()
serializer_class = SpaceSerializer
filter_fields = {
'id': ['exact'],
'space': ['exact'],
'product': ['exact'],
'state': ['exact']
}
permission_classes = (IsAdminOrReadOnly,)
class VoteViewSet(ModelViewSet):
"""Vote view set."""
queryset = Vote.objects.all()
serializer_class = VoteSerializer
filter_fields = {
'id': ['exact'],
'account': ['exact'],
'topic': ['exact'],
'value': ['exact', 'gte', 'lte']
}
permission_classes = (IsVoter,)
class TagViewSet(ModelViewSet):
"""Tag view set."""
queryset = Tag.objects.all()
serializer_class = TagSerializer
filter_fields = {
'id': ['exact'],
'name': ['exact', 'icontains'],
'description': ['icontains'],
'topicS': ['exact']
}
class StatViewSet(ModelViewSet):
"""Stat view set."""
queryset = Stat.objects.all()
serializer_class = StatSerializer
filter_fields = {
'id': ['exact'],
'date': ['exact', 'lte', 'gte'],
'accounts': ['exact', 'lte', 'gte'],
'spaces': ['exact', 'lte', 'gte'],
'votes': ['exact', 'lte', 'gte'],
}
| mit | -417,113,614,665,994,500 | 22.768116 | 72 | 0.559146 | false |
openstack/networking-arista | networking_arista/tests/unit/ml2/mechanism_ha_simulator.py | 1 | 4119 | # Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Queue
import random
from neutron.agent import rpc as agent_rpc
from neutron_lib.agent import topics
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import context
from networking_arista.ml2 import arista_sync
from networking_arista.ml2.mechanism_arista import AristaDriver
class AristaHASimulationDriver(AristaDriver):
def __init__(self):
super(AristaHASimulationDriver, self).__init__()
self.provision_queue1 = Queue()
self.provision_queue2 = Queue()
self.provision_queue3 = Queue()
self.provision_queues = [self.provision_queue1,
self.provision_queue2,
self.provision_queue3]
def get_workers(self):
return [arista_sync.AristaSyncWorker(self.provision_queue1),
arista_sync.AristaSyncWorker(self.provision_queue2),
arista_sync.AristaSyncWorker(self.provision_queue3)]
def create_network_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).create_network_postcommit(
context)
def update_network_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).update_network_postcommit(
context)
def delete_network_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).delete_network_postcommit(
context)
def update_port_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).update_port_postcommit(context)
def delete_port_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).delete_port_postcommit(context)
class AristaHAScaleSimulationDriver(AristaHASimulationDriver):
def __init__(self):
super(AristaHAScaleSimulationDriver, self).__init__()
self.context = None
self.plugin_rpc = None
def initialize(self):
super(AristaHAScaleSimulationDriver, self).initialize()
self.context = context.get_admin_context_without_session()
# Subscribe to port updates to force ports to active after binding
# since a fake virt driver is being used, so OVS will never see
# the libvirt interfaces come up, triggering the OVS provisioning
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
registry.subscribe(self._port_update_callback,
resources.PORT, events.AFTER_UPDATE)
def _port_update_callback(self, resource, event, trigger, **kwargs):
port = kwargs.get('port')
host = port.get(portbindings.HOST_ID)
vif_type = port.get(portbindings.VIF_TYPE)
device_dict = {'device': port['id'],
'agent_id': 'ovs-agent-%s' % host,
'host': host}
if vif_type == 'ovs':
self.plugin_rpc.update_device_up(self.context, **device_dict)
elif (port.get(portbindings.VNIC_TYPE) == 'normal'
and vif_type == 'unbound'):
self.plugin_rpc.update_device_down(self.context, **device_dict)
| apache-2.0 | 2,236,879,542,072,917,500 | 40.606061 | 77 | 0.686817 | false |
NerdsvilleCEO/elections | tests/test_flask_irc.py | 1 | 5788 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Joshua Santos
Author: Joshua Santos <[email protected]>
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2, or (at your option) any later version. This
# program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the GNU
# General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public License and
# may only be used or replicated with the express permission of Red Hat, Inc.
fedora_elections.elections test script
"""
__requires__ = ['SQLAlchemy >= 0.7', 'jinja2 >= 2.4']
import pkg_resources
import logging
import unittest
import sys
import os
from datetime import time
from datetime import timedelta
import flask
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
import fedora_elections
from tests import ModelFlasktests, Modeltests, TODAY, FakeUser, user_set
# pylint: disable=R0904
class FlaskIrcElectionstests(ModelFlasktests):
""" Flask application tests irc voting. """
def test_vote_irc(self):
""" Test the vote_irc function - the preview part. """
output = self.app.get(
'/vote/test_election', follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<title>OpenID transaction in progress</title>' in output.data
or 'discoveryfailure' in output.data)
self.setup_db()
user = FakeUser(['packager'], username='toshio')
with user_set(fedora_elections.APP, user):
output = self.app.get(
'/vote/test_election7', follow_redirects=True)
self.assertTrue(
'class="message">You have already voted in the election!</'
in output.data)
user = FakeUser(['packager'], username='nerdsville')
with user_set(fedora_elections.APP, user):
output = self.app.get(
'/vote/test_election7')
self.assertTrue(
'<h2>test election 7 shortdesc</h2>' in output.data)
self.assertTrue(
'<input type="hidden" name="action" value="preview" />'
in output.data)
csrf_token = output.data.split(
'name="csrf_token" type="hidden" value="')[1].split('">')[0]
# Invalid vote: No candidate
data = {
'action': 'preview',
}
output = self.app.post('/vote/test_election7', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 7 shortdesc</h2>' in output.data)
# Valid input
data = {
'Kevin': -1,
'Toshio': '0',
'action': 'preview',
'csrf_token': csrf_token,
}
output = self.app.post('/vote/test_election7', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 7 shortdesc</h2>' in output.data)
self.assertTrue(
'<input type="hidden" name="action" value="submit" />'
in output.data)
self.assertTrue(
'<li class="message">Please confirm your vote!</li>'
in output.data)
def test_vote_irc_process(self):
""" Test the vote_irc function - the voting part. """
output = self.app.get(
'/vote/test_election', follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<title>OpenID transaction in progress</title>' in output.data
or 'discoveryfailure' in output.data)
self.setup_db()
user = FakeUser(['packager'], username='pingou')
with user_set(fedora_elections.APP, user):
# Invalid candidate id - no csrf
data = {
'candidate': 1,
'action': 'submit',
}
output = self.app.post(
'/vote/test_election7', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
csrf_token = output.data.split(
'name="csrf_token" type="hidden" value="')[1].split('">')[0]
# Valid input
data = {
'Toshio': '0',
'Kevin': '1',
'action': 'submit',
'csrf_token': csrf_token,
}
output = self.app.post(
'/vote/test_election7', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'class="message">Your vote has been recorded. Thank you!</'
in output.data)
self.assertTrue('<h3>Current elections</h3>' in output.data)
self.assertTrue('<h3>Next 1 elections</h3>' in output.data)
self.assertTrue('<h3>Last 2 elections</h3>' in output.data)
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(FlaskIrcElectionstests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-2.0 | -6,630,374,561,659,560,000 | 35.866242 | 79 | 0.585867 | false |
michimussato/pypelyne2 | pypelyne2/payload/rr/7.0.29__installer/files/render_apps/scripts/kso_tcp.py | 1 | 6250 | import SocketServer
import struct
import datetime
import sys
import time
StructureID_rrCommands =0x0B03
StructureID_RRN_TCP_HeaderData_v3 = 0x0D03
Size_RRCommands = 1232
Size_RRN_TCP_HeaderData_v3 = 198
rrnData_commands = 7
log_command="print(' \\\'"
log_command_end="')"
log_command_intern_prestring=" "
log_debug= False
commandTimeout=180
def flushLog():
sys.stdout.flush()
sys.stderr.flush()
def logMessage_intern(lvl, msg):
if (len(lvl)==0):
msg=datetime.datetime.now().strftime("%H:%M.%S") + " rrKSO : " + str(msg)
else:
msg=datetime.datetime.now().strftime("%H:%M.%S") + " rrKSO - " + str(lvl) + ": " + str(msg)
msg= msg.replace("\'","\\\'").replace("\n","\\\n")
print(log_command_intern_prestring+msg)
def logMessageGen(lvl, msg):
if (len(lvl)==0):
msg=datetime.datetime.now().strftime("%H:%M.%S") + " rrKSO : " + str(msg)
else:
msg=datetime.datetime.now().strftime("%H:%M.%S") + " rrKSO - " + str(lvl) + ": " + str(msg)
msg= msg.replace("\'","\\\'").replace("\n","\\\n")
msg= log_command+msg+log_command_end
exec(msg)
def logMessageDebug( msg):
if (log_debug):
logMessage_intern("DGB", msg)
def logMessage(msg):
logMessageGen("",msg)
def logMessageError(msg):
logMessageGen("ERR", str(msg)+"\n\n")
logMessageGen("ERR", "Error reported, aborting render script")
flushLog();
class _RRCommands():
StructureID=StructureID_rrCommands
ctype=4
command=0
paramID=0
paramX=0
paramY=0
paramS=""
paramSlength=0
paramSType=0
    def toBinary(self):
        keptfree=0
        # paramS occupies a fixed 1002-byte slot stored as 16bit characters; mirror
        # the crude ANSI<->16bit handling used in fromBinary() below.
        paramS16= "".join(c+"\x00" for c in self.paramS[:501])
        return struct.pack("=HBBhbbQii1002sHH200sbb",self.StructureID,self.ctype, self.command, keptfree, keptfree,keptfree,self.paramID, self.paramX, self.paramY, paramS16, self.paramSlength,self.paramSType, "", keptfree, keptfree)
def fromBinary(self, buf):
tmp= struct.unpack("=HBBhbbQii1002sHH200?bb",buf)
self.StructureID= tmp[0]
self.ctype= tmp[1]
self.command= tmp[2]
self.paramID= tmp[6]
self.paramX= tmp[7]
self.paramY= tmp[8]
paramsTemp=tmp[9];
self.paramSlength= tmp[10]
self.paramSType= tmp[11]
self.paramS=""
for c in range(0, self.paramSlength): #string is actually unicode 16bit, but for now a dirty ANSI conversion is fine
self.paramS= self.paramS+ paramsTemp[c*2]
def rightStructure(self):
return (self.StructureID== StructureID_rrCommands)
class _RRN_TCP_HeaderData_v3():
StructureID= StructureID_RRN_TCP_HeaderData_v3
dataLen=0
dataType=0
dataNrElements=0
appType=14
def toBinary(self):
keptfree=0
keptfreeS=""
return struct.pack("=HIIHbhB182s",self.StructureID,keptfree,self.dataLen,keptfree,self.dataType,self.dataNrElements,self.appType,keptfreeS)
def fromBinary(self, buf):
tmp= struct.unpack("=HIIHbhB182s",buf)
self.StructureID= tmp[0]
self.dataLen= tmp[2]
self.dataType= tmp[4]
self.dataNrElements= tmp[5]
self.appType= tmp[6]
def rightStructure(self):
return (self.StructureID== StructureID_RRN_TCP_HeaderData_v3)
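# Quick sanity check (illustrative only, not executed at import time): the
# header serializes to the advertised fixed size and survives a round trip.
#
#   hdr = _RRN_TCP_HeaderData_v3()
#   hdr.dataType = rrnData_commands
#   hdr.dataLen = Size_RRCommands
#   buf = hdr.toBinary()
#   assert len(buf) == Size_RRN_TCP_HeaderData_v3          # 198 bytes
#   hdr2 = _RRN_TCP_HeaderData_v3()
#   hdr2.fromBinary(buf)
#   assert hdr2.rightStructure() and hdr2.dataLen == Size_RRCommands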
rrKSONextCommand=""
class rrKSOTCPHandler(SocketServer.BaseRequestHandler):
def handle(self):
logMessageDebug("Handler")
headerData=_RRN_TCP_HeaderData_v3()
headerData.fromBinary(self.request.recv(Size_RRN_TCP_HeaderData_v3))
if ((not headerData.rightStructure()) or (headerData.dataType!=rrnData_commands) or (headerData.dataLen!=Size_RRCommands) ):
self.server.continueLoop=False
logMessageGen("ERR","TCP header wrong! "
+ " ID:"+ str(headerData.StructureID)+"!=" +str(StructureID_RRN_TCP_HeaderData_v3)
+ " type:"+ str(headerData.dataType)+"!=" +str(rrnData_commands)
+ " len:"+ str(headerData.dataLen)+"!=" +str(Size_RRCommands)
)
return
command = _RRCommands()
command.fromBinary(self.request.recv(Size_RRCommands))
if (( not command.rightStructure())):
self.server.continueLoop=False
logMessageGen("ERR","TCP data wrong! "
+ "ID:"+ str(command.StructureID)+"!=" +str(StructureID_rrCommands)
)
return
if (( command.paramSlength==0)):
logMessageGen("ERR","Empty command received.")
return
global rrKSONextCommand
rrKSONextCommand=command.paramS
class rrKSOServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
timeout = commandTimeout
daemon_threads = True
allow_reuse_address = True
continueLoop = True
def __init__(self, server_address, RequestHandlerClass):
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
def handle_timeout(self):
logMessage_intern("ERR",'Timeout!')
self.continueLoop=False
def handle_error(self, request, client_address):
logMessage_intern("ERR"," Issue while handline connection to " + str(client_address))
self.continueLoop=False
import traceback
logMessage_intern("ERR",traceback.format_exc())
def writeRenderPlaceholder(filename):
logMessageGen("---",filename );
import socket
hostName=socket.gethostname()
hostName=hostName[:100]
file = open(filename,"wb")
file.write("rrDB") #Magic ID
file.write("\x01\x0B") #DataType ID
for x in range(0, len(hostName)):
file.write(hostName[x])
file.write("\x00") #unicode
for x in range(len(hostName),51):
file.write("\x00\x00")
file.write(chr(len(hostName)))
file.write("\x00")
file.write("\x00\x00")
file.close();
def writeRenderPlaceholder_nr(filename, frameNr, padding, ext):
padding=int(padding)
if (padding==0):
padding=4
filenameFinal=filename +str(frameNr).zfill(int(padding)) + ext
writeRenderPlaceholder(filenameFinal)
#logMessageDebug("KSO_IMPORTED__KSO_IMPORTED__KSO_IMPORTED__KSO_IMPORTED__KSO_IMPORTED__KSO_IMPORTED__KSO_IMPORTED")
| gpl-2.0 | 4,744,804,664,678,391,000 | 30.407035 | 231 | 0.62832 | false |
balena/python-smime | setup.py | 1 | 1324 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='smime',
version=__import__('smime').__version__,
description='Python S/MIME Toolkit',
long_description=long_description,
url='https://github.com/balena/python-smime',
author='G. B. Versiani',
author_email='[email protected]',
license='Apache License (2.0)',
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
'Topic :: Communications :: Email',
'Topic :: Security :: Cryptography',
],
keywords='smime cryptography email',
packages=find_packages(exclude=['smime/test', 'smime/crypto/testdata',
'smime/crypto/tools', '*_test.py']),
platforms=["all"],
install_requires=['cryptography', 'asn1crypto'],
)
| apache-2.0 | -497,665,523,071,325,800 | 32.948718 | 74 | 0.635196 | false |
moas/sketchbadges | exo/studio/users/urls.py | 1 | 2012 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from django.contrib.auth.views import (
logout_then_login, login,
password_change
)
from django.core.urlresolvers import reverse_lazy
from django.views import generic
from . import views
urlpatterns = [
url(
regex=r'^$',
view=generic.RedirectView.as_view(
url=reverse_lazy('user:redirect')
),
name='index'
),
url(
regex='^login/$',
view=login,
kwargs={'template_name': 'login.html'},
name='login'
),
url(
regex=r'^change-password/$',
view=password_change,
kwargs={
'post_change_redirect': reverse_lazy('user:logout'),
'template_name': 'password_change_form.html'
},
name='change-password'
),
url(
regex=r'^add/$',
view=views.UserDesignerCreateAccount.as_view(),
name='create'
),
url(
regex=r'^redirect/$',
view=views.UserRedirectView.as_view(),
name='redirect'
),
url(
regex=r'^(?P<pk>\d+)/$',
view=views.UserDetailView.as_view(),
name='detail'
),
url(
regex=r'^(?P<pk>\d+)/change/$',
view=views.UserUpdateView.as_view(),
name='update'
),
url(
regex='^model-list/$',
view=views.DesignerListModelView.as_view(),
name='model-list'
),
url(
regex='^model-list/add/$',
view=views.DesignerAddModelView.as_view(),
name='add-model'
),
url(
regex='^model-list/(?P<uid>[a-f,0-9]+)/$',
view=views.DesignerModelDetailView.as_view(),
name='model-detail'
),
url(
regex='^model-list/(?P<uid>[a-f,0-9]+)/change/$',
view=views.DesignerChangeModelView.as_view(),
name='model-change'
),
url(
regex='^logout/$',
view=logout_then_login,
name='logout'
),
]
| mit | 7,237,117,035,323,833,000 | 23.536585 | 64 | 0.538767 | false |
tcarmelveilleux/CheapDrawBot | Software/drivers/pololu_maestro.py | 1 | 1901 | #
# Basic library to interface with Pololu Maestro servo controllers
# through serial port.
#
# Limitations:
# * CRC-7 not yet supported
import serial
class PololuMaestro:
CMD_SET_TARGET = 0x04
CMD_SET_SPEED = 0x07
CMD_SET_ACCEL = 0x09
def __init__(self, port, baud_rate=9600):
self.port = port
self.baud_rate = baud_rate
self.serial_port = None
def connect(self):
if self.serial_port != None:
self.close()
#try:
self.serial_port = serial.Serial(self.port, self.baud_rate)
#except IOError:
# TODO: Logging ?
# return False
return True
def close(self):
if self.serial_port != None:
self.serial_port.close()
self.serial_port = None
def set_target(self, servo_id, target_counts, device_number = 12):
bytes_to_send = bytearray([0xaa, device_number & 0x7f, self.CMD_SET_TARGET])
bytes_to_send.append(servo_id & 0x1f)
bytes_to_send.append(target_counts & 0x7f)
bytes_to_send.append((target_counts >> 7) & 0x7f)
#print ", ".join(["0x%02X" % b for b in bytes_to_send])
self.serial_port.write(bytes_to_send)
def set_speed(self, servo_id, speed, device_number = 12):
bytes_to_send = bytearray([0xaa, device_number & 0x7f, self.CMD_SET_SPEED])
bytes_to_send.append(servo_id & 0x1f)
bytes_to_send.append(speed & 0x7f)
bytes_to_send.append((speed >> 7) & 0x7f)
self.serial_port.write(bytes_to_send)
def set_accel(self, servo_id, acceleration, device_number = 12):
bytes_to_send = bytearray([0xaa, device_number & 0x7f, self.CMD_SET_ACCEL])
bytes_to_send.append(servo_id & 0x1f)
bytes_to_send.append(acceleration & 0x7f)
bytes_to_send.append((acceleration >> 7) & 0x01)
self.serial_port.write(bytes_to_send)
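# Illustrative usage sketch (the serial device name and channel/target values
# below are assumptions; with the Pololu protocol the target is expressed in
# quarter-microseconds, so 6000 corresponds to a 1500 us pulse):
if __name__ == "__main__":
    maestro = PololuMaestro("/dev/ttyACM0")
    if maestro.connect():
        maestro.set_speed(0, 20)     # limit speed on channel 0
        maestro.set_accel(0, 5)      # limit acceleration on channel 0
        maestro.set_target(0, 6000)  # ~1500 us pulse on channel 0
        maestro.close()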
| mit | -2,907,025,372,800,003,000 | 31.220339 | 84 | 0.609679 | false |
sklam/numba | numba/cuda/tests/cudapy/test_idiv.py | 1 | 1061 | import numpy as np
from numba import cuda, float32, float64, int32
from numba.cuda.testing import unittest, CUDATestCase
class TestCudaIDiv(CUDATestCase):
def test_inplace_div(self):
@cuda.jit(argtypes=[float32[:, :], int32, int32])
def div(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
x = np.ones((2, 2), dtype=np.float32)
grid = cuda.to_device(x)
div[1, 1](grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
def test_inplace_div_double(self):
@cuda.jit(argtypes=[float64[:, :], int32, int32])
def div_double(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
x = np.ones((2, 2), dtype=np.float64)
grid = cuda.to_device(x)
div_double[1, 1](grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | 8,982,965,068,970,503 | 26.921053 | 57 | 0.514609 | false |