repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (19 classes) | size (string, lengths 4-7) | content (string, lengths 721-1.04M) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
AsgerPetersen/tiledrasterio | tiledrasterio/_virtualraster.py | 1 | 8905 |
# coding: utf-8
# In[1]:
import os
import rasterio
from math import floor, ceil
import numpy as np
# In[24]:
class VirtualRaster():
def __init__(self, shape, transformation = None, proj4_crs = None):
self.height = shape[0]
self.width = shape[1]
self.transform = transformation
self.crs = proj4_crs
self.bands = []
def read_band(self, bidx, out=None, window=None, masked=None):
"""Read the `bidx` band into an `out` array if provided,
otherwise return a new array.
Band indexes begin with 1: read_band(1) returns the first band.
The optional `window` argument is a 2 item tuple. The first item
is a tuple containing the indexes of the rows at which the
window starts and stops and the second is a tuple containing the
indexes of the columns at which the window starts and stops. For
example, ((0, 2), (0, 2)) defines a 2x2 window at the upper left
of the raster dataset.
"""
band = self.bands[ bidx - 1]
if window is None:
window = ((0,self.height),(0,self.width))
if out is None:
window_shape = rasterio._base.window_shape(window, self.height, self.width)
if masked:
out = np.ma.zeros(window_shape, band.dtype)
else:
out = np.zeros(window_shape, band.dtype)
return band.read(out, window, masked)
def open(self, mode = 'r', base_path = None):
#map( lambda b: map( lambda s: s.open, b.sources ),self.bands)
for b in self.bands:
for s in b.sources:
s.open()
def close(self):
#map( lambda b: map( lambda s: s.open, b.sources ),self.bands)
for b in self.bands:
for s in b.sources:
s.close()
# In[25]:
class Band():
def __init__(self, band_number, dtype, nodata = None):
self.band_number = band_number
self.dtype = dtype
self.nodata = nodata
self.sources = []
def read(self, out, req_window, masked=None):
# Consider using indexed dest_windows instead of brute force
map(lambda src: src.read(out, req_window, masked), self.sources)
return out
# In[26]:
def crop_window(window, cropper_window):
"""Returns a version of window cropped against cropper_window.
Also returns a tuple containing two bools: (cropped_rows, cropped_cols)"""
(changed_rows, changed_cols) = (False, False)
((row_start,row_end),(col_start, col_end)) = window
if row_start < cropper_window[0][0]:
row_start = cropper_window[0][0]
changed_rows = True
if col_start < cropper_window[1][0]:
col_start = cropper_window[1][0]
changed_cols = True
if row_end > cropper_window[0][1]:
row_end = cropper_window[0][1]
changed_rows = True
if col_end > cropper_window[1][1]:
col_end = cropper_window[1][1]
changed_cols = True
return ( (row_start,row_end),(col_start,col_end) ), (changed_rows, changed_cols)
# In[27]:
def windows_overlap(win1, win2):
(ymin1, ymax1), (xmin1, xmax1) = win1
(ymin2, ymax2), (xmin2, xmax2) = win2
if ymin1 > ymax2 - 1 or ymax1 - 1 < ymin2 or xmin1 > xmax2 - 1 or xmax1 - 1 < xmin2:
return False
return True
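# A small illustrative check of the two window helpers above (hypothetical values,
# not part of the original module): crop_window clips a window against the cropper
# and reports which axes changed, windows_overlap tests for a non-empty intersection.
#
# >>> crop_window(((-2, 5), (1, 12)), ((0, 4), (0, 10)))
# (((0, 4), (1, 10)), (True, True))
# >>> windows_overlap(((0, 4), (0, 4)), ((4, 8), (0, 4)))
# False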
# In[28]:
class Source():
def __init__(self, path, source_band, source_window, destination_window, source_nodata = None):
self.path = path
self.source_band = source_band
self.source_window = source_window
self.source_shape = rasterio._base.window_shape(source_window)
self.destination_window = destination_window
self.destination_shape = rasterio._base.window_shape(destination_window)
self.source_nodata = source_nodata
self.dataset = None
self._scale = tuple(float(src)/float(dest) for src,dest in zip(self.source_shape,self.destination_shape))
def open(self, mode = 'r', base_path = None):
if self.dataset is None:
absolute_path = self.path if not base_path else os.path.join(base_path, self.path)
self.dataset = rasterio.open(absolute_path)
def close(self):
if self.dataset:
self.dataset.close()
def _source_to_destination(self, source):
"""Transforms source pixel coordinates into destination pixel coordinates.
Accepts either a coordinate tuple or a window"""
if isinstance(source[0], (tuple, list)) :
# This is a window, not a coord pair
zipped = zip( *source )
start = tuple( int(floor(c)) for c in self._source_to_destination(zipped[0]) )
# vrtsources.cpp does not ceil() the end coord. Rather it floors it
end = tuple( int(floor(c)) for c in self._source_to_destination(zipped[1]) )
return tuple(zip(start, end))
dest_col = (source[1] - self.source_window[1][0]) / self._scale[1] + self.destination_window[1][0]
dest_row = (source[0] - self.source_window[0][0]) / self._scale[0] + self.destination_window[0][0]
return (dest_row, dest_col)
def _destination_to_source(self, destination ):
"""Transforms destination pixel coordinates into source pixel coordinates.
Accepts either a (row,col) tuple or a window like ((row_start,row_end),(col_start,col_end))"""
if isinstance(destination[0], (tuple, list)) :
# This is a window, not a coord pair
zipped = zip( *destination )
source_start = tuple( int(floor(c)) for c in self._destination_to_source(zipped[0]) )
source_end = tuple( int(ceil(c)) for c in self._destination_to_source(zipped[1]) )
return tuple(zip(source_start, source_end))
source_col = (destination[1] - self.destination_window[1][0]) * self._scale[1] + self.source_window[1][0]
source_row = (destination[0] - self.destination_window[0][0]) * self._scale[0] + self.source_window[0][0]
return (source_row, source_col)
def read(self, out, req_window, masked=None):
""" req_window is the total requested window in destination coordinates.
Out is a numpy array."""
# Logic is roughly copied from GDAL's vrtsources.cpp
req_window_shape = rasterio._base.window_shape(req_window)
# Does req_window overlap destination_window
if not windows_overlap(self.destination_window, req_window):
return
        # Crop req_window so it does not extend outside dest_window
dest_req_window, req_window_changed = crop_window(req_window, self.destination_window)
# Translate req_window into source pix coords
src_req_window = self._destination_to_source( dest_req_window )
# If the requested area does not overlap the source window
if not windows_overlap(self.source_window, src_req_window):
return
# Crop source req window to be within source windowed bounds
src_req_window, src_req_window_changed = crop_window(src_req_window, self.source_window)
# Transform the source req window back into destination pixel coordinates
dest_req_window = self._source_to_destination(src_req_window)
# Where to put the data in the outarray
# Scale between original requested window and output buffer size
scale_req_win_to_outarray = tuple( float(a)/b for a,b in zip(out.shape, req_window_shape) )
# Calculate resulting window into outarray
out_start_row = int((dest_req_window[1][0]-req_window[1][0])*scale_req_win_to_outarray[1]+0.001)
out_end_row = int((dest_req_window[1][1]-req_window[1][0])*scale_req_win_to_outarray[1]+0.001)
out_start_col = int((dest_req_window[0][0]-req_window[0][0])*scale_req_win_to_outarray[0]+0.001)
out_end_col = int((dest_req_window[0][1]-req_window[0][0])*scale_req_win_to_outarray[0]+0.001)
out_window = ((out_start_row, out_end_row),(out_start_col, out_end_col))
out_window_shape = rasterio._base.window_shape(out_window)
if out_window_shape[0] < 1 or out_window_shape[1] < 1:
return
# Create tmp array with source dtype and possibly masked
if masked:
tmp_out = np.ma.zeros(out_window_shape, self.dataset.dtypes[0])
else:
tmp_out = np.zeros(out_window_shape, self.dataset.dtypes[0])
# Ok. Phew. Read
tmp_out = self.dataset.read_band(self.source_band, out=tmp_out, window=src_req_window, masked=masked)
# Put the data in out
out[ [slice(*dim) for dim in out_window] ] = tmp_out
return out
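# Illustrative usage sketch, not part of the original module. The file name, shapes
# and band number below are made-up assumptions; the function only shows how
# VirtualRaster, Band and Source are meant to be wired together.
def _example_mosaic():
    vrt = VirtualRaster((512, 512))
    band = Band(band_number=1, dtype='uint8', nodata=0)
    # Map a full 256x256 source file onto the upper-left quarter of the mosaic.
    band.sources.append(Source('/data/tiles/tile_0_0.tif', 1,
                               ((0, 256), (0, 256)),    # window in the source file
                               ((0, 256), (0, 256))))   # window in the virtual raster
    vrt.bands.append(band)
    vrt.open()
    try:
        # Read the upper-left quarter of the mosaic; sources outside the window are skipped.
        return vrt.read_band(1, window=((0, 256), (0, 256)))
    finally:
        vrt.close()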
| mit | -6,827,677,199,476,705,000 | 41.004717 | 113 | 0.603593 | false |
hustkerry/ML | src/ann/mlp.py | 1 | 26672 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File Name: MLP.py
Author: Kerry
Mail: [email protected]
Created Time: 2016-10-18 (Tuesday) 11:28:14
Description: Multi-layer perceptron, a supervised neural-network algorithm that can be used for
classification (binary, multiclass, multilabel) and regression, depending mainly on the activation
function used in the output layer (logistic, softmax, identity). Parameters are optimized with error
back-propagation, and several (mini-batch) SGD variants are supported.
References:
http://sebastianruder.com/optimizing-gradient-descent/
http://ufldl.stanford.edu/wiki/index.php/Backpropagation_Algorithm
http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
"""
import abc
import numpy as np
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
from __init__ import *
# NAG and Adam are both improved variants of standard SGD
_SGD_OPTIMIZERS = {'sgd', 'NAG', 'Adam'}
def judge_y_type(y):
"""根据y判断此次任务的类别,支持:
regression,回归,y的值都是连续的,不仅仅是只有整数,并且是1维
binary, 2分类, y只包含两种离散的值,且是1维
multiclass, 多分类, y包含不止两种离散的值,且是1维
multilabel, 多标签,y是2维的, 至少2列,只包含两种离散的值
unknow,不是上面任何一种
"""
if y.ndim > 2:
return 'unknow'
    if y.ndim == 1 and y.dtype.kind == 'f' and np.any(y != y.astype(int)):
return 'regression'
elif y.ndim == 1 and len(np.unique(y)) <= 2:
return 'binary'
elif y.ndim == 1 and len(np.unique(y)) > 2:
return 'multiclass'
elif y.ndim == 2 and y.shape[1] >= 2 and len(np.unique(y)) <= 2:
return 'multilabel'
else:
return 'unknow'
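# A few concrete cases of the rule above (hypothetical arrays, shown for clarity only):
# judge_y_type(np.array([0.5, 1.2, 3.0]))    -> 'regression' (continuous values, 1-D)
# judge_y_type(np.array([0, 1, 1, 0]))       -> 'binary'     (two discrete values, 1-D)
# judge_y_type(np.array([0, 1, 2]))          -> 'multiclass' (more than two values, 1-D)
# judge_y_type(np.array([[1, 0], [0, 1]]))   -> 'multilabel' (2-D indicator matrix)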
class BaseSGD(object):
"""所有SGD方法的基础抽象类
"""
__metaclass__ = abc.ABCMeta
def __init__(self, params, learning_rate_init):
pass
class BaseMLP(object):
"""MLP的基础抽样类,不能实例化,必须被继承实现抽象方法
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, hidden_layer_sizes, hidden_layer_activation, opt_way, alpha,
learning_rate_init, learning_rate_way, max_iteration, random_seed, shuffle,
batch_size, early_stop, valid_ratio, momentum, nesterovs_momentum, beta_1, beta_2,
invscaling_pow_step, adaptive_min_decrease_threshold, adaptive_epsilon, warm_start,
loss):
"""抽象初始化方法
Args:
hidden_layer_sizes, tuple, 隐藏层大小, 例如(5, 2)表示有2层,第一层5个,第二层2个神经元
hidden_layer_activation, 隐藏层的激活函数,支持{'identity', 'logistic', 'tanh', 'relu'}
opt_way, 参数优化的方法,支持标准sgd,adam,nag
alpha, L2正则项的权重参数
learning_rate_init, 初始化学习率
learning_rate_way, 学习率调整方法, {‘constant’, ‘invscaling’, ‘adaptive’}
max_iteration, 训练最大的迭代次数
random_seed, 随机数种子
shuffle, 每轮训练开始的时候是否需要shuffle训练集
batch_size, SGD中每个mini_batch的大小
early_stop, 是否允许提前结束训练(满足一定要求)
valid_ratio, early_stop为True情况下,验证集所占的比例
momentum, nesterovs_momentum, deta_1, deta_2均为改进的SGD算法对应的参数
invscaling_pow_step, invascaling方式中步长更新参数
adaptive_min_decrease_threshold, adaptive方式中最小下降阈值
adaptive_epsilon,adaptive方式中bool型参数
warm_start,是否热启动,如果是则用之前的model作为初始值,然后接着训练,否则重新训练
loss, 模型使用的损失函数
"""
self.hidden_layer_sizes = hidden_layer_sizes
self.hidden_layer_activation = hidden_layer_activation
self.opt_way = opt_way
self.alpha = alpha
self.learning_rate_init = learning_rate_init
self.learning_rate_way = learning_rate_way
self.max_iteration = max_iteration
self.random_seed = random_seed
self.shuffle = shuffle
self.batch_size = batch_size
self.early_stop = early_stop
self.valid_ratio = valid_ratio
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.beta_1 = beta_1
self.beta_2 = beta_2
self.invscaling_pow_step = invscaling_pow_step
self.adaptive_min_decrease_threshold = adaptive_min_decrease_threshold
self.adaptive_epsilon = adaptive_epsilon
self.warm_start = warm_start
self.loss = loss
def fit(self, X, Y):
"""训练的入口函数
Args:
X, array_like, 训练集的特征矩阵
Y, array_like, 训练集的label
"""
        # Non-incremental learning: first determine the task type from the labels
self.mission_type = judge_y_type(Y)
if self.mission_type == 'unknow':
raise ValueError("fit:不支持的任务类型")
if self.mission_type <> 'regression':
self.label_binarizer = preprocessing.LabelBinarizer()
self.label_binarizer.fit(Y)
self.classes = self.label_binarizer.classes_
print "任务类型: %s, classes: %s" %(self.mission_type, self.classes)
else:
print "任务类型: %s" % self.mission_type
return self._fit(X, Y, False)
def incremential_fit(self, X, Y, classes=None):
"""增量训练的入口函数,分类器中重新定义实现,隐藏本函数;回归中不用实现
Args:
X. 训练集的特征矩阵
Y, 训练集的label
classes, list, label对应的所有类别,不重复
"""
# 增量学习之前,需要验证当前使用的参数优化方法,只有sgd才支持增量式训练
if self.opt_way not in _SGD_OPTIMIZERS:
raise ValueError("incremental_fit只有在使用sgd方法时才可被调用: %s 不是sgd方法"
% self.opt_way)
return self._fit(X, Y, True)
def _fit(self, X, Y, incremential=False):
"""训练过程
Args:
X, 训练集的特征矩阵
Y, 训练集的label
incremential, 是否是增量训练
"""
self._valid_hyperparams()
X, Y = self._valid_data(X, Y, incremential)
        # Initialization, which defines several attributes: number of neurons per layer, weight and bias parameters, etc.
self._initial(X, Y, incremential)
self._train(X, Y, incremential)
        # Return the trained instance
return self
def _train(self, X, Y, incremential=False):
"""训练过程
"""
print "***************************开始训练*******************************"
# 如果设置了提前结束(非增量学习),则需要划分出验证集
if self.early_stop and not incremential:
X, X_valid, Y, Y_valid = train_test_split(X, Y, self.valid_ratio, self.random_seed)
            # For classifiers, Y must be converted back to the original labels
            if self.mission_type != 'unknow' and self.mission_type != 'regression':
Y_valid = self.label_binarizer.inverse_transform(Y_valid)
else:
X_valid = None
Y_valid = None
n_samples, n_features = X.shape
if self.batch_size == "auto":
            # The default batch size is 200
batch_size = min(200, n_samples)
else:
batch_size = np.clip(self.batch_size, 1, n_samples)
        # Generate batches from X: first compute all slices
all_slices = []
start = 0
for _ in range(n_samples // batch_size):
all_slices.append(slice(start, start+batch_size))
start += batch_size
if start < n_samples:
all_slices.append(slice(start, n_samples))
        # Allocate the gradient arrays for the weights and biases
weight_grads = []
bias_grads = []
for i, j in zip(self.all_layer_neuron[:-1], self.all_layer_neuron[1:]):
weight_grads.append(np.empty((i, j)))
for i in self.all_layer_neuron[1:]:
bias_grads.append(np.empty(i))
        # Choose the loss function
loss_function = self.loss
if loss_function == 'log_loss' and self.out_layer_activation == 'logistic':
loss_function = 'binary_los_loss'
print "切片数量%d,大小为%d, 损失函数为%s" %(len(all_slices), batch_size, loss_function)
for i in range(self.max_iteration):
print "第%d轮迭代, " % (i+1),
# 迭代前先shuffle训练集
indices = self.random_seed.permutation(n_samples)
X = X[indices]
Y = Y[indices]
all_loss = 0.0
            # Run mini-batch SGD
for batch_slice in all_slices:
                # a_values holds the output of every neuron; its length is the number of layers in the MLP (input and output layers included)
a_values = [X[batch_slice]]
a_values.extend(np.empty((batch_size, neure_num))
for neure_num in self.all_layer_neuron[1:])
                # Forward pass to compute the MLP output under the current weights and biases
self._forward_pass(a_values)
                # Initialize the residual (delta) matrices for this batch
deltas = []
for unit in a_values[1:]:
deltas.append(np.empty_like(unit))
                # Back-propagate the error to obtain the average loss and the gradients of the weights and biases
batch_loss = self._compute_loss_grads(Y[batch_slice], a_values, weight_grads,
bias_grads, deltas, loss_function)
                # Update the weights and biases according to their gradients
all_loss += batch_loss * (batch_slice.stop - batch_slice.start)
def _update_params(self, method_name, weight_grads, bias_grads):
"""更新参数,method_name表示所使用的更新方式:sgd,NAG,Adam
"""
params_grads = weight_grads
def _forward_pass(self, a_values):
"""利用当前参数进行前向传导
"""
for level in range(self.n_layers - 1):
a_values[level + 1] = np.dot(a_values[level], self.weights[level])
a_values[level + 1] += self.bias[level]
            if level != (self.n_layers - 2):
                # Feed the weighted sum through the activation function (hidden layers)
a_values[level + 1] = ACTIVATION_FUNCTIONS[self.hidden_layer_activation](
a_values[level + 1])
        # Compute the output layer
a_values[level + 1] = ACTIVATION_FUNCTIONS[self.out_layer_activation](a_values[level + 1])
def _compute_loss_grads(self, Y, a_values, weight_grads, bias_grads, deltas, loss_function):
"""计算BP神经网络的参数梯度
"""
n_samples = a_values[0].shape[0]
loss_value = LOSS_FUNCTIONS[loss_function](Y, a_values[-1])
        # Compute the L2 regularization term
L2 = np.sum(np.array([np.dot(level.ravel(), level.ravel()) for level in self.weights]))
L2 = (0.5 * self.alpha) * L2 / n_samples
loss_value += L2
        # First compute the residuals of the last layer, then back-propagate them to get the residual of every
        # node in every layer; finally derive the gradients of the weights and biases from the residuals.
        # The residual of the last layer depends on the loss function and the output activation; in this
        # implementation the combinations are:
        # square_loss + identity
        # binary_log_loss + logistic
        # log_loss + softmax
        # In every case the residual of the last layer works out to f(z) - y
        end = self.n_layers - 2  # the list of residuals only has self.n_layers - 1 entries
deltas[end] = a_values[-1] - Y
self._computer_grads(end, n_samples, a_values, deltas, weight_grads, bias_grads)
for level in range(end, 0, -1):
            # Compute the residuals of layer level-1 from those of layer level
            deltas[level - 1] = np.dot(deltas[level], self.weights[level].T)
            # Multiply by the corresponding derivative f'(z)
if self.hidden_layer_activation == 'identity':
# do nothing
pass
elif self.hidden_layer_activation == 'logistic':
deltas[level - 1] *= (a_values[level] * (1 - a_values[level]))
elif self.hidden_layer_activation == 'tanh':
deltas[level - 1] *= (1 - a_values[level] ** 2)
elif self.hidden_layer_activation == 'relu':
deltas[level - 1][a_values[level] == 0] = 0
self._computer_grads(level - 1, n_samples, a_values, deltas, weight_grads, bias_grads)
return loss_value
def _computer_grads(self, layer, n_samples, a_values, deltas, weight_grads, bias_grads):
"""根据第layer层每个节点的残差,计算第layer-1层到layer层的weights和第layer层的bias
"""
weight_grads[layer] = np.dot(a_values[layer].T, deltas[layer])
        # L2 regularization part
        weight_grads[layer] += self.alpha * self.weights[layer]
        # Divide by the number of samples in this batch
weight_grads[layer] /= n_samples
bias_grads[layer] = np.mean(deltas[layer], 0)
#print a_values[layer]
#print deltas[layer]
def _valid_hyperparams(self):
"""验证MLP的超参数
"""
        # Handle the case of a single hidden layer
if not hasattr(self.hidden_layer_sizes, '__iter__'):
self.hidden_layer_sizes = [self.hidden_layer_sizes]
self.hidden_layer_sizes = list(self.hidden_layer_sizes)
for size in self.hidden_layer_sizes:
if size <= 0:
raise ValueError("隐藏层的神经元个数必须大于0,隐藏层参数异常")
# 支持的隐藏层激活函数
supported_hidden_layer_activations = {'identity', 'logistic', 'tanh', 'relu'}
if self.hidden_layer_activation not in supported_hidden_layer_activations:
raise ValueError("%s 是不被支持的隐藏层激活函数" % self.hidden_layer_activation)
if self.opt_way not in _SGD_OPTIMIZERS:
raise ValueError("%s 是不被支持的优化方法" % self.opt_way)
if self.opt_way == 'NAG' and (self.momentum > 1 or self.momentum < 0 or
not isinstance(self.nesterovs_momentum, bool)):
raise ValueError("NAG参数错误,momentum应该属于[0, 1],nesterovs_momentum应该是布尔类型")
if self.opt_way == 'Adam' and (self.beta_1 < 0 or self.beta_1 >= 1 or self.beta_2 < 0 or
self.beta_2 >= 1 or self.adaptive_epsilon <= 0.0):
raise ValueError("Adam参数错误,beta参数应该属于[0, 1), epsilon参数应该大于0")
if self.alpha < 0:
raise ValueError("L2正则项比例系数不能小于0")
        # Supported learning-rate schedules
        # constant: always use the initial learning rate
        # invscaling: the learning rate shrinks over time; at step i it is "initial rate / i**pow_step"
        # adaptive: a self-adjusting schedule; as long as two consecutive steps decrease the loss by at least a
        # threshold the rate is kept, otherwise it is halved
supported_learning_rate_way = {'constant', 'invscaling', 'adaptive'}
if self.learning_rate_way not in supported_learning_rate_way:
raise ValueError("%s 是不被支持的步长选择方式" % self.learning_rate_way)
if self.learning_rate_way == 'invscaling' and self.invscaling_pow_step < 0:
raise ValueError("invscaling方式的pow参数必须大于0")
if self.learning_rate_way == 'adaptive' and self.adaptive_min_decrease_threshold < 0:
raise ValueError("adaptive方式的最小阈值必须大于0")
# 检查随机数种子
if self.random_seed is None:
self.random_seed = np.random.mtrand._rand
elif isinstance(self.random_seed, int):
self.random_seed = np.random.RandomState(self.random_seed)
elif isinstance(self.random_seed, np.random.RandomState):
pass
else:
raise ValueError("随机数种子格式错误")
if self.valid_ratio < 0 or self.valid_ratio >= 1:
raise ValueError("验证集比例属于[0, 1)")
if self.learning_rate_init <= 0:
raise ValueError("参数更新初始步长必须大于0")
if self.max_iteration < 1:
raise ValueError("最大迭代数必须大于0")
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle参数必须是布尔类型")
if not isinstance(self.early_stop, bool):
raise ValueError("early_stop参数必须是布尔类型")
if not isinstance(self.warm_start, bool):
raise ValueError("warm_start参数必须是布尔类型")
@abc.abstractmethod
def _valid_data(self, X, Y, incremential=False):
"""验证训练集的输入,抽象方法,分类和回归验证机制不同,非抽象子类必须实现
"""
return
def _initial(self, X, Y, incremential):
"""训练前的初始化工作
"""
print "***************************开始初始化*******************************"
# 获取训练集的个数和特征数
n_samples, n_features = X.shape
# 确保Y是2维的
if Y.ndim == 1:
Y = Y.reshape((-1,1))
        # Number of output features; for many tasks (e.g. binary classification) this is 1
self.n_out_features = Y.shape[1]
        # Neuron counts of all layers, from input through hidden to output
        print "ANN structure: %d * %s * %d" %(n_features, self.hidden_layer_sizes, self.n_out_features)
self.all_layer_neuron = [n_features] + self.hidden_layer_sizes + [self.n_out_features]
self.n_layers = len(self.all_layer_neuron)
        # Choose the output-layer activation from the training data: identity, logistic or softmax
        # Binary and multilabel classification use a logistic output layer with log_loss
        # Multiclass classification uses a softmax output layer with log_loss
        # Regression uses an identity output layer with square_loss
if not hasattr(self, "classes"):
self.out_layer_activation = "identity"
elif self.label_binarizer.y_type_ == "multiclass":
self.out_layer_activation = "softmax"
else:
self.out_layer_activation = "logistic"
print "隐藏层激活函数为%s, 输出层激活函数为%s, loss function为log_loss" \
%(self.hidden_layer_activation, self.out_layer_activation)
# 初始化权重和偏置项
# 第一次初始化
if not hasattr(self, "weights"):
self._initial_weights_bias()
        # Already initialized, but neither warm start nor incremental learning: re-initialize
elif (not self.warm_start and not incremential):
self._initial_weights_bias()
else:
            # do nothing
pass
        self.loss_data = []  # loss value of every epoch
        self.best_loss = np.inf  # smallest loss seen so far
        self.not_improve_count = 0  # number of consecutive epochs without improvement
        # If early stopping is enabled, also record the validation-set scores
        if self.early_stop:
            self.valid_score = []
            self.best_valid_score = -np.inf
        print "loss history: %s" % self.loss_data
        print "consecutive epochs without loss decrease: %d" % self.not_improve_count
        print "early stopping enabled: %s" % self.early_stop
        if self.early_stop:
            print "validation scores: %s" % self.valid_score
        print "*************************** initialization finished *******************************"
def _initial_weights_bias(self):
"""初始化MLP的两大参数:权重和偏执。
初始化时每层的weight和bias不能取相同的值,防止对称失效
"""
self.weights = []
self.bias = []
        # Initialize the network weights
        # Currently xavier initialization is used; this could later be exposed as a hyper-parameter
for i in range(self.n_layers - 1):
if self.hidden_layer_activation == "logistic":
bound = np.sqrt(2.0 / (self.all_layer_neuron[i] + self.all_layer_neuron[i+1]))
else:
bound = np.sqrt(6.0 / (self.all_layer_neuron[i] + self.all_layer_neuron[i+1]))
self.weights.append(self.random_seed.uniform(-bound, bound,
(self.all_layer_neuron[i],
self.all_layer_neuron[i+1])))
self.bias.append(self.random_seed.uniform(-bound, bound, self.all_layer_neuron[i+1]))
class MLPClassifier(BaseMLP):
"""基于MLP的分类器,可用于2分类,多分类,多标签分类,根据训练集label自动识别
"""
def __init__(self, hidden_layer_sizes=(80,), hidden_layer_activation="logistic", opt_way="Adam",
alpha=0.0001, learning_rate_init=0.001, learning_rate_way="constant", max_iteration
=300, random_seed=None, shuffle=True, batch_size="auto", early_stop=False,
valid_ratio=0.2, momentum=0.9, nesterovs_momentum=True, beta_1=0.9, beta_2=0.999,
invscaling_pow_step=0.5, adaptive_min_decrease_threshold=1e-4, adaptive_epsilon=
1e-8, warm_start=False):
"""分类MLP的初始化,提供了默认参数,误差为“log_loss”
"""
super(MLPClassifier, self).__init__(hidden_layer_sizes=hidden_layer_sizes,
hidden_layer_activation=hidden_layer_activation,
opt_way=opt_way,
alpha=alpha,
learning_rate_init=learning_rate_init,
learning_rate_way=learning_rate_way,
max_iteration=max_iteration,
random_seed=random_seed,
shuffle=shuffle,
batch_size=batch_size,
early_stop=early_stop,
valid_ratio=valid_ratio,
momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
beta_1=beta_1,
beta_2=beta_2,
invscaling_pow_step=invscaling_pow_step,
adaptive_epsilon=adaptive_epsilon,
adaptive_min_decrease_threshold=
adaptive_min_decrease_threshold,
warm_start=warm_start,
loss="log_loss")
def incremential_fit(self, X, Y, classes=None):
"""对于增量学习,第一批样本不一定覆盖所有类别(例如:多分类),因此需要传入classes参数
"""
# 检测是不是第一批样本
# 如果是
if not hasattr(self, "classes"):
possible_mission_type = judge_y_type(Y)
if possible_mission_type == 'unknow':
raise ValueError("incremential_fit:不支持的任务类型")
# 如果是多标签分类
if possible_mission_type == 'multilabel':
self.label_binarizer = preprocessing.LabelBinarizer()
self.label_binarizer.fit(Y)
self.classes = self.label_binarizer.classes_
self.mission_type = 'multilabel'
elif classes is None:
raise ValueError("分类任务(非多标签分类)中,第一次fit必须传入classes参数")
else:
self.classes = np.array(sorted(classes))
self.label_binarizer = preprocessing.LabelBinarizer()
self.label_binarizer.fit(classes)
if len(self.classes) <= 2:
self.mission_type = 'binary'
else:
self.mission_type = 'multiclass'
        # Not the first batch
else:
if (self.mission_type == 'binary' or self.mission_type == 'multiclass') and\
                    classes is not None and not np.array_equal(self.classes, np.array(sorted(classes))):
                raise ValueError("in (non-multilabel) classification the classes argument passed in must match the previous one")
print "任务类型: %s, classes: %s" %(self.mission_type, self.classes)
return self._fit(X, Y, True)
def _valid_data(self, X, Y, incremential):
"""MLP分类器验证输入数据,这里借助sklearn中的LabelBinarizer函数
"""
n_samples, n_features = X.shape
n_y_samples = Y.shape[0]
        if n_samples != n_y_samples:
            raise ValueError("the numbers of feature rows and labels differ")
if not incremential:
            # Already handled in the fit function
pass
#self.label_binarizer = preprocessing.LabelBinarizer()
#self.label_binarizer.fit(Y)
#self.classes = self.label_binarizer.classes_
else:
temp_binarizer = preprocessing.LabelBinarizer()
temp_binarizer.fit(Y)
            if np.setdiff1d(temp_binarizer.classes_, self.classes).size:
                if self.mission_type == "multilabel":
                    raise ValueError("the classes of this batch are inconsistent with the previous ones, %s->%s" \
                        %(self.classes, temp_binarizer.classes_))
                else:
                    raise ValueError("this batch contains classes that were never seen before, %s->%s" \
                        %(self.classes, temp_binarizer.classes_))
Y = self.label_binarizer.transform(Y)
return X, Y
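# Illustrative usage sketch, not part of the original module. The data below is made
# up, and only methods defined above (fit / incremential_fit) are exercised; no
# predict method is shown in this file, so none is used here.
def _example_training_run():
    X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    y = np.array([0, 1, 1, 0])
    clf = MLPClassifier(hidden_layer_sizes=(8,), max_iteration=50, random_seed=42)
    clf.fit(X, y)  # the task type is detected as 'binary' from the labels
    # Incremental training on a further mini-batch reuses the same set of classes.
    clf.incremential_fit(X, y, classes=[0, 1])
    return clf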
| apache-2.0 | -8,613,274,251,287,674,000 | 40.962756 | 100 | 0.555205 | false |
ivoire/ReactOBus | reactobus/core.py | 1 | 1772 | # -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2016 Rémi Duraffort
# This file is part of ReactOBus.
#
# ReactOBus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ReactOBus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ReactOBus. If not, see <http://www.gnu.org/licenses/>
import logging
import multiprocessing
from setproctitle import setproctitle
import zmq
LOG = logging.getLogger("ROB.core")
class Core(multiprocessing.Process):
def __init__(self, inbound, outbound):
super().__init__()
self.inbound = inbound
self.outbound = outbound
def run(self):
setproctitle("ReactOBus [core]")
# Create the ZMQ context
self.context = zmq.Context.instance()
self.pull = self.context.socket(zmq.PULL)
LOG.debug("Binding inbound (%s)", self.inbound)
self.pull.bind(self.inbound)
self.pub = self.context.socket(zmq.PUB)
# Set 0 limit on input and output HWM
self.pub.setsockopt(zmq.SNDHWM, 0)
LOG.debug("Binding outbound (%s)", self.outbound)
self.pub.bind(self.outbound)
while True:
msg = self.pull.recv_multipart()
LOG.debug(msg)
# TODO: use a proxy
# Publish to all outputs
self.pub.send_multipart(msg)
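# Illustrative usage sketch, not part of the original module. The endpoints are
# made-up examples; the function only shows how a producer and a consumer would
# attach to a running Core, which forwards every multipart message from PULL to PUB.
def _example_roundtrip():
    core = Core("ipc:///tmp/ReactOBus.inbound", "ipc:///tmp/ReactOBus.outbound")
    core.start()
    ctx = zmq.Context.instance()
    push = ctx.socket(zmq.PUSH)
    push.connect("ipc:///tmp/ReactOBus.inbound")
    sub = ctx.socket(zmq.SUB)
    sub.setsockopt(zmq.SUBSCRIBE, b"")
    sub.connect("ipc:///tmp/ReactOBus.outbound")
    push.send_multipart([b"org.example.topic", b"payload"])
    return sub.recv_multipart()  # the same frames, re-published by Core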
| agpl-3.0 | -4,419,558,720,863,790,600 | 32.415094 | 77 | 0.671937 | false |
opencivicdata/scrapers-ca | disabled/ca_nl_municipalities/people.py | 1 | 3744 | from utils import CanadianScraper, CanadianPerson as Person
from pupa.scrape import Organization
import os
import re
import subprocess
from urllib.request import urlopen
COUNCIL_PAGE = 'http://www.ma.gov.nl.ca/ma/municipal_directory/index.html'
class NewfoundlandAndLabradorMunicipalitiesPersonScraper(CanadianScraper):
def scrape(self):
page = self.lxmlize(COUNCIL_PAGE)
url = page.xpath('//a[contains(text(),"Municipal Directory")]/@href')[0]
response = urlopen(url).read()
        pdf = open('/tmp/nl.pdf', 'wb')
pdf.write(response)
pdf.close()
        data = subprocess.check_output(['pdftotext', '-layout', '/tmp/nl.pdf', '-']).decode('utf-8')
pages = data.split('Municipal Directory')[1:]
for page in pages:
page = page.splitlines(True)
column_index = {}
for line in page:
if 'Official Name' in line:
column_index['dist_end'] = re.search('Region', line).start()
column_index['name_start'] = re.search('Mayor', line).start() + 1
column_index['name_end'] = re.search('Clerk', line).start() - 1
column_index['phone_start'] = re.search('Line 1', line).start()
column_index['phone_end'] = re.search('Line 2', line).start() - 1
column_index['fax_start'] = re.search('Fax', line).start()
column_index['fax_end'] = re.search('E-mail', line).start() - 2
column_index['email_start'] = column_index['fax_end'] + 1
column_index['email_end'] = re.search('Address', line).start() - 1
column_index['address_start'] = column_index['email_end'] + 1
column_index['address_end'] = re.search('Days', line).start() - 1
break
for line in page:
if 'Official Name' in line or not line.strip():
continue
district = line[:column_index['dist_end']]
name = line[column_index['name_start']:column_index['name_end']].strip()
phone = line[column_index['phone_start']:column_index['phone_end']].strip().replace('(', '').replace(') ', '-')
# fax = line[column_index['fax_start']:column_index['fax_end']].strip().replace('(', '').replace(') ', '-')
email = line[column_index['email_start']:column_index['email_end']].strip()
address = line[column_index['address_start']:column_index['address_end']].strip()
address = re.sub(r'\s{2,}', ', ', address)
if not name or not district:
continue
org = Organization(name=district + ' Municipal Council', classification='legislature', jurisdiction_id=self.jurisdiction.jurisdiction_id)
org.add_source(COUNCIL_PAGE)
org.add_source(url)
yield org
p = Person(primary_org='legislature', name=name, district=district)
p.add_source(COUNCIL_PAGE)
p.add_source(url)
membership = p.add_membership(org, role='Mayor', district=district)
if phone:
membership.add_contact_detail('voice', phone, 'legislature')
# I'm excluding fax because that column isn't properly aligned
# if fax:
# membership.add_contact_detail('fax', fax)
if email:
membership.add_contact_detail('email', email)
if address:
membership.add_contact_detail('address', address, 'legislature')
yield p
os.system('rm /tmp/nl.pdf')
| mit | -4,509,376,508,700,123,000 | 50.287671 | 153 | 0.545139 | false |
bureaucratic-labs/pinkerton | pinkerton/tests/test_linker.py | 1 | 1153 | import pytest
from pinkerton.linker import EntityLinker
from pinkerton.extractor import EntityExtractor
from pinkerton.similarity import LDASimilarity
from pinkerton.providers import WikipediaProvider
@pytest.fixture
def history_text():
return '''
Иван Васильевич, царь всея руси, по историческим данным, был тираном
'''
@pytest.fixture
def linker():
return EntityLinker(
extractor=EntityExtractor(
api_base_url='https://natasha.b-labs.pro/api/'
),
comparator=LDASimilarity(),
providers=[
WikipediaProvider(),
],
)
@pytest.mark.asyncio
async def test_link_person(linker, history_text):
async for obj, entities in linker.process(history_text):
assert obj['fields']['firstname'] == 'Иван'
assert obj['fields']['middlename'] == 'Василиевич'
entity, score = entities[0]
assert entity['title'] == 'Иван Грозный'
assert entity['source'] == 'https://ru.wikipedia.org/wiki/%D0%98%D0%B2%D0%B0%D0%BD_%D0%93%D1%80%D0%BE%D0%B7%D0%BD%D1%8B%D0%B9'
| mit | -394,008,716,112,874,750 | 27.210526 | 134 | 0.663246 | false |
denniswjackson/embedded-tools | apollo/bin/stubFactory/stub_neom8.py | 1 | 1798 | import sys
from genStubs import *
stub = Stubs( "neom8", sys.argv[1], sys.argv[2] )
stub.include("NeoM8.h")
stub.newline()
stub.stubConstructor( "NeoM8", "const char * const PUartName", "m_config( )",
"m_uart( PUartName )",
"m_driverState( DRIVERSTATE_POWERON )",
"m_ubxParser( )",
"m_navPvtHandler( NULL )",
"m_navSatHandler( NULL )",
"m_navStatusHandler( NULL )",
"m_expectedAckAck( )",
"m_expectedAckAckReceived( false )",
"m_expectedCfgPrt( )",
"m_expectedCfgPrtReceived( false )" )
stub.stubFunction( ("NeoM8::Error_t","NeoM8::ERROR_NONE"), "NeoM8::init", "const NeoM8::SConfig * const" )
stub.stubFunction( ("void",), "NeoM8::setNavPvtHandler", "const NeoM8::NavPvtHandler_t" )
stub.stubFunction( ("void",), "NeoM8::setNavSatHandler", "const NeoM8::NavSatHandler_t" )
stub.stubFunction( ("void",), "NeoM8::setNavStatusHandler", "const NeoM8::NavStatusHandler_t" )
stub.stubFunction( ("void",), "NeoM8::hardReset" )
stub.stubFunction( ("NeoM8::Error_t","NeoM8::ERROR_NONE"), "NeoM8::softReset", "const Ubx::CfgRst::ResetMode_t", "const Ubx::CfgRst::NavBbrMask_t" )
stub.stubFunction( ("NeoM8::Error_t","NeoM8::ERROR_NONE"), "NeoM8::process", "const uint32_t" ) | mit | 3,529,628,094,997,150,700 | 68.192308 | 148 | 0.451613 | false |
trendelkampschroer/PyEMMA | pyemma/_base/progress/reporter.py | 1 | 4786 | '''
Created on 16.07.2015
@author: marscher
'''
from __future__ import absolute_import
from pyemma.util.types import is_int
from pyemma._base.progress.bar import ProgressBar as _ProgressBar
from pyemma._base.progress.bar import show_progressbar as _show_progressbar
class ProgressReporter(object):
""" Derive from this class to make some protected methods available to register
and update status of different stages of an algorithm.
"""
# Note: this class has intentionally no constructor, because it is more
# comfortable for the user of this class (who is then not in the need to call it).
@property
def progress_silence(self):
""" If set to True, no progress will be reported. Defaults to False."""
if not hasattr(self, '_prog_rep_silence'):
self._prog_rep_silence = False
return self._prog_rep_silence
@progress_silence.setter
def progress_silence(self, value):
setattr(self, '_prog_rep_silence', value)
def _progress_register(self, amount_of_work, description=None, stage=0):
""" Registers a progress which can be reported/displayed via a progress bar.
Parameters
----------
amount_of_work : int
Amount of steps the underlying algorithm has to perform.
description : str, optional
This string will be displayed in the progress bar widget.
stage : int, optional, default=0
If the algorithm has multiple different stages (eg. calculate means
in the first pass over the data, calculate covariances in the second),
one needs to estimate different times of arrival.
"""
if hasattr(self, '_prog_rep_silence') and self._prog_rep_silence:
return
# note this semantic makes it possible to use this class without calling
# its constructor.
if not hasattr(self, '_prog_rep_progressbars'):
self._prog_rep_progressbars = {}
if not is_int(amount_of_work):
raise ValueError("amount_of_work has to be of integer type. But is "
+ str(type(amount_of_work)))
# if stage in self._prog_rep_progressbars:
# import warnings
# warnings.warn("overriding progress for stage " + str(stage))
self._prog_rep_progressbars[stage] = _ProgressBar(
amount_of_work, description=description)
def register_progress_callback(self, call_back, stage=0):
""" Registers the progress reporter.
Parameters
----------
call_back : function
This function will be called with the following arguments:
1. stage (int)
2. instance of pyemma.utils.progressbar.ProgressBar
3. optional *args and named keywords (**kw), for future changes
stage: int, optional, default=0
The stage you want the given call back function to be fired.
"""
if hasattr(self, '_prog_rep_silence') and self._prog_rep_silence:
return
        if not hasattr(self, '_prog_rep_callbacks'):
self._prog_rep_callbacks = {}
assert callable(call_back)
# check we have the desired function signature
import inspect
argspec = inspect.getargspec(call_back)
assert len(argspec.args) == 2
assert argspec.varargs is not None
assert argspec.keywords is not None
if stage not in self._prog_rep_callbacks:
self._prog_rep_callbacks[stage] = []
self._prog_rep_callbacks[stage].append(call_back)
def _progress_update(self, numerator_increment, stage=0):
""" Updates the progress. Will update progress bars or other progress output.
Parameters
----------
        numerator_increment : int
            amount of work done in the current stage since the last update
stage : int, nonnegative, default=0
Current stage of the algorithm, 0 or greater
"""
if hasattr(self, '_prog_rep_silence') and self._prog_rep_silence:
return
if stage not in self._prog_rep_progressbars:
raise RuntimeError(
"call _progress_register(amount_of_work, stage=x) on this instance first!")
pg = self._prog_rep_progressbars[stage]
pg.numerator += numerator_increment
_show_progressbar(pg)
if hasattr(self, '_prog_rep_callbacks'):
for callback in self._prog_rep_callbacks[stage]:
callback(stage, pg)
def _progress_force_finish(self, stage=0):
""" forcefully finish the progress for given stage """
pg = self._prog_rep_progressbars[stage]
pg.numerator = pg.denominator
pg._eta.eta_epoch = 0
_show_progressbar(pg)
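# Illustrative usage sketch, not part of the original module. It shows the intended
# call pattern for classes deriving from ProgressReporter; the amount of work and the
# single-stage layout are made-up.
class _ExampleAlgorithm(ProgressReporter):
    def run(self, n_chunks=10):
        # stage 0: one unit of work per chunk
        self._progress_register(n_chunks, description="processing chunks", stage=0)
        for _ in range(n_chunks):
            # ... do one chunk of real work here ...
            self._progress_update(1, stage=0)
        self._progress_force_finish(stage=0)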
| bsd-2-clause | -2,879,395,618,199,037,400 | 36.984127 | 91 | 0.624948 | false |
openstack/rally | rally/common/db/migrations/versions/2016_01_ca3626f62937_init_migration.py | 1 | 7920 | # Copyright (c) 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Init migration
Revision ID: ca3626f62937
Revises:
Create Date: 2016-01-07 00:27:39.687814
"""
from alembic import op
import sqlalchemy as sa
from rally.common.db import api
from rally.common.db import sa_types
from rally import exceptions
# revision identifiers, used by Alembic.
revision = "ca3626f62937"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
dialect = api.get_engine().dialect
deployments_columns = [
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("uuid", sa.String(length=36), nullable=False),
sa.Column("parent_uuid", sa.String(length=36), nullable=True),
sa.Column("name", sa.String(length=255), nullable=True),
sa.Column("started_at", sa.DateTime(), nullable=True),
sa.Column("completed_at", sa.DateTime(), nullable=True),
sa.Column("config", sa_types.MutableJSONEncodedDict(), nullable=False),
sa.Column("admin", sa.PickleType(), nullable=True),
sa.Column("users", sa.PickleType(), nullable=False),
sa.Column("enum_deployments_status", sa.Enum(
"cleanup->failed", "cleanup->finished", "cleanup->started",
"deploy->failed", "deploy->finished", "deploy->inconsistent",
"deploy->init", "deploy->started", "deploy->subdeploy",
name="enum_deploy_status"), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name")
]
if dialect.name.startswith("sqlite"):
deployments_columns.append(
sa.ForeignKeyConstraint(
["parent_uuid"], [u"deployments.uuid"],
name="fk_parent_uuid", use_alter=True)
)
# commands auto generated by Alembic - please adjust!
op.create_table("deployments", *deployments_columns)
op.create_index("deployment_parent_uuid", "deployments",
["parent_uuid"], unique=False)
op.create_index("deployment_uuid", "deployments", ["uuid"], unique=True)
if not dialect.name.startswith("sqlite"):
op.create_foreign_key("fk_parent_uuid", "deployments", "deployments",
["parent_uuid"], ["uuid"])
op.create_table(
"workers",
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("hostname", sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("hostname", name="uniq_worker@hostname")
)
op.create_table(
"resources",
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("provider_name", sa.String(length=255), nullable=True),
sa.Column("type", sa.String(length=255), nullable=True),
sa.Column("info", sa_types.MutableJSONEncodedDict(), nullable=False),
sa.Column("deployment_uuid", sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(["deployment_uuid"], [u"deployments.uuid"]),
sa.PrimaryKeyConstraint("id")
)
op.create_index("resource_deployment_uuid", "resources",
["deployment_uuid"], unique=False)
op.create_index("resource_provider_name", "resources",
["deployment_uuid", "provider_name"], unique=False)
op.create_index("resource_provider_name_and_type", "resources",
["deployment_uuid", "provider_name", "type"],
unique=False)
op.create_index("resource_type", "resources",
["deployment_uuid", "type"], unique=False)
op.create_table(
"tasks",
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("uuid", sa.String(length=36), nullable=False),
sa.Column("status", sa.Enum(
"aborted", "aborting", "cleaning up", "failed", "finished",
"init", "paused", "running", "setting up", "soft_aborting",
"verifying", name="enum_tasks_status"), nullable=False),
sa.Column("verification_log", sa.Text(), nullable=True),
sa.Column("tag", sa.String(length=64), nullable=True),
sa.Column("deployment_uuid", sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(["deployment_uuid"], [u"deployments.uuid"], ),
sa.PrimaryKeyConstraint("id")
)
op.create_index("task_deployment", "tasks", ["deployment_uuid"],
unique=False)
op.create_index("task_status", "tasks", ["status"], unique=False)
op.create_index("task_uuid", "tasks", ["uuid"], unique=True)
op.create_table(
"verifications",
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("uuid", sa.String(length=36), nullable=False),
sa.Column("deployment_uuid", sa.String(length=36), nullable=False),
sa.Column("status", sa.Enum(
"aborted", "aborting", "cleaning up", "failed", "finished",
"init", "paused", "running", "setting up", "soft_aborting",
"verifying", name="enum_tasks_status"), nullable=False),
sa.Column("set_name", sa.String(length=20), nullable=True),
sa.Column("tests", sa.Integer(), nullable=True),
sa.Column("errors", sa.Integer(), nullable=True),
sa.Column("failures", sa.Integer(), nullable=True),
sa.Column("time", sa.Float(), nullable=True),
sa.ForeignKeyConstraint(["deployment_uuid"], [u"deployments.uuid"], ),
sa.PrimaryKeyConstraint("id")
)
op.create_index("verification_uuid", "verifications", ["uuid"],
unique=True)
op.create_table(
"task_results",
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("key", sa_types.MutableJSONEncodedDict(), nullable=False),
sa.Column("data", sa_types.MutableJSONEncodedDict(), nullable=False),
sa.Column("task_uuid", sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(["task_uuid"], ["tasks.uuid"], ),
sa.PrimaryKeyConstraint("id")
)
op.create_table(
"verification_results",
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("verification_uuid", sa.String(length=36), nullable=True),
sa.Column("data", sa_types.MutableJSONEncodedDict(), nullable=False),
sa.ForeignKeyConstraint(["verification_uuid"], ["verifications.uuid"]),
sa.PrimaryKeyConstraint("id")
)
# end Alembic commands
def downgrade():
raise exceptions.DowngradeNotSupported()
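# Typical application of this migration (hypothetical setup; assumes an alembic
# configuration pointing at the Rally database):
#
#   alembic upgrade ca3626f62937
#
# Downgrading is intentionally unsupported and raises DowngradeNotSupported.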
| apache-2.0 | 6,561,320,124,382,621,000 | 40.684211 | 79 | 0.62399 | false |
opesci/devito | devito/passes/iet/languages/openmp.py | 1 | 6258 | import cgen as c
from sympy import Not
from devito.arch import AMDGPUX, NVIDIAX
from devito.ir import (Block, Call, Conditional, List, Prodder, ParallelIteration,
ParallelBlock, PointerCast, While, FindNodes, Transformer)
from devito.mpi.routines import IrecvCall, IsendCall
from devito.passes.iet.definitions import DataManager, DeviceAwareDataManager
from devito.passes.iet.engine import iet_pass
from devito.passes.iet.orchestration import Orchestrator
from devito.passes.iet.parpragma import (PragmaSimdTransformer, PragmaShmTransformer,
PragmaDeviceAwareTransformer, PragmaLangBB)
from devito.passes.iet.languages.C import CBB
from devito.passes.iet.languages.utils import make_clause_reduction
from devito.symbolics import CondEq, DefFunction
__all__ = ['SimdOmpizer', 'Ompizer', 'OmpIteration', 'OmpRegion',
'DeviceOmpizer', 'DeviceOmpIteration', 'DeviceOmpDataManager',
'OmpDataManager', 'OmpOrchestrator']
class OmpRegion(ParallelBlock):
@classmethod
def _make_header(cls, nthreads, private=None):
private = ('private(%s)' % ','.join(private)) if private else ''
return c.Pragma('omp parallel num_threads(%s) %s' % (nthreads.name, private))
class OmpIteration(ParallelIteration):
@classmethod
def _make_construct(cls, parallel=False, **kwargs):
if parallel:
return 'omp parallel for'
else:
return 'omp for'
@classmethod
def _make_clauses(cls, ncollapse=None, chunk_size=None, nthreads=None,
reduction=None, schedule=None, **kwargs):
clauses = []
clauses.append('collapse(%d)' % (ncollapse or 1))
if chunk_size is not False:
clauses.append('schedule(%s,%s)' % (schedule or 'dynamic',
chunk_size or 1))
if nthreads:
clauses.append('num_threads(%s)' % nthreads)
if reduction:
clauses.append(make_clause_reduction(reduction))
return clauses
@classmethod
def _process_kwargs(cls, **kwargs):
kwargs = super()._process_kwargs(**kwargs)
kwargs.pop('schedule', None)
kwargs.pop('parallel', False)
kwargs.pop('chunk_size', None)
kwargs.pop('nthreads', None)
return kwargs
class DeviceOmpIteration(OmpIteration):
@classmethod
def _make_construct(cls, **kwargs):
return 'omp target teams distribute parallel for'
@classmethod
def _make_clauses(cls, **kwargs):
kwargs['chunk_size'] = False
return super()._make_clauses(**kwargs)
@classmethod
def _process_kwargs(cls, **kwargs):
kwargs = super()._process_kwargs(**kwargs)
kwargs.pop('gpu_fit', None)
return kwargs
class ThreadedProdder(Conditional, Prodder):
_traversable = []
def __init__(self, prodder):
# Atomic-ize any single-thread Prodders in the parallel tree
condition = CondEq(Ompizer.lang['thread-num'], 0)
# Prod within a while loop until all communications have completed
# In other words, the thread delegated to prodding is entrapped for as long
# as it's required
prod_until = Not(DefFunction(prodder.name, [i.name for i in prodder.arguments]))
then_body = List(header=c.Comment('Entrap thread until comms have completed'),
body=While(prod_until))
Conditional.__init__(self, condition, then_body)
Prodder.__init__(self, prodder.name, prodder.arguments, periodic=prodder.periodic)
class OmpBB(PragmaLangBB):
mapper = {
# Misc
'name': 'OpenMP',
'header': 'omp.h',
# Platform mapping
AMDGPUX: None,
NVIDIAX: None,
# Runtime library
'init': None,
'thread-num': DefFunction('omp_get_thread_num'),
'num-devices': lambda args:
DefFunction('omp_get_num_devices', args),
'set-device': lambda args:
Call('omp_set_default_device', args),
# Pragmas
'simd-for': c.Pragma('omp simd'),
'simd-for-aligned': lambda i, j: c.Pragma('omp simd aligned(%s:%d)' % (i, j)),
'atomic': c.Pragma('omp atomic update'),
'map-enter-to': lambda i, j:
c.Pragma('omp target enter data map(to: %s%s)' % (i, j)),
'map-enter-alloc': lambda i, j:
c.Pragma('omp target enter data map(alloc: %s%s)' % (i, j)),
'map-update': lambda i, j:
c.Pragma('omp target update from(%s%s)' % (i, j)),
'map-update-host': lambda i, j:
c.Pragma('omp target update from(%s%s)' % (i, j)),
'map-update-device': lambda i, j:
c.Pragma('omp target update to(%s%s)' % (i, j)),
'map-release': lambda i, j, k:
c.Pragma('omp target exit data map(release: %s%s)%s'
% (i, j, k)),
'map-exit-delete': lambda i, j, k:
c.Pragma('omp target exit data map(delete: %s%s)%s'
% (i, j, k)),
}
mapper.update(CBB.mapper)
Region = OmpRegion
HostIteration = OmpIteration
DeviceIteration = DeviceOmpIteration
Prodder = ThreadedProdder
class DeviceOmpBB(OmpBB):
# NOTE: Work around clang>=10 issue concerning offloading arrays declared
# with an `__attribute__(aligned(...))` qualifier
PointerCast = lambda *args: PointerCast(*args, alignment=False)
class SimdOmpizer(PragmaSimdTransformer):
lang = OmpBB
class Ompizer(PragmaShmTransformer):
lang = OmpBB
class DeviceOmpizer(PragmaDeviceAwareTransformer):
lang = DeviceOmpBB
@iet_pass
def make_gpudirect(self, iet):
mapper = {}
for node in FindNodes((IsendCall, IrecvCall)).visit(iet):
header = c.Pragma('omp target data use_device_ptr(%s)' %
node.arguments[0].name)
mapper[node] = Block(header=header, body=node)
iet = Transformer(mapper).visit(iet)
return iet, {}
class OmpDataManager(DataManager):
lang = OmpBB
class DeviceOmpDataManager(DeviceAwareDataManager):
lang = DeviceOmpBB
class OmpOrchestrator(Orchestrator):
lang = DeviceOmpBB
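# For illustration (an assumed call, not part of the module): the clause builder above
# would turn OmpIteration._make_clauses(ncollapse=2, nthreads=4) into
#   ['collapse(2)', 'schedule(dynamic,1)', 'num_threads(4)']
# which the surrounding ParallelIteration machinery combines with the
# 'omp for' / 'omp parallel for' construct into a single pragma.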
| mit | 9,140,172,263,593,566,000 | 31.092308 | 90 | 0.616012 | false |
cw-andrews/texools | travis_pypi_setup.py | 1 | 4077 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file."""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'cw-andrews/texools'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key.
Work around keys with incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning."""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
"""Load yaml config file at the given path."""
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
"""Save yaml config file at the given path."""
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Put `encrypted_password` into the deploy section of .travis.yml."""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
"""Add a PyPI password to .travis.yml so that Travis can deploy to PyPI.
Fetch the Travis public key for the repo, and encrypt the PyPI password
with it before adding, so that only Travis can decrypt and use the PyPI
password.
"""
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
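# Typical invocation (hypothetical repo name shown), run from the project root after
# the repository has been enabled on Travis CI:
#
#   python travis_pypi_setup.py --repo myuser/myproject
#
# The script prompts for the PyPI password unless --password is given on the command line.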
| mit | -1,700,616,678,246,694,100 | 31.102362 | 79 | 0.680893 | false |
dantebarba/docker-media-server | plex/Sub-Zero.bundle/Contents/Code/support/scanning.py | 1 | 7590 | # coding=utf-8
import traceback
import helpers
from babelfish.exceptions import LanguageError
from support.lib import Plex, get_intent
from support.plex_media import get_stream_fps, is_stream_forced, update_stream_info
from support.storage import get_subtitle_storage
from support.config import config, TEXT_SUBTITLE_EXTS
from support.subtitlehelpers import get_subtitles_from_metadata
from subzero.video import parse_video, set_existing_languages
from subzero.language import language_from_stream, Language
def prepare_video(pms_video_info, ignore_all=False, hints=None, rating_key=None, providers=None, skip_hashing=False):
"""
    returns a subliminal/guessit-refined parsed video
:param pms_video_info:
:param ignore_all:
:param hints:
:param rating_key:
:return:
"""
embedded_subtitles = not ignore_all and Prefs['subtitles.scan.embedded']
external_subtitles = not ignore_all and Prefs['subtitles.scan.external']
plex_part = pms_video_info["plex_part"]
if ignore_all:
Log.Debug("Force refresh intended.")
Log.Debug("Detecting streams: %s, account_for_external_subtitles=%s, account_for_embedded_subtitles=%s" % (
plex_part.file, external_subtitles, embedded_subtitles))
known_embedded = []
parts = []
for media in list(Plex["library"].metadata(rating_key))[0].media:
parts += media.parts
plexpy_part = None
for part in parts:
if int(part.id) == int(plex_part.id):
plexpy_part = part
# embedded subtitles
# fixme: skip the whole scanning process if known_embedded == wanted languages?
audio_languages = []
if plexpy_part:
update_stream_info(plexpy_part)
for stream in plexpy_part.streams:
if stream.stream_type == 2:
lang = None
try:
lang = language_from_stream(stream.language_code)
except LanguageError:
Log.Info("Couldn't detect embedded audio stream language: %s", stream.language_code)
# treat unknown language as lang1?
if not lang and config.treat_und_as_first:
lang = Language.rebuild(list(config.lang_list)[0])
Log.Info("Assuming language %s for audio stream: %s", lang, getattr(stream, "index", None))
audio_languages.append(lang)
# subtitle stream
elif stream.stream_type == 3 and embedded_subtitles:
is_forced = is_stream_forced(stream)
if ((config.forced_only or config.forced_also) and is_forced) or not is_forced:
# embedded subtitle
# fixme: tap into external subtitles here instead of scanning for ourselves later?
if stream.codec and getattr(stream, "index", None):
if config.exotic_ext or stream.codec.lower() in config.text_based_formats:
lang = None
try:
lang = language_from_stream(stream.language_code)
except LanguageError:
Log.Info("Couldn't detect embedded subtitle stream language: %s", stream.language_code)
# treat unknown language as lang1?
if not lang and config.treat_und_as_first:
lang = Language.rebuild(list(config.lang_list)[0])
Log.Info("Assuming language %s for subtitle stream: %s", lang,
getattr(stream, "index", None))
if lang:
if is_forced:
lang.forced = True
known_embedded.append(lang)
else:
Log.Warn("Part %s missing of %s, not able to scan internal streams", plex_part.id, rating_key)
# metadata subtitles
known_metadata_subs = set()
meta_subs = get_subtitles_from_metadata(plex_part)
for language, subList in meta_subs.iteritems():
try:
lang = Language.fromietf(Locale.Language.Match(language))
except LanguageError:
if config.treat_und_as_first:
lang = Language.rebuild(list(config.lang_list)[0])
else:
continue
if subList:
for key in subList:
if key.startswith("subzero_md_forced"):
lang = Language.rebuild(lang, forced=True)
known_metadata_subs.add(lang)
Log.Debug("Found metadata subtitle %r:%s for %s", lang, key, plex_part.file)
Log.Debug("Known metadata subtitles: %r", known_metadata_subs)
Log.Debug("Known embedded subtitles: %r", known_embedded)
subtitle_storage = get_subtitle_storage()
stored_subs = subtitle_storage.load(rating_key)
subtitle_storage.destroy()
try:
# get basic video info scan (filename)
video = parse_video(plex_part.file, hints, skip_hashing=config.low_impact_mode or skip_hashing,
providers=providers)
# set stream languages
if audio_languages:
video.audio_languages = audio_languages
Log.Info("Found audio streams: %s" % ", ".join([str(l) for l in audio_languages]))
if not ignore_all:
set_existing_languages(video, pms_video_info, external_subtitles=external_subtitles,
embedded_subtitles=embedded_subtitles, known_embedded=known_embedded,
stored_subs=stored_subs, languages=config.lang_list,
only_one=config.only_one, known_metadata_subs=known_metadata_subs,
match_strictness=config.ext_match_strictness)
# add video fps info
video.fps = plex_part.fps
return video
except ValueError:
Log.Warn("File could not be guessed: %s: %s", plex_part.file, traceback.format_exc())
def scan_videos(videos, ignore_all=False, providers=None, skip_hashing=False):
"""
receives a list of videos containing dictionaries returned by media_to_videos
:param videos:
    :param ignore_all: if True, skip detecting existing external/embedded subtitle languages
    :param providers: subtitle providers to use; defaults to the configured providers for the media type
    :param skip_hashing: skip computing file hashes for the scanned videos
:return: dictionary of subliminal.video.scan_video, key=subliminal scanned video, value=plex file part
"""
ret = {}
for video in videos:
intent = get_intent()
force_refresh = intent.get("force", video["id"], video["series_id"], video["season_id"])
Log.Debug("Determining force-refresh (video: %s, series: %s, season: %s), result: %s"
% (video["id"], video["series_id"], video["season_id"], force_refresh))
hints = helpers.get_item_hints(video)
video["plex_part"].fps = get_stream_fps(video["plex_part"].streams)
p = providers or config.get_providers(media_type="series" if video["type"] == "episode" else "movies")
scanned_video = prepare_video(video, ignore_all=force_refresh or ignore_all, hints=hints,
rating_key=video["id"], providers=p,
skip_hashing=skip_hashing)
if not scanned_video:
continue
scanned_video.id = video["id"]
part_metadata = video.copy()
del part_metadata["plex_part"]
scanned_video.plexapi_metadata = part_metadata
scanned_video.ignore_all = force_refresh
ret[scanned_video] = video["plex_part"]
return ret
| gpl-3.0 | 5,807,542,513,225,596,000 | 42.371429 | 119 | 0.592754 | false |
retr0h/ansible | test/TestRunner.py | 1 | 25293 |
# tests are fairly 'live' (but safe to run)
# set up authorized_keys for the logged-in user so that
# the user can log in as themselves before running tests
import unittest
import getpass
import ansible.runner
import os
import shutil
import time
import tempfile
from nose.plugins.skip import SkipTest
def get_binary(name):
for directory in os.environ["PATH"].split(os.pathsep):
path = os.path.join(directory, name)
if os.path.isfile(path) and os.access(path, os.X_OK):
return path
return None
class TestRunner(unittest.TestCase):
def setUp(self):
self.user = getpass.getuser()
self.runner = ansible.runner.Runner(
basedir='test/',
module_name='ping',
module_path='library/',
module_args='',
remote_user=self.user,
remote_pass=None,
host_list='test/ansible_hosts',
timeout=5,
forks=1,
background=0,
pattern='all',
transport='local',
)
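        # transport='local' executes modules directly on this machine, so no SSH connection is required for these tests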
self.cwd = os.getcwd()
self.test_dir = os.path.join(self.cwd, 'test')
self.stage_dir = self._prepare_stage_dir()
def _prepare_stage_dir(self):
stage_path = os.path.join(self.test_dir, 'test_data')
if os.path.exists(stage_path):
shutil.rmtree(stage_path, ignore_errors=False)
assert not os.path.exists(stage_path)
os.makedirs(stage_path)
assert os.path.exists(stage_path)
return stage_path
def _get_test_file(self, filename):
# get a file inside the test input directory
filename = os.path.join(self.test_dir, filename)
assert os.path.exists(filename)
return filename
def _get_stage_file(self, filename):
# get a file inside the test output directory
filename = os.path.join(self.stage_dir, filename)
return filename
def _run(self, module_name, module_args, background=0, check_mode=False):
''' run a module and get the localhost results '''
self.runner.module_name = module_name
args = ' '.join(module_args)
self.runner.module_args = args
self.runner.background = background
self.runner.check = check_mode
results = self.runner.run()
# when using nosetests this will only show up on failure
# which is pretty useful
assert "localhost" in results['contacted']
return results['contacted']['localhost']
def test_action_plugins(self):
result = self._run("uncategorized_plugin", [])
assert result.get("msg") == "uncategorized"
result = self._run("categorized_plugin", [])
assert result.get("msg") == "categorized"
def test_ping(self):
result = self._run('ping', [])
assert "ping" in result
def test_facter(self):
if not get_binary("facter"):
raise SkipTest
result = self._run('facter', [])
assert "hostname" in result
    # temporarily disabled since it occasionally hangs
# ohai's fault, setup module doesn't actually run this
# to get ohai's "facts" anyway
#
#def test_ohai(self):
# if not get_binary("facter"):
# raise SkipTest
# result = self._run('ohai',[])
# assert "hostname" in result
def test_copy(self):
# test copy module, change trigger, etc
input_ = self._get_test_file('sample.j2')
output = self._get_stage_file('sample.out')
assert not os.path.exists(output)
result = self._run('copy', [
"src=%s" % input_,
"dest=%s" % output,
])
assert os.path.exists(output)
data_in = file(input_).read()
data_out = file(output).read()
assert data_in == data_out
assert 'failed' not in result
assert result['changed'] is True
assert 'md5sum' in result
result = self._run('copy', [
"src=%s" % input_,
"dest=%s" % output,
])
assert result['changed'] is False
with open(output, "a") as output_stream:
output_stream.write("output file now differs from input")
result = self._run('copy',
["src=%s" % input_, "dest=%s" % output, "force=no"],
check_mode=True)
assert result['changed'] is False
def test_command(self):
# test command module, change trigger, etc
result = self._run('command', ["/bin/echo", "hi"])
assert "failed" not in result
assert "msg" not in result
assert result['rc'] == 0
assert result['stdout'] == 'hi'
assert result['stderr'] == ''
result = self._run('command', ["false"])
assert result['rc'] == 1
assert 'failed' not in result
result = self._run('command', ["/usr/bin/this_does_not_exist", "splat"])
assert 'msg' in result
assert 'failed' in result
result = self._run('shell', ["/bin/echo", "$HOME"])
assert 'failed' not in result
assert result['rc'] == 0
result = self._run('command', ["creates='/tmp/ansible command test'", "chdir=/tmp", "touch", "'ansible command test'"])
assert 'changed' in result
assert result['rc'] == 0
result = self._run('command', ["creates='/tmp/ansible command test'", "false"])
assert 'skipped' in result
result = self._run('shell', ["removes=/tmp/ansible\\ command\\ test", "chdir=/tmp", "rm -f 'ansible command test'; echo $?"])
assert 'changed' in result
assert result['rc'] == 0
assert result['stdout'] == '0'
result = self._run('shell', ["removes=/tmp/ansible\\ command\\ test", "false"])
assert 'skipped' in result
def test_git(self):
self._run('file', ['path=/tmp/gitdemo', 'state=absent'])
self._run('file', ['path=/tmp/gd', 'state=absent'])
self._run('command', ['git init gitdemo', 'chdir=/tmp'])
self._run('command', ['touch a', 'chdir=/tmp/gitdemo'])
self._run('command', ['git add *', 'chdir=/tmp/gitdemo'])
        self._run('command', ['git commit -m "test commit 1"', 'chdir=/tmp/gitdemo'])
self._run('command', ['touch b', 'chdir=/tmp/gitdemo'])
self._run('command', ['git add *', 'chdir=/tmp/gitdemo'])
self._run('command', ['git commit -m "test commit 2"', 'chdir=/tmp/gitdemo'])
result = self._run('git', ["repo=\"file:///tmp/gitdemo\"", "dest=/tmp/gd"])
assert result['changed']
# test the force option not set
self._run('file', ['path=/tmp/gd/a', 'state=absent'])
result = self._run('git', ["repo=\"file:///tmp/gitdemo\"", "dest=/tmp/gd", "force=no"])
assert result['failed']
# test the force option when set
result = self._run('git', ["repo=\"file:///tmp/gitdemo\"", "dest=/tmp/gd", "force=yes"])
assert result['changed']
def test_file(self):
filedemo = tempfile.mkstemp()[1]
assert self._run('file', ['dest=' + filedemo, 'state=directory'])['failed']
assert os.path.isfile(filedemo)
assert self._run('file', ['dest=' + filedemo, 'src=/dev/null', 'state=link'])['failed']
assert os.path.isfile(filedemo)
res = self._run('file', ['dest=' + filedemo, 'mode=604', 'state=file'])
assert res['changed']
assert os.path.isfile(filedemo) and os.stat(filedemo).st_mode == 0100604
assert self._run('file', ['dest=' + filedemo, 'state=absent'])['changed']
assert not os.path.exists(filedemo)
assert not self._run('file', ['dest=' + filedemo, 'state=absent'])['changed']
filedemo = tempfile.mkdtemp()
assert self._run('file', ['dest=' + filedemo, 'state=file'])['failed']
assert os.path.isdir(filedemo)
        # this used to fail but will now make a 'null' symlink in the directory pointing to /dev/null.
# I feel this is ok but don't want to enforce it with a test.
#result = self._run('file', ['dest=' + filedemo, 'src=/dev/null', 'state=link'])
#assert result['failed']
#assert os.path.isdir(filedemo)
assert self._run('file', ['dest=' + filedemo, 'mode=701', 'state=directory'])['changed']
assert os.path.isdir(filedemo) and os.stat(filedemo).st_mode == 040701
assert self._run('file', ['dest=' + filedemo, 'state=absent'])['changed']
assert not os.path.exists(filedemo)
assert not self._run('file', ['dest=' + filedemo, 'state=absent'])['changed']
tmp_dir = tempfile.mkdtemp()
filedemo = os.path.join(tmp_dir, 'link')
os.symlink('/dev/zero', filedemo)
assert self._run('file', ['dest=' + filedemo, 'state=file'])['failed']
assert os.path.islink(filedemo)
assert self._run('file', ['dest=' + filedemo, 'state=directory'])['failed']
assert os.path.islink(filedemo)
assert self._run('file', ['dest=' + filedemo, 'src=/dev/null', 'state=link'])['changed']
assert os.path.islink(filedemo) and os.path.realpath(filedemo) == '/dev/null'
assert self._run('file', ['dest=' + filedemo, 'state=absent'])['changed']
assert not os.path.exists(filedemo)
assert not self._run('file', ['dest=' + filedemo, 'state=absent'])['changed']
# Make sure that we can deal safely with bad symlinks
os.symlink('/tmp/non_existent_target', filedemo)
assert self._run('file', ['dest=' + tmp_dir, 'state=directory recurse=yes mode=701'])['changed']
assert not self._run('file', ['dest=' + tmp_dir, 'state=directory', 'recurse=yes', 'owner=' + str(os.getuid())])['changed']
assert os.path.islink(filedemo)
assert self._run('file', ['dest=' + filedemo, 'state=absent'])['changed']
assert not os.path.exists(filedemo)
os.rmdir(tmp_dir)
def test_large_output(self):
large_path = "/usr/share/dict/words"
if not os.path.exists(large_path):
large_path = "/usr/share/dict/cracklib-small"
if not os.path.exists(large_path):
raise SkipTest
# Ensure reading a large amount of output from a command doesn't hang.
result = self._run('command', ["/bin/cat", large_path])
assert "failed" not in result
assert "msg" not in result
assert result['rc'] == 0
assert len(result['stdout']) > 100000
assert result['stderr'] == ''
def test_async(self):
# test async launch and job status
# of any particular module
result = self._run('command', [get_binary("sleep"), "3"], background=20)
assert 'ansible_job_id' in result
assert 'started' in result
jid = result['ansible_job_id']
# no real chance of this op taking a while, but whatever
time.sleep(5)
# CLI will abstract this (when polling), but this is how it works internally
result = self._run('async_status', ["jid=%s" % jid])
# TODO: would be nice to have tests for supervisory process
# killing job after X seconds
assert 'finished' in result
assert 'failed' not in result
assert 'rc' in result
assert 'stdout' in result
assert result['ansible_job_id'] == jid
def test_fetch(self):
input_ = self._get_test_file('sample.j2')
output = os.path.join(self.stage_dir, 'localhost', input_)
self._run('fetch', ["src=%s" % input_, "dest=%s" % self.stage_dir])
assert os.path.exists(output)
assert open(input_).read() == open(output).read()
def test_assemble(self):
input = self._get_test_file('assemble.d')
output = self._get_stage_file('sample.out')
result = self._run('assemble', [
"src=%s" % input,
"dest=%s" % output,
])
assert os.path.exists(output)
out = file(output).read()
assert out.find("first") != -1
assert out.find("second") != -1
assert out.find("third") != -1
assert result['changed'] is True
assert 'md5sum' in result
assert 'failed' not in result
result = self._run('assemble', [
"src=%s" % input,
"dest=%s" % output,
])
assert result['changed'] is False
def test_lineinfile(self):
# Unit tests for the lineinfile module, without backref features.
sampleroot = 'rocannon'
sample_origin = self._get_test_file(sampleroot + '.txt')
sample = self._get_stage_file(sampleroot + '.out' + '.txt')
shutil.copy(sample_origin, sample)
# The order of the test cases is important
# defaults to insertafter at the end of the file
testline = 'First: Line added by default at the end of the file.'
testcase = ('lineinfile', [
"dest=%s" % sample,
"regexp='^First: '",
"line='%s'" % testline
])
result = self._run(*testcase)
assert result['changed']
assert result['msg'] == 'line added'
artifact = [x.strip() for x in open(sample)]
assert artifact[-1] == testline
assert artifact.count(testline) == 1
# run a second time, verify only one line has been added
result = self._run(*testcase)
assert not result['changed']
assert result['msg'] == ''
artifact = [x.strip() for x in open(sample)]
assert artifact.count(testline) == 1
# insertafter with EOF
testline = 'Second: Line added with insertafter=EOF'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertafter=EOF",
"regexp='^Second: '",
"line='%s'" % testline
])
result = self._run(*testcase)
assert result['changed']
assert result['msg'] == 'line added'
artifact = [x.strip() for x in open(sample)]
assert artifact[-1] == testline
assert artifact.count(testline) == 1
# with invalid insertafter regex
# If the regexp doesn't match and the insertafter doesn't match,
# do nothing.
testline = 'Third: Line added with an invalid insertafter regex'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertafter='^abcdefgh'",
"regexp='^Third: '",
"line='%s'" % testline
])
result = self._run(*testcase)
assert not result['changed']
# with an insertafter regex
# The regexp doesn't match, but the insertafter is specified and does,
# so insert after insertafter.
testline = 'Fourth: Line added with a valid insertafter regex'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertafter='^receive messages to '",
"regexp='^Fourth: '",
"line='%s'" % testline
])
result = self._run(*testcase)
assert result['changed']
assert result['msg'] == 'line added'
artifact = [x.strip() for x in open(sample)]
assert artifact.count(testline) == 1
idx = artifact.index('receive messages to and from a corresponding device over any distance')
assert artifact[idx + 1] == testline
# replacement of a line from a regex
# we replace the line, so we need to get its idx before the run
artifact = [x.strip() for x in open(sample)]
target_line = 'combination of microphone, speaker, keyboard and display. It can send and'
idx = artifact.index(target_line)
        testline = 'Fifth: replacement of a line: combination of microphone'
testcase = ('lineinfile', [
"dest=%s" % sample,
"regexp='combination of microphone'",
"line='%s'" % testline
])
result = self._run(*testcase)
assert result['changed']
assert result['msg'] == 'line replaced'
artifact = [x.strip() for x in open(sample)]
assert artifact.count(testline) == 1
assert artifact.index(testline) == idx
assert target_line not in artifact
# removal of a line
# we replace the line, so we need to get its idx before the run
artifact = [x.strip() for x in open(sample)]
target_line = 'receive messages to and from a corresponding device over any distance'
idx = artifact.index(target_line)
testcase = ('lineinfile', [
"dest=%s" % sample,
"regexp='^receive messages to and from '",
"state=absent"
])
result = self._run(*testcase)
assert result['changed']
artifact = [x.strip() for x in open(sample)]
assert target_line not in artifact
# with both insertafter and insertbefore (should fail)
testline = 'Seventh: this line should not be there'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertafter='BOF'",
"insertbefore='BOF'",
"regexp='^communication. '",
"line='%s'" % testline
])
result = self._run(*testcase)
assert result['failed']
# insertbefore with BOF
testline = 'Eighth: insertbefore BOF'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertbefore=BOF",
"regexp='^Eighth: '",
"line='%s'" % testline
])
result = self._run(*testcase)
assert result['changed']
assert result['msg'] == 'line added'
artifact = [x.strip() for x in open(sample)]
assert artifact.count(testline) == 1
assert artifact[0] == testline
# insertbefore with regex
testline = 'Ninth: insertbefore with a regex'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertbefore='^communication. Typically '",
"regexp='^Ninth: '",
"line='%s'" % testline
])
result = self._run(*testcase)
assert result['changed']
assert result['msg'] == 'line added'
artifact = [x.strip() for x in open(sample)]
assert artifact.count(testline) == 1
idx = artifact.index('communication. Typically it is depicted as a lunch-box sized object with some')
assert artifact[idx - 1] == testline
# Testing validate
testline = 'Tenth: Testing with validate'
testcase = ('lineinfile', [
"dest=%s" % sample,
"regexp='^Tenth: '",
"line='%s'" % testline,
"validate='grep -q Tenth %s'",
])
result = self._run(*testcase)
assert result['changed'], "File wasn't changed when it should have been"
assert result['msg'] == 'line added', "msg was incorrect"
artifact = [ x.strip() for x in open(sample) ]
assert artifact[-1] == testline
# Testing validate
testline = '#11: Testing with validate'
testcase = ('lineinfile', [
"dest=%s" % sample,
"regexp='^#11: '",
"line='%s'" % testline,
"validate='grep -q #12# %s'",
])
result = self._run(*testcase)
assert result['failed']
# cleanup
os.unlink(sample)
def test_lineinfile_backrefs(self):
# Unit tests for the lineinfile module, with backref features.
sampleroot = 'rocannon'
sample_origin = self._get_test_file(sampleroot + '.txt')
origin_lines = [line.strip() for line in open(sample_origin)]
sample = self._get_stage_file(sampleroot + '.out' + '.txt')
shutil.copy(sample_origin, sample)
# The order of the test cases is important
# The regexp doesn't match, so the line will not be added anywhere.
testline = r'\1: Line added by default at the end of the file.'
testcase = ('lineinfile', [
"dest=%s" % sample,
"regexp='^(First): '",
"line='%s'" % testline,
"backrefs=yes",
])
result = self._run(*testcase)
assert not result['changed']
assert result['msg'] == ''
artifact = [x.strip() for x in open(sample)]
assert artifact == origin_lines
# insertafter with EOF
# The regexp doesn't match, so the line will not be added anywhere.
testline = r'\1: Line added with insertafter=EOF'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertafter=EOF",
"regexp='^(Second): '",
"line='%s'" % testline,
"backrefs=yes",
])
result = self._run(*testcase)
assert not result['changed']
assert result['msg'] == ''
artifact = [x.strip() for x in open(sample)]
assert artifact == origin_lines
# with invalid insertafter regex
# The regexp doesn't match, so do nothing.
testline = r'\1: Line added with an invalid insertafter regex'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertafter='^abcdefgh'",
"regexp='^(Third): '",
"line='%s'" % testline,
"backrefs=yes",
])
result = self._run(*testcase)
        assert not result['changed']
        artifact = [x.strip() for x in open(sample)]
        assert artifact == origin_lines
# with an insertafter regex
# The regexp doesn't match, so do nothing.
testline = r'\1: Line added with a valid insertafter regex'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertafter='^receive messages to '",
"regexp='^(Fourth): '",
"line='%s'" % testline,
"backrefs=yes",
])
result = self._run(*testcase)
        assert not result['changed']
        assert result['msg'] == ''
        artifact = [x.strip() for x in open(sample)]
        assert artifact == origin_lines
# replacement of a line from a regex
# we replace the line, so we need to get its idx before the run
artifact = [x.strip() for x in open(sample)]
target_line = 'combination of microphone, speaker, keyboard and display. It can send and'
idx = artifact.index(target_line)
testline = r'\1 of megaphone'
testline_after = 'combination of megaphone'
testcase = ('lineinfile', [
"dest=%s" % sample,
"regexp='(combination) of microphone'",
"line='%s'" % testline,
"backrefs=yes",
])
result = self._run(*testcase)
assert result['changed']
assert result['msg'] == 'line replaced'
artifact = [x.strip() for x in open(sample)]
assert artifact.count(testline_after) == 1
assert artifact.index(testline_after) == idx
assert target_line not in artifact
# Go again, should be unchanged now.
testline = r'\1 of megaphone'
testline_after = 'combination of megaphone'
testcase = ('lineinfile', [
"dest=%s" % sample,
"regexp='(combination) of megaphone'",
"line='%s'" % testline,
"backrefs=yes",
])
result = self._run(*testcase)
assert not result['changed']
assert result['msg'] == ''
        # Try a named capture group example (capturing a single digit).
f = open(sample, 'a+')
f.write("1 + 1 = 3" + os.linesep)
f.close()
testline = r"2 + \g<num> = 3"
testline_after = "2 + 1 = 3"
testcase = ('lineinfile', [
"dest=%s" % sample,
r"regexp='1 \+ (?P<num>\d) = 3'",
"line='%s'" % testline,
"backrefs=yes",
])
result = self._run(*testcase)
artifact = [x.strip() for x in open(sample)]
assert result['changed']
assert result['msg'] == 'line replaced'
artifact = [x.strip() for x in open(sample)]
assert '1 + 1 = 3' not in artifact
assert testline_after == artifact[-1]
# with both insertafter and insertbefore (should fail)
testline = 'Seventh: this line should not be there'
testcase = ('lineinfile', [
"dest=%s" % sample,
"insertafter='BOF'",
"insertbefore='BOF'",
"regexp='^communication. '",
"line='%s'" % testline
])
result = self._run(*testcase)
assert result['failed']
os.unlink(sample)
| gpl-3.0 | 2,774,080,525,903,675,400 | 39.339713 | 133 | 0.539675 | false |
seecr/weightless-core | setup.py | 1 | 2970 | #!/usr/bin/env python
## begin license ##
#
# "Weightless" is a High Performance Asynchronous Networking Library. See http://weightless.io
#
# Copyright (C) 2006-2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2011-2012, 2015, 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
#
# This file is part of "Weightless"
#
# "Weightless" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Weightless" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Weightless"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from distutils.core import setup
from distutils.extension import Extension
from os import walk
version = '$Version: 0.9.x$'[9:-1].strip()
packages = []
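# collect every directory under weightless/ that contains an __init__.py as an installable package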
for path, dirs, files in walk('weightless'):
if '__init__.py' in files:
packagename = path.replace('/', '.')
packages.append(packagename)
setup(
name='weightless-core',
version=version,
packages=packages,
url='http://www.weightless.io',
author='Erik J. Groeneveld',
author_email='[email protected]',
description='Weightless data-processing with coroutines',
long_description="""
Weightless presents a way to implement data-processing programs, such as web-servers, with coroutines in Python. The results are lightweight, efficient and readable programs without call-backs, threads and buffering. Weightless supports:
1. decomposing programs into coroutines using compose
2. creating pipelines using the observer pattern
3. connecting file descriptors (sockets etc) to pipelines using gio
""",
license='GNU Public License',
platforms=['cpython'],
ext_modules=[
Extension("weightless.core.ext", sources=[
"weightless/core/_core.c",
"weightless/core/_compose.c",
"weightless/core/_observable.c"
],
extra_compile_args=["-g3", "-O0"],
)
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Text Processing'
],
)
| gpl-2.0 | -3,168,034,106,615,879,000 | 36.125 | 237 | 0.676768 | false |
saltastro/salt-data-quality-site | fabfile.py | 1 | 11681 | import os
import subprocess
import sys
import tempfile
import time
from fabric.api import local, env, run, sudo
from config import Config
# environment variable prefix
prefix = Config.environment_variable_prefix()
# disable logging (as otherwise we would have to use the production setting for the log file location)
os.environ[prefix + 'WITH_LOGGING'] = '0'
# get variables
settings = Config.settings('production')
host = os.environ[prefix + 'DEPLOY_HOST']
deploy_user = os.environ.get(prefix + 'DEPLOY_USER', 'deploy')
deploy_user_group = os.environ.get(prefix + 'DEPLOY_USER_GROUP', deploy_user)
repository = os.environ[prefix + 'DEPLOY_GIT_REPOSITORY']
app_dir_name = os.environ[prefix + 'DEPLOY_APP_DIR_NAME']
web_user = os.environ.get(prefix + 'DEPLOY_WEB_USER', 'www-data')
web_user_group = os.environ.get(prefix + 'DEPLOY_WEB_USER_GROUP', 'www-data')
domain_name = os.environ.get(prefix + 'DEPLOY_DOMAIN_NAME', host)
bokeh_server_port = os.environ.get(prefix + 'DEPLOY_BOKEH_SERVER_PORT', 5100)
migration_tool = settings['migration_tool']
migration_sql_dir = settings['migration_sql_dir']
site_dir = '$HOME/' + app_dir_name
env.hosts = ['{username}@{host}'.format(username=deploy_user, host=host)]
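# Fabric runs every task below against this single host, logged in as the deploy user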
def migrate_database():
if migration_tool == 'None':
return
elif migration_tool == 'Flyway':
root_dir = os.path.abspath(os.path.join(__file__, os.pardir))
local('export FLASK_APP=site_app.py; FLASK_CONFIG=production; {root_dir}/venv/bin/flask flyway'\
.format(root_dir=root_dir))
elif migration_tool == 'Flask-Migrate':
local('export FLASK_APP=site_app.py; FLASK_CONFIG=production; venv/bin/flask db upgrade -d {sql_dir}'
.format(sql_dir=migration_sql_dir))
else:
print('Unknown database migration tool: {tool}'.format(tool=migration_tool))
sys.exit(1)
def upgrade_libs():
sudo('apt-get update')
sudo('apt-get upgrade')
def update_supervisor():
# Python files to load into the Bokeh server
files = [f for f in os.listdir('bokeh_server') if f.lower().endswith('.py')]
# replace placeholders
sed = 'sed -e" s=---SITE_PATH---={site_dir}=g"' \
' -e "s=---WEB_USER---={web_user}=g"' \
' -e "s=---HOST---={host}=g"' \
' -e "s=---BOKEH_SERVER_PORT---={bokeh_server_port}=g"' \
' -e "s=---FILES---={files}=g"' \
' {site_dir}/supervisor.conf '.format(
site_dir=site_dir,
web_user=web_user,
host=domain_name,
bokeh_server_port=bokeh_server_port,
files=' '.join(files))
sudo('{sed} > /etc/supervisor/conf.d/{domain_name}.conf'.format(
sed=sed,
domain_name=domain_name))
sudo('service supervisor restart')
def update_nginx_conf():
static_dir = site_dir + '/app/static'
sudo('sed s=---DOMAIN_NAME---={domain_name}= {site_dir}/nginx.conf | sed s=---STATIC_DIR---={static_dir}= '
'> /etc/nginx/sites-available/{domain_name}'.format(domain_name=domain_name,
site_dir=site_dir,
static_dir=static_dir))
sudo('ln -sf /etc/nginx/sites-available/{domain_name} /etc/nginx/sites-enabled/{domain_name}'.format(
domain_name=domain_name))
sudo('service nginx restart')
def update_environment_variables_file():
"""Update the environment variables file on the server.
The file is rendered readable to the deploy user and the web user.
"""
settings = Config.settings('production')
environment_variables = dict(
ELS_DATABASE_URI=settings['database_uris']['els'],
SDB_DATABASE_URI=settings['database_uris']['sdb'],
SUTHWEATHER_DATABASE_URI=settings['database_uris']['suthweather'],
LOGGING_FILE_BASE_PATH=settings['logging_file_base_path'],
LOGGING_FILE_LOGGING_LEVEL=settings['logging_file_logging_level_name'],
LOGGING_FILE_MAX_BYTES=settings['logging_file_max_bytes'],
LOGGING_FILE_BACKUP_COUNT=settings['logging_file_backup_count'],
LOGGING_MAIL_FROM_ADDRESS=settings['logging_mail_from_address'],
LOGGING_MAIL_LOGGING_LEVEL=settings['logging_mail_logging_level_name'],
LOGGING_MAIL_SUBJECT=settings['logging_mail_subject'],
LOGGING_MAIL_TO_ADDRESSES=settings['logging_mail_to_addresses'],
SECRET_KEY=settings['secret_key'],
SSL_STATUS=settings['ssl_status']
)
file_content = ''
keys = sorted(environment_variables.keys())
for key in keys:
file_content += '{prefix}{key}={value}\n'.format(prefix=prefix, key=key, value=environment_variables[key])
with tempfile.NamedTemporaryFile('w') as f:
f.write(file_content)
f.flush()
tmp_env_file = '/tmp/.env.{timestamp}'.format(timestamp=time.time())
local('scp {path} {username}@{host}:{tmp_env_file}'.format(username=deploy_user,
host=host,
path=f.name,
site_dir=site_dir,
tmp_env_file=tmp_env_file))
env_file = '{site_dir}/.env'.format(site_dir=site_dir)
run('mv {tmp_env_file} {env_file}'.format(tmp_env_file=tmp_env_file, env_file=env_file))
run('chmod 640 {env_file}'.format(env_file=env_file))
sudo('chown {username}:{web_user_group} {env_file}'.format(username=deploy_user,
web_user_group=web_user_group,
env_file=env_file))
def update_log_dir():
"""Update the log directory.
    The directory for the log files is created (if it doesn't exist yet), access is restricted to the owning user,
    and ownership of the directory is transferred to the web user.
If the directory exists already, it is checked that it is actually owned by the web user.
"""
settings = Config.settings('production')
log_file = os.path.abspath(settings['logging_file_base_path'])
log_dir = os.path.abspath(os.path.join(settings['logging_file_base_path'], os.path.pardir))
sudo('if [[ ! -d {log_dir} ]]\n'
'then\n'
' mkdir {log_dir}\n'
' chmod 700 {log_dir}\n'
' chown {web_user}:{web_user_group} {log_dir}\n'
'elif [ `ls -ld {log_dir} | awk \'{{print $3}}\'` != "{web_user}" ]\n'
'then\n'
' echo "The directory {log_dir} for the log files isn\'t owned by the web user ({web_user})."\n'
' sleep 5\n'
' exit 1\n'
'fi'.format(log_dir=log_dir,
web_user=web_user,
web_user_group=web_user_group))
sudo('if [[ ! -e {log_file} ]]\n'
'then\n'
' touch {log_file}\n'
' chmod 700 {log_file}\n'
' chown {web_user}:{web_user_group} {log_file}\n'
'elif [ `ls -l {log_file} | awk \'{{print $3}}\'` != "{web_user}" ]\n'
'then\n'
' echo "The log file {log_file} isn\'t owned by the web user {{web_user}}."\n'
' sleep 5\n'
' exit 1\n'
'fi'.format(log_file=log_file,
web_user=web_user,
web_user_group=web_user_group))
def update_webassets():
# remove bundle and cache directories
static_dir = site_dir + '/app/static'
webassets_cache = static_dir + '/.webassets-cache'
cache = static_dir + '/cache'
run('if [[ -d {cache} ]]\n'
'then\n'
' rm -r {cache}\n'
'fi'.format(cache=cache))
sudo('if [[ -d {webassets_cache} ]]\n'
'then\n'
' rm -r {webassets_cache}\n'
'fi'.format(webassets_cache=webassets_cache))
# create bundles (must be run as root, as the deploy user doesn't own the error log)
sudo('cd {site_dir}; export FLASK_APP=site_app.py; export FLASK_CONFIG=production; venv/bin/flask assets build'
.format(site_dir=site_dir))
# make deploy user owner of the bundle directory
sudo('chown -R {deploy_user}:{deploy_user_group} {cache}'
.format(deploy_user=deploy_user,
deploy_user_group=deploy_user_group,
cache=cache))
# make web user owner of the bundle directory
sudo('chown -R {web_user}:{web_user_group} {webassets_cache}'
.format(web_user=web_user,
web_user_group=web_user_group,
webassets_cache=webassets_cache))
def update_python_packages():
run('cd {site_dir}\n'
'source venv/bin/activate\n'
'pip install -r requirements.txt\n'
'deactivate'
.format(site_dir=site_dir))
def deploy(with_setting_up=False):
"""Deploy the site to the remote server.
If you deploy for the first time, you should request setting up by passing `True` as the `with_setting_up` argument.
You should only have to do this once, but setting up again should cause no problems.
Params:
-------
with_setting_up: bool
        Set up the server before deploying the site.
"""
# # test everything
# local('./run_tests.sh')
#
# # push Git content to the remote repository
# local('git push')
#
# # migrate database
# migrate_database()
if with_setting_up:
# upgrade/update apt
upgrade_libs()
# necessary to install many Python libraries
sudo('apt-get install -y build-essential')
sudo('apt-get install -y git')
sudo('apt-get install -y python3')
sudo('apt-get install -y python3-pip')
sudo('apt-get install -y python3-all-dev')
# enable virtual environments
sudo('pip3 install virtualenv')
# MySQL
sudo('apt-get install -y mysql-client')
sudo('apt-get install -y libmysqlclient-dev')
# Java
sudo('apt-get install -y default-jre')
# supervisor
sudo('apt-get install -y supervisor')
# nginx
sudo('apt-get install -y nginx')
# clone the Git repository (if it doesn't exist yet)
run('if [[ ! -d {site_dir} ]]\n'
'then\n'
' git clone {repository} {site_dir}\n'
'fi'.format(repository=repository, site_dir=site_dir))
# create environment variable prefix file
run('cd {site_dir}; echo {prefix} > env_var_prefix'.format(prefix=prefix, site_dir=site_dir))
# create a virtual environment (if it doesn't exist yet)
run('cd {site_dir}\n'
'if [[ ! -d venv ]]\n'
'then\n'
' python3 -m virtualenv venv\n'
'fi'.format(site_dir=site_dir))
# update the Git repository
run('cd {site_dir}; git pull'.format(site_dir=site_dir))
# install Python packages
update_python_packages()
# setup the environment variables file
# this must happen before Supervisor or Nginx are updated
update_environment_variables_file()
# setup the log directory
# this must happen before Supervisor or Nginx are updated
update_log_dir()
# create static file bundles
update_webassets()
# setup Supervisor
update_supervisor()
# setup Nginx
update_nginx_conf()
# yes, all should be working now - but it seems that existing Supervisor jobs might not have been killed
    # hence we'd rather do a full reboot...
reboot()
def setup():
deploy(with_setting_up=True)
def reboot():
"""Reboot the remote server.
"""
sudo('reboot')
| mit | 8,530,947,404,351,064,000 | 36.802589 | 120 | 0.589504 | false |
Reinaesaya/OUIRL-ChatBot | tests/training_tests/test_list_training.py | 1 | 6825 | # -*- coding: utf-8 -*-
from tests.base_case import ChatBotTestCase
from chatterbot.trainers import ListTrainer
class ListTrainingTests(ChatBotTestCase):
def setUp(self):
super(ListTrainingTests, self).setUp()
self.chatbot.set_trainer(ListTrainer)
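        # ListTrainer expects an ordered conversation: each statement is stored as a response to the one before it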
def test_training_adds_statements(self):
"""
Test that the training method adds statements
to the database.
"""
conversation = [
"Hello",
"Hi there!",
"How are you doing?",
"I'm great.",
"That is good to hear",
"Thank you.",
"You are welcome.",
"Sure, any time.",
"Yeah",
"Can I help you with anything?"
]
self.chatbot.train(conversation)
response = self.chatbot.get_response("Thank you.")
self.assertEqual(response.text, "You are welcome.")
def test_training_increments_occurrence_count(self):
conversation = [
"Do you like my hat?",
"I do not like your hat."
]
self.chatbot.train(conversation)
self.chatbot.train(conversation)
statements = self.chatbot.storage.filter(
in_response_to__contains="Do you like my hat?"
)
response = statements[0].in_response_to[0]
self.assertEqual(response.occurrence, 2)
def test_database_has_correct_format(self):
"""
Test that the database maintains a valid format
when data is added and updated. This means that
after the training process, the database should
contain nine objects and eight of these objects
should list the previous member of the list as
a response.
"""
conversation = [
"Hello sir!",
"Hi, can I help you?",
"Yes, I am looking for italian parsely.",
"Italian parsely is right over here in out produce department",
"Great, thank you for your help.",
"No problem, did you need help finding anything else?",
"Nope, that was it.",
"Alright, have a great day.",
"Thanks, you too."
]
self.chatbot.train(conversation)
# There should be a total of 9 statements in the database after training
self.assertEqual(self.chatbot.storage.count(), 9)
        # The first statement should not be in response to any other statement
self.assertEqual(
len(self.chatbot.storage.find(conversation[0]).in_response_to),
0
)
# The second statement should have one response
self.assertEqual(
len(self.chatbot.storage.find(conversation[1]).in_response_to),
1
)
# The second statement should be in response to the first statement
self.assertIn(
conversation[0],
self.chatbot.storage.find(conversation[1]).in_response_to,
)
def test_training_with_unicode_characters(self):
"""
Ensure that the training method adds unicode statements
to the database.
"""
conversation = [
u'¶ ∑ ∞ ∫ π ∈ ℝ² ∖ ⩆ ⩇ ⩈ ⩉ ⩊ ⩋ ⪽ ⪾ ⪿ ⫀ ⫁ ⫂ ⋒ ⋓',
u'⊂ ⊃ ⊆ ⊇ ⊈ ⊉ ⊊ ⊋ ⊄ ⊅ ⫅ ⫆ ⫋ ⫌ ⫃ ⫄ ⫇ ⫈ ⫉ ⫊ ⟃ ⟄',
u'∠ ∡ ⦛ ⦞ ⦟ ⦢ ⦣ ⦤ ⦥ ⦦ ⦧ ⦨ ⦩ ⦪ ⦫ ⦬ ⦭ ⦮ ⦯ ⦓ ⦔ ⦕ ⦖ ⟀',
u'∫ ∬ ∭ ∮ ∯ ∰ ∱ ∲ ∳ ⨋ ⨌ ⨍ ⨎ ⨏ ⨐ ⨑ ⨒ ⨓ ⨔ ⨕ ⨖ ⨗ ⨘ ⨙ ⨚ ⨛ ⨜',
u'≁ ≂ ≃ ≄ ⋍ ≅ ≆ ≇ ≈ ≉ ≊ ≋ ≌ ⩯ ⩰ ⫏ ⫐ ⫑ ⫒ ⫓ ⫔ ⫕ ⫖',
u'¬ ⫬ ⫭ ⊨ ⊭ ∀ ∁ ∃ ∄ ∴ ∵ ⊦ ⊬ ⊧ ⊩ ⊮ ⊫ ⊯ ⊪ ⊰ ⊱ ⫗ ⫘',
u'∧ ∨ ⊻ ⊼ ⊽ ⋎ ⋏ ⟑ ⟇ ⩑ ⩒ ⩓ ⩔ ⩕ ⩖ ⩗ ⩘ ⩙ ⩚ ⩛ ⩜ ⩝ ⩞ ⩟ ⩠ ⩢',
]
self.chatbot.train(conversation)
response = self.chatbot.get_response(conversation[1])
self.assertEqual(response, conversation[2])
def test_similar_sentence_gets_same_response_multiple_times(self):
"""
Tests if the bot returns the same response for the same
question (which is similar to the one present in the training set)
when asked repeatedly.
"""
training = [
'how do you login to gmail?',
'Goto gmail.com, enter your login information and hit enter!'
]
similar_question = 'how do I login to gmail?'
self.chatbot.train(training)
response_to_trained_set = self.chatbot.get_response('how do you login to gmail?')
response1 = self.chatbot.get_response(similar_question)
response2 = self.chatbot.get_response(similar_question)
self.assertEqual(response_to_trained_set, response1)
self.assertEqual(response1, response2)
class ChatterBotResponseTests(ChatBotTestCase):
    def setUp(self):
        """
        Set up a database for testing.
        """
        super(ChatterBotResponseTests, self).setUp()
self.chatbot.set_trainer(ListTrainer)
data1 = [
"african or european?",
"Huh? I... I don't know that.",
"How do you know so much about swallows?"
]
data2 = [
"Siri is adorable",
"Who is Seri?",
"Siri is my cat"
]
data3 = [
"What... is your quest?",
"To seek the Holy Grail.",
"What... is your favourite colour?",
"Blue."
]
self.chatbot.train(data1)
self.chatbot.train(data2)
self.chatbot.train(data3)
def test_answer_to_known_input(self):
"""
Test that a matching response is returned
when an exact match exists.
"""
input_text = "What... is your favourite colour?"
response = self.chatbot.get_response(input_text)
self.assertIn("Blue", response.text)
def test_answer_close_to_known_input(self):
input_text = "What is your favourite colour?"
response = self.chatbot.get_response(input_text)
self.assertIn("Blue", response.text)
def test_match_has_no_response(self):
"""
        Make sure that if the last line in a file
        matches the input text then an index error does
        not occur.
"""
input_text = "Siri is my cat"
response = self.chatbot.get_response(input_text)
self.assertGreater(len(response.text), 0)
def test_empty_input(self):
"""
If empty input is provided, anything may be returned.
"""
response = self.chatbot.get_response("")
self.assertTrue(len(response.text) >= 0)
| bsd-3-clause | 5,038,697,892,158,376,000 | 30.36715 | 89 | 0.543662 | false |
codeforamerica/comport | comport/department/views.py | 1 | 21005 | # -*- coding: utf-8 -*-
from flask import Blueprint, render_template, request, redirect, url_for, flash, Response, abort
from .models import Department, Extractor
from comport.data.models import DemographicValue, DenominatorValue
from flask.ext.login import login_required
from comport.decorators import admin_or_department_required, authorized_access_only
import uuid
import datetime
blueprint = Blueprint("department", __name__, url_prefix='/department',
static_folder="../static")
# <<<<<<<< ADMIN ENDPOINTS >>>>>>>>>>
@blueprint.route("/<int:department_id>")
@login_required
@admin_or_department_required()
def department_dashboard(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
current_date = datetime.datetime.now()
return render_template("department/dashboard.html", department=department, current_month=current_date.month, current_year=current_date.year)
@blueprint.route("/<int:department_id>/activate", methods=['POST'])
@login_required
@admin_or_department_required()
def activate_extractor(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
if request.method == 'POST':
if request.form['submit'] == 'Activate':
password = str(uuid.uuid4())
extractor, envs = Extractor.from_department_and_password(department=department, password=password)
return render_template("department/extractorEnvs.html", department=department, envs=envs)
@blueprint.route("/<int:department_id>/start", methods=['POST'])
@login_required
@admin_or_department_required()
def start_extractor(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
if request.method == 'POST':
if request.form['submit'] == 'Set':
extractor = department.get_extractor()
extractor.next_year = request.form["year"]
extractor.next_month = request.form["month"]
extractor.save()
flash("Extractor start date set to {}/{}".format(extractor.next_month, extractor.next_year), "info")
return redirect(url_for('department.department_dashboard', department_id=department.id))
# <<<<<<<< EDIT ENDPOINTS >>>>>>>>>>
@blueprint.route("/<int:department_id>/edit/ois")
@login_required
@admin_or_department_required()
def edit_ois(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/ois.html", department=department, chart_blocks=department.get_ois_blocks(), editing=True)
@blueprint.route("/<int:department_id>/edit/useofforce")
@login_required
@admin_or_department_required()
def edit_use_of_force(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/useofforce.html", department=department, chart_blocks=department.get_uof_blocks(), editing=True)
@blueprint.route("/<int:department_id>/edit/complaints")
@login_required
@admin_or_department_required()
def edit_complaints(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/complaints.html", department=department, chart_blocks=department.get_complaint_blocks(), editing=True)
@blueprint.route("/<int:department_id>/edit/pursuits")
@login_required
@admin_or_department_required()
def edit_pursuits(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/pursuits.html", department=department, chart_blocks=department.get_pursuits_blocks(), editing=True)
@blueprint.route("/<int:department_id>/edit/assaultsonofficers")
@login_required
@admin_or_department_required()
def edit_assaultsonofficers(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/assaults.html", department=department, chart_blocks=department.get_assaults_blocks(), editing=True)
@blueprint.route("/<int:department_id>/edit/demographics")
@login_required
@admin_or_department_required()
def edit_demographics(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template(
"department/demographics.html",
department=department,
department_values=department.get_raw_department_demographics(),
city_values=department.get_raw_city_demographics())
@blueprint.route("/<int:department_id>/demographicValue/create", methods=["POST"])
@login_required
@admin_or_department_required()
def new_demographic_row(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
DemographicValue.create(
department_id=department_id,
race=request.form["race"],
count=int(request.form["count"]),
department_value=request.form["department_or_city"] == "department")
return redirect(url_for(
'department.edit_demographics', department_id=department_id
))
@blueprint.route("/<int:department_id>/demographicValue/<int:value_id>/delete", methods=["POST"])
@login_required
@admin_or_department_required()
def delete_demographic_row(department_id, value_id):
department = Department.get_by_id(department_id)
value = DemographicValue.get_by_id(value_id)
if not department or not value:
abort(404)
value.delete()
return redirect(url_for(
'department.edit_demographics', department_id=department_id
))
@blueprint.route("/<int:department_id>/edit/denominators")
@login_required
@admin_or_department_required()
def edit_denominators(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template(
"department/denominators.html",
department=department,
denominator_values=department.denominator_values
)
@blueprint.route("/<int:department_id>/denominatorValue/create", methods=["POST"])
@login_required
@admin_or_department_required()
def new_denominator_row(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
DenominatorValue.create(
department_id=department_id,
month=int(request.form["month"]),
year=int(request.form["year"]),
officers_out_on_service=int(request.form["officersOutOnService"])
)
return redirect(url_for(
'department.edit_denominators', department_id=department_id
))
@blueprint.route("/<int:department_id>/denominatorValue/<int:value_id>/delete", methods=["POST"])
@login_required
@admin_or_department_required()
def delete_denominator_row(department_id, value_id):
department = Department.get_by_id(department_id)
value = DenominatorValue.get_by_id(value_id)
if not department or not value:
abort(404)
value.delete()
return redirect(url_for(
'department.edit_denominators', department_id=department_id
))
@blueprint.route("/<int:department_id>/edit/index", methods=["GET", "POST"])
@login_required
@admin_or_department_required()
def edit_index(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/index.html", department=department, chart_blocks=department.get_introduction_blocks(), editing=True)
# <<<<<<<< PREVIEW ENDPOINTS >>>>>>>>>>
@blueprint.route("/<int:department_id>/preview/ois")
@login_required
@admin_or_department_required()
def preview_ois(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/ois.html", department=department, chart_blocks=department.get_ois_blocks(), editing=False)
@blueprint.route("/<int:department_id>/preview/useofforce")
@login_required
@admin_or_department_required()
def preview_use_of_force(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/useofforce.html", department=department, chart_blocks=department.get_uof_blocks(), editing=False)
@blueprint.route("/<int:department_id>/preview/complaints")
@login_required
@admin_or_department_required()
def preview_complaints(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/complaints.html", department=department, chart_blocks=department.get_complaint_blocks(), editing=False)
@blueprint.route("/<int:department_id>/preview/pursuits")
@login_required
@admin_or_department_required()
def preview_pursuits(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/pursuits.html", department=department, chart_blocks=department.get_pursuits_blocks(), editing=False)
@blueprint.route("/<int:department_id>/preview/assaultsonofficers")
@login_required
@admin_or_department_required()
def preview_assaults(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/assaults.html", department=department, chart_blocks=department.get_assaults_blocks(), editing=False)
@blueprint.route("/<int:department_id>/preview/index")
@login_required
@admin_or_department_required()
def preview_index(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/index.html", chart_blocks=department.get_introduction_blocks(), department=department, editing=False)
# <<<<<<<< SCHEMA ENDPOINTS >>>>>>>>>>
@blueprint.route('/<int:department_id>/preview/schema/complaints')
@login_required
@admin_or_department_required()
def complaints_schema_preview(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/complaints.html", department=department, chart_blocks=department.get_complaint_schema_blocks(), editing=False)
@blueprint.route('/<int:department_id>/edit/schema/complaints')
@login_required
@admin_or_department_required()
def complaints_schema_edit(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/complaints.html", department=department, chart_blocks=department.get_complaint_schema_blocks(), editing=True)
@blueprint.route('/<int:department_id>/preview/schema/useofforce')
@login_required
@admin_or_department_required()
def useofforce_schema_preview(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/useofforce.html", department=department, chart_blocks=department.get_uof_schema_blocks(), editing=False)
@blueprint.route('/<int:department_id>/edit/schema/useofforce')
@login_required
@admin_or_department_required()
def useofforce_schema_edit(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/useofforce.html", department=department, chart_blocks=department.get_uof_schema_blocks(), editing=True)
@blueprint.route('/<int:department_id>/edit/schema/ois')
@login_required
@admin_or_department_required()
def ois_schema_edit(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/ois.html", department=department, chart_blocks=department.get_ois_schema_blocks(), editing=True)
@blueprint.route('/<int:department_id>/preview/schema/ois')
@login_required
@admin_or_department_required()
def ois_schema_preview(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/ois.html", department=department, chart_blocks=department.get_ois_schema_blocks(), editing=False)
@blueprint.route('/<int:department_id>/preview/schema/pursuits')
@login_required
@admin_or_department_required()
def pursuits_schema_preview(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/pursuits.html", department=department, chart_blocks=department.get_pursuits_schema_blocks(), editing=False)
@blueprint.route('/<int:department_id>/edit/schema/pursuits')
@login_required
@admin_or_department_required()
def pursuits_schema_edit(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/pursuits.html", department=department, chart_blocks=department.get_pursuits_schema_blocks(), editing=True)
@blueprint.route('/<int:department_id>/preview/schema/assaultsonofficers')
@login_required
@admin_or_department_required()
def assaults_schema_preview(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/assaults.html", department=department, chart_blocks=department.get_assaults_schema_blocks(), editing=False)
@blueprint.route('/<int:department_id>/edit/schema/assaultsonofficers')
@login_required
@admin_or_department_required()
def assaults_schema_edit(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return render_template("department/site/schema/assaults.html", department=department, chart_blocks=department.get_assaults_schema_blocks(), editing=True)
# <<<<<<<< DATA ENDPOINTS >>>>>>>>>>
@blueprint.route('/<int:department_id>/uof.csv')
@authorized_access_only(dataset="use_of_force_incidents")
def use_of_force_csv(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return Response(department.get_uof_csv(), mimetype="text/csv")
@blueprint.route('/<int:department_id>/complaints.csv')
@authorized_access_only(dataset="citizen_complaints")
def complaints_csv(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return Response(department.get_complaint_csv(), mimetype="text/csv")
@blueprint.route('/<int:department_id>/pursuits.csv')
@authorized_access_only(dataset="pursuits")
def pursuits_csv(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return Response(department.get_pursuits_csv(), mimetype="text/csv")
@blueprint.route('/<int:department_id>/assaultsonofficers.csv')
@authorized_access_only(dataset="assaults_on_officers")
def assaults_csv(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return Response(department.get_assaults_csv(), mimetype="text/csv")
@blueprint.route('/<int:department_id>/ois.csv')
@authorized_access_only(dataset="officer_involved_shootings")
def ois_csv(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return Response(department.get_ois_csv(), mimetype="text/csv")
@blueprint.route('/<int:department_id>/officerCalls.csv')
@authorized_access_only()
def denominator_csv(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return Response(department.get_denominator_csv(), mimetype="text/csv")
@blueprint.route('/<int:department_id>/demographics.csv')
@authorized_access_only()
def demographics_csv(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
return Response(department.get_demographic_csv(), mimetype="text/csv")
# <<<<<<<< PUBLIC ENDPOINTS >>>>>>>>>>
@blueprint.route("/<short_name>/")
@authorized_access_only()
def public_intro(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/index.html", chart_blocks=department.get_introduction_blocks(), department=department, editing=False, published=True)
@blueprint.route("/<short_name>/complaints/")
@authorized_access_only(dataset="citizen_complaints")
def public_complaints(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/complaints.html", department=department, chart_blocks=department.get_complaint_blocks(), editing=False, published=True)
@blueprint.route('/<short_name>/schema/complaints/')
@authorized_access_only(dataset="citizen_complaints")
def public_complaints_schema(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/schema/complaints.html", department=department, chart_blocks=department.get_complaint_schema_blocks(), published=True)
@blueprint.route("/<short_name>/pursuits/")
@authorized_access_only(dataset="pursuits")
def public_pursuits(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/pursuits.html", department=department, chart_blocks=department.get_pursuits_blocks(), editing=False, published=True)
@blueprint.route('/<short_name>/schema/pursuits/')
@authorized_access_only(dataset="pursuits")
def public_pursuits_schema(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/schema/pursuits.html", department=department, chart_blocks=department.get_pursuits_schema_blocks(), published=True)
@blueprint.route("/<short_name>/assaultsonofficers/")
@authorized_access_only(dataset="assaults_on_officers")
def public_assaults(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/assaults.html", department=department, chart_blocks=department.get_assaults_blocks(), editing=False, published=True)
@blueprint.route('/<short_name>/schema/assaultsonofficers/')
@authorized_access_only(dataset="assaults_on_officers")
def public_assaults_schema(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/schema/assaults.html", department=department, chart_blocks=department.get_assaults_schema_blocks(), editing=False, published=True)
@blueprint.route("/<short_name>/useofforce/")
@authorized_access_only(dataset="use_of_force_incidents")
def public_uof(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/useofforce.html", department=department, chart_blocks=department.get_uof_blocks(), editing=False, published=True)
@blueprint.route('/<short_name>/schema/useofforce/')
@authorized_access_only(dataset="use_of_force_incidents")
def public_uof_schema(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/schema/useofforce.html", department=department, chart_blocks=department.get_uof_schema_blocks(), editing=False, published=True)
@blueprint.route("/<short_name>/officerinvolvedshootings/")
@authorized_access_only(dataset="officer_involved_shootings")
def public_ois(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/ois.html", department=department, chart_blocks=department.get_ois_blocks(), editing=False, published=True)
@blueprint.route('/<short_name>/schema/officerinvolvedshootings/')
@authorized_access_only(dataset="officer_involved_shootings")
def public_ois_schema(short_name):
department = Department.query.filter_by(short_name=short_name.upper()).first()
if not department:
abort(404)
return render_template("department/site/schema/ois.html", department=department, chart_blocks=department.get_ois_schema_blocks(), editing=False, published=True)
| bsd-3-clause | -4,313,935,917,857,913,000 | 41.178715 | 174 | 0.733349 | false |
orezpraw/estimate-charm | estimatecharm/pythonSource.py | 1 | 3918 | #!/usr/bin/python
# Copyright 2013, 2014, 2015 Joshua Charles Campbell, Alex Wilson
#
# This file is part of EstimateCharm.
#
# EstimateCharm is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EstimateCharm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with EstimateCharm. If not, see <http://www.gnu.org/licenses/>.
from estimatecharm.unnaturalCode import *
from logging import debug, info, warning, error
from estimatecharm import flexibleTokenize
import re
import sys, token
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
COMMENT = 53
ws = re.compile(r'\s')
class pythonLexeme(ucLexeme):
@classmethod
def stringify(cls, t, v):
"""
Stringify a lexeme: produce a string describing it.
        In the case of comments, strings, indents, dedents, newlines, and
        the endmarker, a string of the form '<CATEGORY-NAME>' is returned.
        Otherwise, the lexeme's actual text is returned.
"""
if t == 'COMMENT':
return '<'+t+'>'
        # Josh thought this would be a good idea for some strange reason:
elif len(v) > 20 :
return '<'+t+'>'
        elif ws.match(str(v)):
            return '<'+t+'>'
        elif t == 'STRING':
            return '<'+t+'>'
        elif len(v) > 0:
return v
else:
# This covers for <DEDENT> case, and also, probably some other
# special cases...
return '<' + t + '>'
@classmethod
def fromTuple(cls, tup):
if isinstance(tup[0], int):
t0 = token.tok_name[tup[0]]
else:
t0 = tup[0]
return tuple.__new__(cls, (t0, str(tup[1]), ucPos(tup[2]), ucPos(tup[3]), cls.stringify(t0, str(tup[1]))))
def comment(self):
return (self.ltype == 'COMMENT')
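# Added illustrative sketch (not part of the original module): the intended
# stringify() behaviour for a few token kinds, using the category names above.
def _stringify_examples():  # pragma: no cover
    assert pythonLexeme.stringify('NAME', 'foo') == 'foo'
    assert pythonLexeme.stringify('STRING', '"abc"') == '<STRING>'
    assert pythonLexeme.stringify('DEDENT', '') == '<DEDENT>'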
class pythonSource(ucSource):
def lex(self, code, mid_line=False):
tokGen = flexibleTokenize.generate_tokens(StringIO(code).readline,
mid_line)
return [pythonLexeme.fromTuple(t) for t in tokGen]
def deLex(self):
line = 1
col = 0
src = ""
for l in self:
for i in range(line, l.start.line):
src += os.linesep
col = 0
line += 1
for i in range(col, l.start.col):
src += " "
col += 1
src += l.val
col += len(l.val)
nls = l.val.count(os.linesep)
if (nls > 0):
line += nls
col = len(l.val.splitlines().pop())
return src
def unCommented(self):
assert len(self)
return filter(lambda a: not a.comment(), copy(self))
def scrubbed(self):
"""Clean up python source code removing extra whitespace tokens and comments"""
ls = copy(self)
assert len(ls)
i = 0
r = []
for i in range(0, len(ls)):
if ls[i].comment():
continue
elif ls[i].ltype == 'NL':
continue
elif ls[i].ltype == 'NEWLINE' and i < len(ls)-1 and ls[i+1].ltype == 'NEWLINE':
continue
elif ls[i].ltype == 'NEWLINE' and i < len(ls)-1 and ls[i+1].ltype == 'INDENT':
continue
else:
r.append(ls[i])
assert len(r)
return pythonSource(r) | agpl-3.0 | 4,989,256,644,889,786,000 | 31.122951 | 114 | 0.552578 | false |
pelitweets/pelitweets-bot | TwitterBot.py | 1 | 2586 | # -*- coding: utf-8 -*-
#!/usr/bin/python
# Based on https://github.com/peterwalker78/twitterbot
import tweepy
import datetime
import time
from os import environ
import pymongo
# Twitter parameters
try:
consumer_key = environ['TWITTER_CONSUMER_KEY']
consumer_secret = environ['TWITTER_CONSUMER_SECRET']
access_token = environ['TWITTER_ACCESS_TOKEN_KEY']
access_token_secret = environ['TWITTER_ACCESS_TOKEN_SECRET']
twitter_account = environ['TWITTER_ACCOUNT']
except KeyError, e:
raise Exception('Please define TWITTER_ACCOUNT, TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, TWITTER_ACCESS_TOKEN_KEY and TWITTER_ACCESS_TOKEN_SECRET as environment variables')
# Connect with Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
user = twitter_account
# Mongo parameters
try:
MONGODB_URI = 'mongodb://%s:%s@%s:%d/%s' % (
environ['MONGO_DBUSER'], environ['MONGO_DBPASSWORD'], environ['MONGO_URL'], int(environ['MONGO_PORT']),
environ['MONGO_DBNAME'])
except KeyError, e:
raise Exception('Please define MONGO_DBUSER, MONGO_DBPASSWORD, MONGO_URL, MONGO_PORT and MONGO_DBNAME as environment variables')
# Connect with mongo
client = pymongo.MongoClient(MONGODB_URI)
db = client.get_default_database()
pelitweets_db = db['movies']
responded_tweets_db = db['responded_tweets']
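# Added illustrative sketch (not part of the original script): an index on
# mention_id keeps the "already responded?" lookup below fast as the
# collection grows; create_index is idempotent, so repeat runs are safe.
responded_tweets_db.create_index('mention_id')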
# Get last 100 mentions of my timeline
mymentions = api.mentions_timeline()
for mention in mymentions:
tweet_time = mention.created_at
since = datetime.datetime(2014, 03, 15)
# Avoid old tweets
if tweet_time < since:
continue
    # Check whether this mention has already been responded to
query = {'mention_id': mention.id}
doc = responded_tweets_db.find_one(query)
# Not responded
if not doc:
tweet_text = unicode(mention.text)
movie_name = tweet_text[tweet_text.find(u'@pelitweets') + 12:]
# Get Movie with the required title
query = {'movie_title': movie_name}
movie = pelitweets_db.find_one(query)
# Reply with ratings
reply_text = "@%s %s ratings: IMDB: %s, Filmaffinity: %s, Twitter: %s" % (mention.author.screen_name, movie_name, movie['movie_rating_imdb'], movie['movie_rating_fa'], movie['movie_rating_average'])
print reply_text
api.update_status(reply_text, mention.id)
time.sleep(1)
        # Record this mention so it is not responded to again
json_tweet_data = {
'mention_id': mention.id
}
responded_tweets_db.insert(json_tweet_data)
| mit | 6,031,489,597,193,782,000 | 30.536585 | 206 | 0.688322 | false |
vls/cffi_re2 | tests/TestBasicRegex.py | 1 | 7832 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cffi_re2
import sys
import re as pyre
if sys.version_info < (2, 7):
from nose.tools import raises
from nose_extra_tools import assert_is_not_none, assert_is_none, assert_equal, assert_true, assert_false
else:
from nose.tools import raises, assert_is_not_none, assert_is_none, assert_equal, assert_true, assert_false
class TestBasicRegex(object):
def test_basic_search(self):
robj = cffi_re2.compile(r'b+')
assert_is_not_none(robj.search('abbcd'))
def test_basic_match(self):
# Search-type regex should NOT match full string
robj = cffi_re2.compile(r'b+')
assert_is_none(robj.match('abbcd'))
# This regex only matches the left end
robj = cffi_re2.compile(r'[abc]+$')
assert_is_none(robj.match('abbcd'))
# Full match regex should match
robj = cffi_re2.compile(r'[abcd]+')
assert_is_not_none(robj.match('abbcd'))
# Regex match should be left-anchored, not both-anchored
robj = cffi_re2.compile(r'a+')
assert_is_not_none(robj.match('aaab'))
assert_is_none(robj.match('baaab'))
def test_re_compatibility(self):
"""Test compatibility with the Python re library"""
#
cm = cffi_re2.search(r'a(b+)', "abbc")
rm = pyre.search(r'a(b+)', "abbc")
assert_equal(cm.groups(), rm.groups())
#
cm = cffi_re2.match(r'b+', 'abbcd')
rm = pyre.match(r'b+', 'abbcd')
assert_equal(cm, rm)
# Match without groups
cm = cffi_re2.match(r'[abc]+', 'abbcd')
rm = pyre.match(r'[abc]+', 'abbcd')
assert_equal(cm.groups(), rm.groups())
# Full match regex should match
cm = cffi_re2.match(r'([abc]+)', 'abbcd')
rm = pyre.match(r'([abc]+)', 'abbcd')
assert_equal(cm.groups(), rm.groups())
assert_equal(cm.group(0), rm.group(0))
assert_equal(cm.group(1), rm.group(1))
cm = cffi_re2.match(r'([ab]+)(c+)', 'abbcd')
rm = pyre.match(r'([ab]+)(c+)', 'abbcd')
assert_equal(cm.groups(), rm.groups())
assert_equal(cm.group(0), rm.group(0))
assert_equal(cm.group(1), rm.group(1))
assert_equal(cm.group(2), rm.group(2))
def test_sub_basic(self):
robj = cffi_re2.compile(r'b+')
assert_equal(robj.sub('', 'abbcbbd'), 'acd')
def test_basic_groups(self):
robj = cffi_re2.compile(r'a(b+)')
mo = robj.search("abbc")
assert_is_not_none(mo)
assert_equal(mo.groups(), ("bb",))
def test_basic_findall(self):
robj = cffi_re2.compile(r'a(b+)')
mo = robj.findall("abbcdefabbbbca")
assert_is_not_none(mo)
assert_equal(mo, ["bb", "bbbb"])
def test_findall_overlapping(self):
"""Check overlapping matches with findall"""
        # Prerequisites
assert_equal(cffi_re2.findall(r'-{1,2}', 'program-files'), ['-'])
assert_equal(cffi_re2.findall(r'-{1,2}', 'pro--gram-files'), ['--', '-'])
assert_equal(cffi_re2.findall(r'-{1,2}', 'pro---gram-files'), ['--', '-', '-'])
# Actual test
assert_equal(cffi_re2.findall(r'-{1,2}', 'pro----gram-files'), ['--', '--', '-'])
def test_findall_subgroups(self):
mo = cffi_re2.findall(r'ab+', "abbcdefabbbbca")
assert_equal(mo, ["abb", "abbbb"])
mo = cffi_re2.findall(r'a(b+)', "abbcdefabbbbca")
assert_equal(mo, ["bb", "bbbb"])
mo = cffi_re2.findall(r'(a)(b+)', "abbcdefabbbbca")
assert_equal(mo, [("a", "bb"), ("a", "bbbb")])
mo = cffi_re2.findall(r'(a)(b)(b+)', "abbcdefabbbbca")
assert_equal(mo, [("a", "b", "b"), ("a", "b", "bbb")])
def test_medium_complexity(self):
"""Check medium complexity regexes"""
# Examples from github.com/ulikoehler/KATranslationCheck
# 1
rgx = cffi_re2.compile(r"\b[Ii]nto\b")
assert_is_not_none(rgx.search("Into the darkness"))
assert_is_not_none(rgx.search("I went into the darkness"))
assert_is_none(rgx.search("abcde beintoaqe aqet"))
# 2
rgx = cffi_re2.compile(r"\d+\$\s*dollars?")
assert_is_not_none(rgx.search("12$ dollars"))
assert_is_not_none(rgx.match("12$ dollars"))
assert_is_not_none(rgx.match("1$ dollar"))
assert_is_not_none(rgx.match("1$ dollar"))
assert_is_not_none(rgx.match("1$ dollars"))
def test_sub_function(self):
# Python re example
def dashrepl(matchobj):
if matchobj.group(0) == '-':
return ' '
else:
return '-'
assert_equal(cffi_re2.sub(r'-{1,2}', dashrepl, 'pro-gram--files'),
'pro gram-files')
#
print()
assert_equal(cffi_re2.sub(r'-{1,2}', dashrepl, 'pro----gram-files'),
'pro--gram files')
def test_module_level_functions(self):
"""
Quick test of module-level functions.
These are generally expected to call the compiled counterparts,
so these tests do not check all aspects
"""
assert_equal(cffi_re2.findall(r'a(b+)', "abbcdefabbbbca"), ["bb", "bbbb"])
assert_equal(cffi_re2.sub(r'b+', '', 'abbcbbd'), 'acd')
assert_is_not_none(cffi_re2.search(r'b+', 'abbcbbd'))
assert_is_none(cffi_re2.match(r'b+', 'abbcbbd'))
assert_is_not_none(cffi_re2.match(r'b+', 'bbbbb'))
def test_more_x(self):
assert_is_none(cffi_re2.search(r"<(a|span|div|table)", "Kapazitäten"))
assert_equal(cffi_re2.findall(r"<(a|span|div|table)", "Kapazitäten"), [])
    def test_optional_groups_findall(self):
        # Renamed so it is not shadowed by the identically named test below.
        assert_equal(cffi_re2.findall(r"(foo)?bar", "bar"), [''])
def test_optional_groups(self):
result = cffi_re2.search(r"(foo)?bar", "bar")
print(result.ranges)
assert_equal(result.group(0), "bar")
assert_is_none(result.group(1))
assert_equal(result.groups(), (None,))
class TestFlags(object):
def test_flag_ignorecase(self):
rgx_ci = cffi_re2.compile(r'a(b+)$', flags=cffi_re2.IGNORECASE)
rgx_cs = cffi_re2.compile(r'a(b+)$')
# Check case sensitive
assert_is_none(rgx_cs.match("AB"))
assert_is_none(rgx_cs.match("Ab"))
assert_is_none(rgx_cs.match("aB"))
assert_is_none(rgx_cs.match("aBb"))
assert_is_none(rgx_cs.match("abB"))
assert_is_not_none(rgx_cs.match("ab"))
assert_is_not_none(rgx_cs.match("abb"))
# Check case insensitive
assert_is_not_none(rgx_ci.match("AB"))
assert_is_not_none(rgx_ci.match("Ab"))
assert_is_not_none(rgx_ci.match("aB"))
assert_is_not_none(rgx_ci.match("aBb"))
assert_is_not_none(rgx_ci.match("abB"))
assert_is_not_none(rgx_ci.match("ab"))
assert_is_not_none(rgx_ci.match("abb"))
# Official example
assert_equal(cffi_re2.sub(r'\sAND\s', ' & ', 'Baked Beans And Spam', flags=cffi_re2.IGNORECASE),
'Baked Beans & Spam')
class TestChineseRegex(object):
"""Written by Github user @vls"""
def test_match_chinese(self):
robj = cffi_re2.compile('梦[^一-龥]*幻[^一-龥]*西[^一-龥]*游')
assert_true(robj.search('梦1幻2西3游'))
assert_false(robj.search('梦倩女幻幽魂西2游'))
def test_sub_chinese(self):
robj = cffi_re2.compile('梦[^一-龥]*幻[^一-龥]*西[^一-龥]*游')
assert_equal(robj.sub('倩女', '梦幻西游好玩吗?'), u'倩女好玩吗?')
@raises(ValueError)
def test_invalid_regex(self):
p = '(?!=.*[没不])'
robj = cffi_re2.compile(p)
@raises(ValueError)
def test_invalid_regex_2(self):
p = '(?<![没不])'
robj = cffi_re2.compile(p)
| mit | 6,689,135,428,112,306,000 | 38.438776 | 110 | 0.563907 | false |
snyderr/robotframework | src/robot/running/userkeywordrunner.py | 1 | 9358 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import (ExecutionFailed, ReturnFromKeyword, ExecutionPassed,
UserKeywordExecutionFailed, DataError, VariableError,
                          ExecutionSkipped, SkipExecution, PassExecution)
from robot.result import Keyword as KeywordResult
from robot.utils import DotDict, prepr, split_tags_from_doc
from robot.variables import is_list_var, VariableAssignment
from .arguments import DefaultValue
from .statusreporter import StatusReporter
from .steprunner import StepRunner
from .timeouts import KeywordTimeout
class UserKeywordRunner(object):
def __init__(self, handler, name=None):
self._handler = handler
self.name = name or handler.name
@property
def longname(self):
libname = self._handler.libname
return '%s.%s' % (libname, self.name) if libname else self.name
@property
def libname(self):
return self._handler.libname
@property
def arguments(self):
return self._handler.arguments
def run(self, kw, context):
assignment = VariableAssignment(kw.assign)
result = self._get_result(kw, assignment, context.variables)
with StatusReporter(context, result):
with assignment.assigner(context) as assigner:
return_value = self._run(context, kw.args, result)
assigner.assign(return_value)
return return_value
def _get_result(self, kw, assignment, variables):
handler = self._handler
doc = variables.replace_string(handler.doc, ignore_errors=True)
doc, tags = split_tags_from_doc(doc)
tags = variables.replace_list(handler.tags, ignore_errors=True) + tags
return KeywordResult(kwname=self.name,
libname=handler.libname,
doc=doc.splitlines()[0] if doc else '',
args=kw.args,
assign=tuple(assignment),
tags=tags,
type=kw.type)
def _run(self, context, args, result):
variables = context.variables
args = self._resolve_arguments(args, variables)
with context.user_keyword:
self._set_arguments(args, context)
timeout = self._get_timeout(variables)
if timeout is not None:
result.timeout = str(timeout)
with context.timeout(timeout):
error, return_ = self._execute(context)
if error and not error.can_continue(context.in_teardown):
raise error
return_value = self._get_return_value(variables, return_)
if error:
error.return_value = return_value
raise error
return return_value
def _get_timeout(self, variables=None):
timeout = self._handler.timeout
if not timeout:
return None
timeout = KeywordTimeout(timeout.value, timeout.message, variables)
return timeout
def _resolve_arguments(self, arguments, variables=None):
return self.arguments.resolve(arguments, variables)
def _set_arguments(self, arguments, context):
positional, named = arguments
variables = context.variables
args, kwargs = self.arguments.map(positional, named,
replace_defaults=False)
self._set_variables(args, kwargs, variables)
context.output.trace(lambda: self._trace_log_args_message(variables))
def _set_variables(self, positional, kwargs, variables):
before_varargs, varargs = self._split_args_and_varargs(positional)
for name, value in zip(self.arguments.positional, before_varargs):
if isinstance(value, DefaultValue):
value = value.resolve(variables)
variables['${%s}' % name] = value
if self.arguments.varargs:
variables['@{%s}' % self.arguments.varargs] = varargs
if self.arguments.kwargs:
variables['&{%s}' % self.arguments.kwargs] = DotDict(kwargs)
def _split_args_and_varargs(self, args):
if not self.arguments.varargs:
return args, []
positional = len(self.arguments.positional)
return args[:positional], args[positional:]
def _trace_log_args_message(self, variables):
args = ['${%s}' % arg for arg in self.arguments.positional]
if self.arguments.varargs:
args.append('@{%s}' % self.arguments.varargs)
if self.arguments.kwargs:
args.append('&{%s}' % self.arguments.kwargs)
return self._format_trace_log_args_message(args, variables)
def _format_trace_log_args_message(self, args, variables):
args = ['%s=%s' % (name, prepr(variables[name])) for name in args]
return 'Arguments: [ %s ]' % ' | '.join(args)
def _execute(self, context):
if not (self._handler.keywords or self._handler.return_value):
raise DataError("User keyword '%s' contains no keywords." % self.name)
error = return_ = pass_ = None
try:
StepRunner(context).run_steps(self._handler.keywords)
except ReturnFromKeyword as exception:
return_ = exception
error = exception.earlier_failures
except ExecutionPassed as exception:
pass_ = exception
error = exception.earlier_failures
except ExecutionSkipped as exception:
pass_ = exception
error = exception.earlier_failures
except ExecutionFailed as exception:
error = exception
with context.keyword_teardown(error):
td_error = self._run_teardown(context)
if error or td_error:
error = UserKeywordExecutionFailed(error, td_error)
return error or pass_, return_
def _get_return_value(self, variables, return_):
ret = self._handler.return_value if not return_ else return_.return_value
if not ret:
return None
contains_list_var = any(is_list_var(item) for item in ret)
try:
ret = variables.replace_list(ret)
except DataError as err:
raise VariableError('Replacing variables from keyword return value '
'failed: %s' % err.message)
if len(ret) != 1 or contains_list_var:
return ret
return ret[0]
def _run_teardown(self, context):
if not self._handler.teardown:
return None
try:
name = context.variables.replace_string(self._handler.teardown.name)
except DataError as err:
return ExecutionFailed(err.message, syntax=True)
if name.upper() in ('', 'NONE'):
return None
try:
StepRunner(context).run_step(self._handler.teardown, name)
except PassExecution:
return None
except ExecutionFailed as err:
return err
return None
def dry_run(self, kw, context):
assignment = VariableAssignment(kw.assign)
result = self._get_result(kw, assignment, context.variables)
with StatusReporter(context, result):
assignment.validate_assignment()
self._dry_run(context, kw.args, result)
def _dry_run(self, context, args, result):
self._resolve_arguments(args)
with context.user_keyword:
timeout = self._get_timeout()
if timeout:
result.timeout = str(timeout)
error, _ = self._execute(context)
if error:
raise error
class EmbeddedArgumentsRunner(UserKeywordRunner):
def __init__(self, handler, name):
UserKeywordRunner.__init__(self, handler, name)
match = handler.embedded_name.match(name)
if not match:
raise ValueError('Does not match given name')
self.embedded_args = list(zip(handler.embedded_args, match.groups()))
def _resolve_arguments(self, args, variables=None):
# Validates that no arguments given.
self.arguments.resolve(args, variables)
if not variables:
return []
return [(n, variables.replace_scalar(v)) for n, v in self.embedded_args]
def _set_arguments(self, embedded_args, context):
variables = context.variables
for name, value in embedded_args:
variables['${%s}' % name] = value
context.output.trace(lambda: self._trace_log_args_message(variables))
def _trace_log_args_message(self, variables):
args = ['${%s}' % arg for arg, _ in self.embedded_args]
return self._format_trace_log_args_message(args, variables)
| apache-2.0 | 3,379,643,884,879,973,000 | 39.686957 | 82 | 0.615409 | false |
Opentrons/otone_frontend | scripts/zip_ot_app.py | 1 | 7057 | import sys
import os
import platform
import re
import subprocess
import struct
import time
import zipfile
script_tag = "[OT-App zipping] "
script_tab = " "
# The project_root_dir depends on this file location, assumed to be two levels
# below project root, so it cannot be moved without updating this variable
project_root_dir = \
os.path.dirname( # going up 1 level
os.path.dirname(os.path.realpath(__file__))) # folder dir of this
electron_app_dir = os.path.join(project_root_dir, "out")
def get_build_tag(os_type):
"""
Gets the OS, CPU architecture (32 vs 64 bit), and current time stamp and
appends CI branch, commit, or pull request info
:return: string of os, arch, and time stamp and if CI info if available
"""
arch_time_stamp = "{}{}_{}".format(
platform.system(),
struct.calcsize('P') * 8,
time.strftime("%Y-%m-%d_%H.%M")
)
ci_tag = None
if os_type == "mac":
print(script_tag + "Checking Travis-CI environment variables for tag:")
ci_tag = tag_from_ci_env_vars(
ci_name='Travis-CI',
pull_request_var='TRAVIS_PULL_REQUEST',
branch_var='TRAVIS_BRANCH',
commit_var='TRAVIS_COMMIT'
)
if os_type == "win":
print(script_tag + "Checking Appveyor-CI enironment variables for tag:")
ci_tag = tag_from_ci_env_vars(
ci_name='Appveyor-CI',
pull_request_var='APPVEYOR_PULL_REQUEST_NUMBER',
branch_var='APPVEYOR_REPO_BRANCH',
commit_var='APPVEYOR_REPO_COMMIT'
)
if ci_tag:
return "{}_{}".format(arch_time_stamp, ci_tag)
return arch_time_stamp
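# Added illustrative examples (values hypothetical) of the build tags that
# get_build_tag() above can produce:
#   "Darwin64_2015-07-01_12.30_pull_123"            (CI pull request build)
#   "Windows32_2015-07-01_12.30_master_ab12cd34ef"  (CI branch build)
#   "Linux64_2015-07-01_12.30"                      (no CI variables found)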
def tag_from_ci_env_vars(ci_name, pull_request_var, branch_var, commit_var):
pull_request = os.environ.get(pull_request_var)
branch = os.environ.get(branch_var)
commit = os.environ.get(commit_var)
if pull_request and pull_request != 'false':
try:
            pr_number = int(re.findall(r"\d+", pull_request)[0])
            print(script_tab + "Valid {} pull request variable found: "
"{}".format(ci_name, pr_number))
return 'pull_{}'.format(pr_number)
except (ValueError, TypeError):
print(script_tab + 'The pull request environmental variable {} '
'value {} from {} is not a valid number'.format(
pull_request_var, pull_request, ci_name
))
if branch and commit:
print(script_tab + "\tBranch and commit valid {} variables found "
"{} {}".format(
ci_name, branch, commit
))
return "{}_{}".format(branch, commit[:10])
print(script_tab + "The environmental variables for {} were deemed "
"invalid".format(ci_name))
print(script_tab + "--{}: {}".format(pull_request_var, pull_request))
print(script_tab + "--{}: {}".format(branch_var, branch))
print(script_tab + "--{}: {}".format(commit_var, commit))
return None
def zip_ot_app(build_tag, os_type):
print(script_tab + "Zipping OT App. Using tag: {}".format(build_tag))
# Assuming there is only one app in the electron build dir, zip that app
current_app_name = os.listdir(electron_app_dir)[0]
current_app_path = os.path.join(electron_app_dir, current_app_name)
# We need to CD into the directory where the Mac app executable is located
# in order to zip the files within that directory and avoid zipping that
# directory itself
old_cwd = os.getcwd()
os.chdir(current_app_path)
print(script_tab + "Zipping {} located in {}".format(
current_app_name, os.getcwd())
)
releases_dir = os.path.join(project_root_dir, 'releases')
if not os.path.isdir(releases_dir):
os.mkdir(releases_dir)
# Place app in the releases dir
# e.g. <project root>/releases/opentrons_<build tag>.zip
zip_app_path = os.path.join(
releases_dir,
"opentrons_{}.zip".format(build_tag)
)
print(script_tab + "Zipped application will be located in: {}".format(
zip_app_path
))
if os_type == "mac":
zip_process = subprocess.Popen(
['zip', '-r', '-X', '--symlinks', zip_app_path, '.'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
_, std_err = zip_process.communicate()
if std_err:
print(script_tab + "Error using zip command: {}".format(std_err))
if os_type == "win":
zip_output = zipfile.ZipFile(zip_app_path, 'w', zipfile.ZIP_DEFLATED)
for dirname, subdirs, subfiles in os.walk('.'):
zip_output.write(dirname)
for filename in subfiles:
zip_output.write(os.path.join(dirname, filename))
zip_output.close()
# zip_command = "powershell.exe -nologo -noprofile -command \"& "
# zip_command += "{ Add-Type -A 'System.IO.Compression.FileSystem'; "
# zip_command += "[IO.Compression.ZipFile]::CreateFromDirectory("
# zip_command += "'{" + current_app_path + "}','"+zip_app_path+"'); }\""
# print(script_tab + zip_command)
# zip_process = subprocess.Popen(
# zip_command,
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
# shell=True
# )
os.chdir(old_cwd)
def get_os():
"""
    Gets the OS based on the command line argument or, failing that, the
    platform info. Returns "win" for Windows; Linux and Darwin both map to "mac".
"""
valid_os = ["windows", "linux", "mac"]
print(script_tab + "Checking for command line argument indicated OS:")
if len(sys.argv) > 1:
if sys.argv[1] in valid_os:
# Take the first argument and use it as the os
print(script_tab + "Valid command line argument found: %s" %
sys.argv[1])
if sys.argv[1] == "windows":
return "win"
else:
return "mac"
else:
print(script_tab + "Invalid command line argument found: %s\n" %
sys.argv[1] + script_tab + "Options available: %s" % valid_os)
print(script_tab + "Valid command line arg not found, checking system.")
os_found = platform.system().lower()
if os_found == "windows":
os_found = "win"
print(script_tab + "OS found is: %s" % os_found)
return os_found
elif os_found == "linux" or os_found == "darwin":
os_found = "mac"
print(script_tab + "OS found is: %s" % os_found)
return os_found
else:
raise SystemExit("Exit: OS data found is invalid '%s'" % os_found)
def main():
print(script_tag + "Zipping OT App procedure started.")
print(script_tag + "Checking for OS.")
os_type = get_os()
print(script_tag + "Zipping OT App for %s." % os_type)
build_tag = get_build_tag(os_type)
zip_ot_app(build_tag, os_type)
if __name__ == '__main__':
main()
| apache-2.0 | -8,465,474,372,647,020,000 | 34.285 | 80 | 0.577157 | false |
zuher83/z-odoo8-addons | z_partner_ref/__openerp__.py | 1 | 1529 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2013-2014 Zuher ELMAS. All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
'name': 'Z Partner References',
'version': '1',
"category" : 'Generic module',
'complexity': "easy",
'description': """
    Create automatic partner references by type (supplier, customer and other partners)
""",
'author': 'Zuher Elmas',
'depends': ['contacts'],
'update_xml': ['partner_sequence.xml',
'partner_view.xml'],
'demo_xml': [],
'test': [],
'installable': True,
'application': False,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -501,026,057,751,060,100 | 33.75 | 91 | 0.59189 | false |
ptarroso/JoinSplit | JoinSplit_dialog.py | 1 | 6960 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
JoinSplitDialog
A QGIS plugin
JoinSplit
-------------------
begin : 2015-02-25
git sha : $Format:%H$
copyright : (C) 2015 by Pedro Tarroso
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; version 3. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
import os
from PyQt5 import QtWidgets
from qgis.PyQt import QtGui, uic, QtCore
from qgis.core import QgsProject, Qgis
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'JoinSplit_dialog_base.ui'))
class JoinSplitDialog(QtWidgets.QDialog, FORM_CLASS):
def __init__(self, iface, parent=None):
"""Constructor."""
super(JoinSplitDialog, self).__init__(parent)
# Set up the user interface from Designer.
# After setupUI you can access any designer object by doing
# self.<objectname>, and you can use autoconnect slots - see
# http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
# #widgets-and-dialogs-with-auto-connect
self.setupUi(self)
self.iface = iface
self.OutputButton.clicked.connect(self.outFolder)
self.JoinTableCombo.currentIndexChanged.connect(self.updateFields)
self.JoinFieldCombo.currentIndexChanged.connect(self.populateSplits)
self.checkStyle.stateChanged.connect(self.styleState)
self.styleButton.clicked.connect(self.styleFile)
def outFolder(self):
# Show the folder dialog for output
self.OutputLine.clear()
fileDialog = QtWidgets.QFileDialog()
outFolderName = fileDialog.getExistingDirectory(self, "Open a folder", ".", QtWidgets.QFileDialog.ShowDirsOnly)
outPath = QtCore.QFileInfo(outFolderName).absoluteFilePath()
if outFolderName:
self.OutputLine.clear()
self.OutputLine.insert(outPath)
def styleFile(self):
# Show the file dialog for choosing a style file
self.styleLine.clear()
fileDialog = QtWidgets.QFileDialog()
styleFileName = fileDialog.getOpenFileName(self, "Open style file",
'', "QML Files (*.qml)")[0]
styleFileName = QtCore.QFileInfo(styleFileName).absoluteFilePath()
if styleFileName:
self.styleLine.clear()
self.styleLine.insert(styleFileName)
def getOutFolder(self):
return(self.OutputLine.text())
def getJoinTable(self):
return(str(self.JoinTableCombo.currentText()))
def getJoinField(self):
return(str(self.JoinFieldCombo.currentText()))
def getGridLayer(self):
return(str(self.GridLayerCombo.currentText()))
def getIncZero(self):
return(bool(self.includeZero.checkState()))
def getcheckStyle(self):
return(bool(self.checkStyle.checkState()))
def getStyleFile(self):
if self.getcheckStyle():
return(self.styleLine.text())
else:
return(False)
def updateCombos(self, items):
if len(items) > 0:
self.GridLayerCombo.clear()
self.JoinTableCombo.clear()
for item in items:
self.GridLayerCombo.addItem(item)
self.JoinTableCombo.addItem(item)
def updateFields(self):
joinTable = self.getJoinTable()
if joinTable != "":
allLayers = [layer for layer in QgsProject.instance().mapLayers().values()]
allLyrNames = [lyr.name() for lyr in allLayers]
if joinTable in allLyrNames:
lyr = allLayers[allLyrNames.index(joinTable)]
fields = lyr.fields()
self.JoinFieldCombo.clear()
fieldNames = [self.JoinFieldCombo.addItem(f.name()) for f in fields]
def populateSplits(self):
joinTable = self.getJoinTable()
if joinTable != "":
allLayers = [layer for layer in QgsProject.instance().mapLayers().values()]
allLyrNames = [lyr.name() for lyr in allLayers]
if joinTable in allLyrNames:
lyr = allLayers[allLyrNames.index(joinTable)]
fields = lyr.fields()
self.splitFields.clear()
for item in [f.name() for f in fields]:
if item != self.getJoinField():
self.splitFields.addItem(item)
def getSplits(self):
splits = []
count = self.splitFields.count()
for i in range(0, count):
item = self.splitFields.item(i)
if item.isSelected():
splits.append(item.text())
return(splits)
def styleState(self, enable):
self.styleButton.setEnabled(bool(enable))
self.styleLine.setEnabled(bool(enable))
def setProgressBar(self, main, text, maxVal=100):
self.widget = self.iface.messageBar().createMessage(main, text)
self.prgBar = QtWidgets.QProgressBar()
self.prgBar.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.prgBar.setValue(0)
self.prgBar.setMaximum(maxVal)
self.widget.layout().addWidget(self.prgBar)
self.iface.messageBar().pushWidget(self.widget, Qgis.Info)
def showMessage(self, main, txt):
self.widget.setTitle(main)
self.widget.setText(txt)
def ProgressBar(self, value):
self.prgBar.setValue(value)
if (value == self.prgBar.maximum()):
self.iface.messageBar().clearWidgets()
self.iface.mapCanvas().refresh()
def emitMsg(self, main, text, type):
# Emits a message to QGIS.
# type is either Qgis.Warning or Qgis.Critical
        # TODO: Replace warnMsg and errorMsg with this function.
msg = self.iface.messageBar().createMessage(main, text)
self.iface.messageBar().pushWidget(msg, type)
def warnMsg(self, main, text):
self.warn = self.iface.messageBar().createMessage(main, text)
self.iface.messageBar().pushWidget(self.warn)
def errorMsg(self, main, text):
self.warn = self.iface.messageBar().createMessage(main, text)
self.iface.messageBar().pushWidget(self.warn)
| gpl-3.0 | -365,973,032,194,760,960 | 38.771429 | 119 | 0.572126 | false |
dc3-plaso/plaso | tests/multi_processing/engine.py | 1 | 1033 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests the multi-process processing engine."""
import unittest
from tests import test_lib as shared_test_lib
class MultiProcessEngineTest(shared_test_lib.BaseTestCase):
"""Tests for the multi-process engine."""
# TODO: add test for _AbortJoin
# TODO: add test for _AbortKill
# TODO: add test for _AbortTerminate
# TODO: add test for _CheckStatusWorkerProcess
# TODO: add test for _GetProcessStatus
# TODO: add test for _KillProcess
# TODO: add test for _LogMemoryUsage
# TODO: add test for _ProfilingSampleMemory
# TODO: add test for _RaiseIfNotMonitored
# TODO: add test for _RaiseIfNotRegistered
# TODO: add test for _RegisterProcess
# TODO: add test for _StartMonitoringProcess
# TODO: add test for _StartStatusUpdateThread
# TODO: add test for _StopMonitoringProcess
# TODO: add test for _StopMonitoringProcesses
# TODO: add test for _StopStatusUpdateThread
# TODO: add test for _TerminateProcess
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,618,904,295,516,928,000 | 30.30303 | 59 | 0.725073 | false |
deepmind/bsuite | bsuite/__init__.py | 1 | 1081 | # python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Behaviour Suite for Reinforcement Learning."""
from . import bsuite as _bsuite
from bsuite._metadata import __version__
load = _bsuite.load
load_from_id = _bsuite.load_from_id
load_and_record = _bsuite.load_and_record
load_and_record_to_sqlite = _bsuite.load_and_record_to_sqlite
load_and_record_to_csv = _bsuite.load_and_record_to_csv
| apache-2.0 | 8,878,673,792,378,914,000 | 40.576923 | 78 | 0.704903 | false |
karanchawla/CallistoRover | src/callisto_control/diffdrive_controller/src/diffdrive_controller.py | 1 | 2557 | #!/usr/bin/python
import rospy
import roslib
# Messages
from geometry_msgs.msg import Twist
from std_msgs.msg import Float32
class CmdVelToDiffDriveMotors:
def __init__(self):
rospy.init_node('diffdrive_controller')
self.cmdvel_sub = rospy.Subscriber('cmd_vel', Twist, self.twistCallback)
self.lwheel_tangent_vel_target_pub = rospy.Publisher('lwheel_tangent_vel_target', Float32, queue_size=10)
self.rwheel_tangent_vel_target_pub = rospy.Publisher('rwheel_tangent_vel_target', Float32, queue_size=10)
self.L = rospy.get_param('~robot_wheel_separation_distance', 0.14)
self.R = rospy.get_param('~robot_wheel_radius', 0.03)
self.rate = rospy.get_param('~rate', 50)
self.timeout_idle = rospy.get_param('~timeout_idle', 2)
self.time_prev_update = rospy.Time.now()
self.target_v = 0;
self.target_w = 0;
# When given no commands for some time, do not move
def spin(self):
rospy.loginfo("Start diffdrive_controller")
rate = rospy.Rate(self.rate)
    rospy.on_shutdown(self.shutdown)
    while not rospy.is_shutdown():
      # Recompute the current time every iteration; otherwise the idle
      # timeout below would be measured against the node start time forever.
      time_curr_update = rospy.Time.now()
      time_diff_update = (time_curr_update - self.time_prev_update).to_sec()
if time_diff_update < self.timeout_idle: # Only move if command given recently
self.update();
rate.sleep()
rospy.spin();
def shutdown(self):
rospy.loginfo("Stop diffdrive_controller")
# Stop message
self.lwheel_tangent_vel_target_pub.publish(0)
self.rwheel_tangent_vel_target_pub.publish(0)
rospy.sleep(1)
def update(self):
    # Suppose we have a target forward velocity v and angular velocity w,
    # and a robot with distance L between its wheels.
    # Let vr and vl be the tangential wheel velocities (m/s) of the right
    # and left wheels, respectively. For a differential drive:
    #   2v = vr + vl   (forward speed is the average of the two wheel speeds)
    #   Lw = vr - vl   (rotation comes from the difference in wheel speeds)
    # Solving for the wheel targets published below:
    #   vr = (2v + wL) / 2
    #   vl = (2v - wL) / 2
    # (Dividing by the wheel radius R would give angular wheel velocities;
    # this node publishes tangential targets, so no division by R is needed.)
    # See the illustrative sketch after this class for a worked example.
    vr = (2*self.target_v + self.target_w*self.L) / 2
    vl = (2*self.target_v - self.target_w*self.L) / 2
self.rwheel_tangent_vel_target_pub.publish(vr)
self.lwheel_tangent_vel_target_pub.publish(vl)
def twistCallback(self,msg):
self.target_v = msg.linear.x;
self.target_w = msg.angular.z;
self.time_prev_update = rospy.Time.now()
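# Added illustrative sketch (not part of the original node): the same
# kinematics as CmdVelToDiffDriveMotors.update() applied to one sample command.
def _wheel_targets_example(v=0.2, w=1.0, L=0.14):
  # v in m/s, w in rad/s, L in metres (default matches the parameter above)
  vr = (2*v + w*L) / 2  # right wheel tangential velocity -> 0.27 m/s
  vl = (2*v - w*L) / 2  # left wheel tangential velocity  -> 0.13 m/s
  return vr, vl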
def main():
cmdvel_to_motors = CmdVelToDiffDriveMotors();
cmdvel_to_motors.spin()
if __name__ == '__main__':
main();
| mit | 5,752,347,480,478,748,000 | 34.027397 | 109 | 0.678138 | false |
JIC-CSB/jicimagelib | tests/AutoName_unit_tests.py | 1 | 1160 | """Tests for the :class:`jicimagelib.io.AutoName` class."""
import unittest
class AutoNameTests(unittest.TestCase):
def test_import_AutoName_class(self):
# This throws an error if the class cannot be imported.
from jicimagelib.io import AutoName
def test_count(self):
from jicimagelib.io import AutoName
self.assertEqual(AutoName.count, 0)
def test_directory(self):
from jicimagelib.io import AutoName
self.assertEqual(AutoName.directory, None)
def test_suffix(self):
from jicimagelib.io import AutoName
self.assertEqual(AutoName.suffix, '.png')
def test_name_callable(self):
from jicimagelib.io import AutoName
self.assertTrue(callable(AutoName.name))
def test_name_logic(self):
from jicimagelib.io import AutoName
def no_transform(image):
return image
self.assertEqual(AutoName.name(no_transform), '1_no_transform.png')
AutoName.directory = '/tmp'
self.assertEqual(AutoName.name(no_transform), '/tmp/2_no_transform.png')
if __name__ == '__main__':
unittest.main()
| mit | -6,143,133,229,164,286,000 | 31.222222 | 80 | 0.652586 | false |
timbuchwaldt/bundlewrap | bundlewrap/deps.py | 1 | 23872 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .exceptions import BundleError, ItemDependencyError, NoSuchItem
from .items import Item
from .items.actions import Action
from .utils.text import bold, mark_for_translation as _
from .utils.ui import io
class DummyItem(object):
bundle = None
triggered = False
def __init__(self, *args, **kwargs):
self.needed_by = []
self.needs = []
self.preceded_by = []
self.precedes = []
self.tags = []
self.triggered_by = []
self.triggers = []
self._deps = []
self._precedes_items = []
def __lt__(self, other):
return self.id < other.id
def _precedes_incorrect_item(self):
return False
def apply(self, *args, **kwargs):
return (Item.STATUS_OK, [])
def test(self):
pass
class BundleItem(DummyItem):
"""
Represents a dependency on all items in a certain bundle.
"""
ITEM_TYPE_NAME = 'bundle'
def __init__(self, bundle):
self.bundle = bundle
super(BundleItem, self).__init__()
def __repr__(self):
return "<BundleItem: {}>".format(self.bundle.name)
@property
def id(self):
return "bundle:{}".format(self.bundle.name)
class TagItem(DummyItem):
"""
This item depends on all items with the given tag.
"""
ITEM_TYPE_NAME = 'tag'
def __init__(self, tag_name):
self.tag_name = tag_name
super(TagItem, self).__init__()
def __repr__(self):
return "<TagItem: {}>".format(self.tag_name)
@property
def id(self):
return "tag:{}".format(self.tag_name)
class TypeItem(DummyItem):
"""
Represents a dependency on all items of a certain type.
"""
ITEM_TYPE_NAME = 'type'
def __init__(self, item_type):
self.item_type = item_type
super(TypeItem, self).__init__()
def __repr__(self):
return "<TypeItem: {}>".format(self.item_type)
@property
def id(self):
return "{}:".format(self.item_type)
def find_item(item_id, items):
"""
Returns the first item with the given ID within the given list of
items.
"""
try:
item = list(filter(lambda item: item.id == item_id, items))[0]
except IndexError:
raise NoSuchItem(_("item not found: {}").format(item_id))
return item
def _find_items_of_types(item_types, items, include_dummy=False):
"""
Returns a subset of items with any of the given types.
"""
for item_id, item in items.items():
if item_id.split(":", 1)[0] in item_types and (
include_dummy or not isinstance(item, DummyItem)
):
yield item
def _flatten_dependencies(items):
"""
This will cause all dependencies - direct AND inherited - to be
listed in item._flattened_deps.
"""
for item in items.values():
if not hasattr(item, '_flattened_deps'):
_flatten_deps_for_item(item, items)
return items
def _flatten_deps_for_item(item, items):
"""
Recursively retrieves and returns a list of all inherited
dependencies of the given item.
This can handle loops, but will ignore them.
"""
item._flattened_deps = set(item._deps)
for dep in item._deps:
try:
dep_item = items[dep]
except KeyError:
raise ItemDependencyError(_(
"'{item}' in bundle '{bundle}' has a dependency (needs) "
"on '{dep}', which doesn't exist"
).format(
item=item.id,
bundle=item.bundle.name,
dep=dep,
))
# Don't recurse if we have already resolved nested dependencies
# for this item. Also serves as a guard against infinite
# recursion when there are loops.
if not hasattr(dep_item, '_flattened_deps'):
_flatten_deps_for_item(dep_item, items)
item._flattened_deps |= set(dep_item._flattened_deps)
item._flattened_deps = sorted(item._flattened_deps)
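# Illustrative sketch (added, not part of the original module): given items
# whose direct deps form the chain  file:c -> svc:b -> pkg:a  (i.e.
# items["file:c"]._deps == ["svc:b"] and items["svc:b"]._deps == ["pkg:a"]),
# _flatten_deps_for_item(items["file:c"], items) leaves
#     items["file:c"]._flattened_deps == ["pkg:a", "svc:b"]
# i.e. direct plus inherited dependencies, sorted.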
def _has_trigger_path(items, item, target_item_id):
"""
Returns True if the given item directly or indirectly (trough
other items) triggers the item with the given target item id.
"""
if target_item_id in item.triggers:
return True
for triggered_id in item.triggers:
try:
triggered_item = find_item(triggered_id, items)
except NoSuchItem:
# the triggered item may already have been skipped by
# `bw apply -s`
continue
if _has_trigger_path(items, triggered_item, target_item_id):
return True
return False
def _inject_bundle_items(items):
"""
Adds virtual items that depend on every item in a bundle.
"""
bundle_items = {}
for item in items.values():
if item.bundle is None:
continue
if item.bundle.name not in bundle_items:
bundle_items[item.bundle.name] = BundleItem(item.bundle)
bundle_items[item.bundle.name]._deps.append(item.id)
items.update({item.id: item for item in bundle_items.values()})
return items
def _inject_canned_actions(items):
"""
Looks for canned actions like "svc_upstart:mysql:reload" in item
triggers and adds them to the list of items.
"""
added_actions = {}
for item in items.values():
for triggered_item_id in item.triggers:
if triggered_item_id in added_actions:
# action has already been triggered
continue
try:
type_name, item_name, action_name = triggered_item_id.split(":")
except ValueError:
# not a canned action
continue
target_item_id = "{}:{}".format(type_name, item_name)
try:
target_item = items[target_item_id]
except KeyError:
raise BundleError(_(
"{item} in bundle '{bundle}' triggers unknown item '{target_item}'"
).format(
bundle=item.bundle.name,
item=item.id,
target_item=target_item_id,
))
try:
action_attrs = target_item.get_canned_actions()[action_name]
except KeyError:
raise BundleError(_(
"{item} in bundle '{bundle}' triggers unknown "
"canned action '{action}' on {target_item}"
).format(
action=action_name,
bundle=item.bundle.name,
item=item.id,
target_item=target_item_id,
))
action_attrs.update({'triggered': True})
action = Action(
item.bundle,
triggered_item_id,
action_attrs,
skip_name_validation=True,
)
action._prepare_deps(items)
added_actions[triggered_item_id] = action
items.update({item.id: item for item in added_actions.values()})
return items
def _inject_concurrency_blockers(items, node_os, node_os_version):
"""
Looks for items with BLOCK_CONCURRENT set and inserts daisy-chain
dependencies to force a sequential apply.
"""
# find every item type that cannot be applied in parallel
item_types = set()
for item in items.values():
item._concurrency_deps = [] # used for DOT (graphviz) output only
if (
not isinstance(item, DummyItem) and
item.block_concurrent(node_os, node_os_version)
):
item_types.add(item.__class__)
# Now that we have collected all relevant types,
# we must group them together when they overlap. E.g.:
#
# Type1.block_concurrent(...) == ["type1", "type2"]
# Type2.block_concurrent(...) == ["type2", "type3"]
# Type4.block_concurrent(...) == ["type4"]
#
# becomes
#
# ["type1", "type2", "type3"]
# ["type4"]
#
# because the first two types overlap in blocking type2. This is
# necessary because existing dependencies from type3 to type1 need
# to be taken into account when generating the daisy-chains
# connecting the three types. If we processed blockers for Type1 and
# Type2 independently, we might end up with two very different
# chains for Type2, which may cause circular dependencies.
chain_groups = []
for item_type in item_types:
block_concurrent = [item_type.ITEM_TYPE_NAME]
block_concurrent.extend(item_type.block_concurrent(node_os, node_os_version))
found = False
for blocked_types in chain_groups:
for blocked_type in block_concurrent:
if blocked_type in blocked_types:
blocked_types.extend(block_concurrent)
found = True
break
if not found:
chain_groups.append(block_concurrent)
# daisy-chain all items of the chain group while respecting existing
# dependencies between them
for blocked_types in chain_groups:
blocked_types = set(blocked_types)
type_items = list(_find_items_of_types(
blocked_types,
items,
))
processed_items = []
for item in type_items:
# disregard deps to items of other types
item.__deps = list(filter(
lambda dep: dep.split(":", 1)[0] in blocked_types,
item._flattened_deps,
))
previous_item = None
while len(processed_items) < len(type_items):
# find the first item without same-type deps we haven't
# processed yet
try:
item = list(filter(
lambda item: not item.__deps and item not in processed_items,
type_items,
))[0]
except IndexError:
# this can happen if the flattened deps of all items of
# this type already contain a dependency on another
# item of this type
break
if previous_item is not None: # unless we're at the first item
# add dep to previous item -- unless it's already in there
if previous_item.id not in item._deps:
item._deps.append(previous_item.id)
item._concurrency_deps.append(previous_item.id)
item._flattened_deps.append(previous_item.id)
previous_item = item
processed_items.append(item)
# Now remove all deps on the processed item. This frees up
# items depending *only* on the processed item to be
# eligible for the next iteration of this loop.
for other_item in type_items:
try:
other_item.__deps.remove(item.id)
except ValueError:
pass
return items
def _inject_tag_items(items):
"""
Takes a list of items and adds tag items depending on each type of
item in the list. Returns the appended list.
"""
tag_items = {}
for item in items.values():
for tag in item.tags:
if tag not in tag_items:
tag_items[tag] = TagItem(tag)
tag_items[tag]._deps.append(item.id)
items.update({item.id: item for item in tag_items.values()})
return items
def _inject_type_items(items):
"""
Takes a list of items and adds dummy items depending on each type of
item in the list. Returns the appended list.
"""
type_items = {}
for item in items.values():
# create dummy items that depend on each item of their type
item_type = item.id.split(":")[0]
if item_type not in type_items:
type_items[item_type] = TypeItem(item_type)
type_items[item_type]._deps.append(item.id)
# create DummyItem for every type
for dep in item._deps:
item_type = dep.split(":")[0]
if item_type not in type_items:
type_items[item_type] = TypeItem(item_type)
items.update({item.id: item for item in type_items.values()})
return items
def _inject_reverse_dependencies(items):
"""
Looks for 'needed_by' deps and creates standard dependencies
accordingly.
"""
def add_dep(item, dep):
if dep not in item._deps:
item._deps.append(dep)
item._reverse_deps.append(dep)
for item in items.values():
item._reverse_deps = []
for item in items.values():
for depending_item_id in item.needed_by:
# bundle items
if depending_item_id.startswith("bundle:"):
depending_bundle_name = depending_item_id.split(":")[1]
for depending_item in items.values():
if depending_item.bundle.name == depending_bundle_name:
add_dep(depending_item, item.id)
# tag items
if depending_item_id.startswith("tag:"):
tag_name = depending_item_id.split(":")[1]
for depending_item in items.values():
if tag_name in depending_item.tags:
add_dep(depending_item, item.id)
# type items
if depending_item_id.endswith(":"):
target_type = depending_item_id[:-1]
for depending_item in _find_items_of_types([target_type], items):
add_dep(depending_item, item.id)
# single items
else:
try:
depending_item = items[depending_item_id]
except KeyError:
raise ItemDependencyError(_(
"'{item}' in bundle '{bundle}' has a reverse dependency (needed_by) "
"on '{dep}', which doesn't exist"
).format(
item=item.id,
bundle=item.bundle.name,
dep=depending_item_id,
))
add_dep(depending_item, item.id)
return items
def _inject_reverse_triggers(items):
"""
Looks for 'triggered_by' and 'precedes' attributes and turns them
into standard triggers (defined on the opposing end).
"""
for item in items.values():
for triggering_item_id in item.triggered_by:
try:
triggering_item = items[triggering_item_id]
except KeyError:
raise ItemDependencyError(_(
"'{item}' in bundle '{bundle}' has a reverse trigger (triggered_by) "
"on '{dep}', which doesn't exist"
).format(
item=item.id,
bundle=item.bundle.name,
dep=triggering_item_id,
))
if triggering_item.id.startswith("bundle:"): # bundle items
bundle_name = triggering_item.id.split(":")[1]
for actual_triggering_item in items.values():
if triggering_item.bundle.name == bundle_name:
actual_triggering_item.triggers.append(item.id)
elif triggering_item.id.startswith("tag:"): # tag items
tag_name = triggering_item.id.split(":")[1]
for actual_triggering_item in items.values():
if tag_name in triggering_item.tags:
actual_triggering_item.triggers.append(item.id)
elif triggering_item.id.endswith(":"): # type items
target_type = triggering_item.id[:-1]
for actual_triggering_item in _find_items_of_types([target_type], items):
actual_triggering_item.triggers.append(item.id)
else:
triggering_item.triggers.append(item.id)
for preceded_item_id in item.precedes:
try:
preceded_item = items[preceded_item_id]
except KeyError:
raise ItemDependencyError(_(
"'{item}' in bundle '{bundle}' has a reverse trigger (precedes) "
"on '{dep}', which doesn't exist"
).format(
item=item.id,
bundle=item.bundle.name,
dep=preceded_item_id,
))
if preceded_item.id.startswith("bundle:"): # bundle items
bundle_name = preceded_item.id.split(":")[1]
for actual_preceded_item in items.values():
if actual_preceded_item.bundle.name == bundle_name:
actual_preceded_item.preceded_by.append(item.id)
elif preceded_item.id.startswith("tag:"): # tag items
tag_name = preceded_item.id.split(":")[1]
for actual_preceded_item in items.values():
if tag_name in actual_preceded_item.tags:
actual_preceded_item.preceded_by.append(item.id)
elif preceded_item.id.endswith(":"): # type items
target_type = preceded_item.id[:-1]
for actual_preceded_item in _find_items_of_types([target_type], items):
actual_preceded_item.preceded_by.append(item.id)
else:
preceded_item.preceded_by.append(item.id)
return items
def _inject_trigger_dependencies(items):
"""
Injects dependencies from all triggered items to their triggering
items.
"""
for item in items.values():
for triggered_item_id in item.triggers:
try:
triggered_item = items[triggered_item_id]
except KeyError:
raise BundleError(_(
"unable to find definition of '{item1}' triggered "
"by '{item2}' in bundle '{bundle}'"
).format(
bundle=item.bundle.name,
item1=triggered_item_id,
item2=item.id,
))
if not triggered_item.triggered:
raise BundleError(_(
"'{item1}' in bundle '{bundle1}' triggered "
"by '{item2}' in bundle '{bundle2}', "
"but missing 'triggered' attribute"
).format(
item1=triggered_item.id,
bundle1=triggered_item.bundle.name,
item2=item.id,
bundle2=item.bundle.name,
))
triggered_item._deps.append(item.id)
return items
def _inject_preceded_by_dependencies(items):
"""
Injects dependencies from all triggering items to their
preceded_by items and attaches triggering items to preceding items.
"""
for item in items.values():
if item.preceded_by and item.triggered:
raise BundleError(_(
"triggered item '{item}' in bundle '{bundle}' must not use "
"'preceded_by' (use chained triggers instead)".format(
bundle=item.bundle.name,
item=item.id,
),
))
for triggered_item_id in item.preceded_by:
try:
triggered_item = items[triggered_item_id]
except KeyError:
raise BundleError(_(
"unable to find definition of '{item1}' preceding "
"'{item2}' in bundle '{bundle}'"
).format(
bundle=item.bundle.name,
item1=triggered_item_id,
item2=item.id,
))
if not triggered_item.triggered:
raise BundleError(_(
"'{item1}' in bundle '{bundle1}' precedes "
"'{item2}' in bundle '{bundle2}', "
"but missing 'triggered' attribute"
).format(
item1=triggered_item.id,
bundle1=triggered_item.bundle.name,
item2=item.id,
bundle2=item.bundle.name if item.bundle else "N/A",
))
triggered_item._precedes_items.append(item)
item._deps.append(triggered_item.id)
return items
@io.job_wrapper(_("{} processing dependencies").format(bold("{1}")))
def prepare_dependencies(items, node_name, node_os, node_os_version):
"""
Performs all dependency preprocessing on a list of items.
"""
for item in items:
item._check_bundle_collisions(items)
item._check_loopback_dependency()
item._prepare_deps(items)
# transform items into a dict to prevent repeated item.id lookups
items = {item.id: item for item in items}
items = _inject_bundle_items(items)
items = _inject_tag_items(items)
items = _inject_type_items(items)
items = _inject_canned_actions(items)
items = _inject_reverse_triggers(items)
items = _inject_reverse_dependencies(items)
items = _inject_trigger_dependencies(items)
items = _inject_preceded_by_dependencies(items)
items = _flatten_dependencies(items)
items = _inject_concurrency_blockers(items, node_os, node_os_version)
for item in items.values():
if not isinstance(item, DummyItem):
item._check_redundant_dependencies()
return list(items.values())
def remove_dep_from_items(items, dep):
"""
Removes the given item id (dep) from the temporary list of
dependencies of all items in the given list.
"""
for item in items:
try:
item._deps.remove(dep)
except ValueError:
pass
return items
def remove_item_dependents(items, dep_item, skipped=False):
"""
Removes the items depending on the given item from the list of items.
"""
removed_items = []
for item in items:
if dep_item.id in item._deps:
if _has_trigger_path(items, dep_item, item.id):
# triggered items cannot be removed here since they
# may yet be triggered by another item and will be
# skipped anyway if they aren't
item._deps.remove(dep_item.id)
elif skipped and isinstance(item, DummyItem) and \
dep_item.triggered and not dep_item.has_been_triggered:
# don't skip dummy items because of untriggered members
# see issue #151; separate elif for clarity
item._deps.remove(dep_item.id)
else:
removed_items.append(item)
for item in removed_items:
items.remove(item)
if removed_items:
io.debug(
"skipped these items because they depend on {item}, which was "
"skipped previously: {skipped}".format(
item=dep_item.id,
skipped=", ".join([item.id for item in removed_items]),
)
)
all_recursively_removed_items = []
for removed_item in removed_items:
items, recursively_removed_items = \
remove_item_dependents(items, removed_item, skipped=skipped)
all_recursively_removed_items += recursively_removed_items
return (items, removed_items + all_recursively_removed_items)
def split_items_without_deps(items):
"""
    Takes a list of items and extracts the ones that don't have any
    dependencies. Returns a tuple of (items that still have deps, items
    without deps).
"""
remaining_items = []
removed_items = []
for item in items:
if item._deps:
remaining_items.append(item)
else:
removed_items.append(item)
return (remaining_items, removed_items)
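# Illustrative sketch (not part of the original module): how the helpers above
# are typically combined into a simple scheduling loop. The function below is
# hypothetical and only demonstrates the intended data flow.
def _example_scheduling_loop(items):
    """Process items in dependency order using the helpers above (sketch)."""
    processed = []
    while items:
        items, ready = split_items_without_deps(items)
        if not ready:
            raise RuntimeError("no progress possible, circular dependency suspected")
        for item in ready:
            processed.append(item)
            items = remove_dep_from_items(items, item.id)
    return processed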
| gpl-3.0 | -2,515,795,947,550,293,000 | 34.418398 | 93 | 0.55496 | false |
Karandash8/claude_reborn | claude_core/tools/messaging.py | 1 | 5585 | '''
Created on Sep 4, 2015
@author: andrei
'''
from kombu import Connection
import time
from Queue import Empty
import ConfigParser
import tools.auxiliary as auxiliary
import logging
import netifaces as ni
supported_apps = ['grok', 'enkf']
msg_args = {
'retrieve_cli_output': ['app_id'],
'update_output': ['app_id', 'output'],
'app_accepted': ['tmp_id', 'app_id'],
'update_app_status': ['app_id', 'status'],
'update_vm_spawning_time': ['app_id', 'time'],
'preprocess': ['input_id', 'input_file', 'group_id', 'input_param'],
'preprocessed': ['group_id', 'app_type', 'app_name', 'app_input_file', 'app_output_file', 'app_params'],
'preprocessing_completed': ['input_id', 'group_id'],
'postprocess': ['group_id', 'apps_ids_list', 'outputs_list'],
'postprocessed': ['group_id', 'output', 'apps_ids_list'],
'create_app': ['web_interface_id', 'app_type', 'app_params'],
'launch_app': ['app_manager_id'],
'const_output_update': ['app_manager_id', 'update_output'],
'delete_app': ['app_manager_id', 'web_interface_id'],
'created_app': ['web_interface_id', 'app_manager_id'],
'expired_app': ['web_interface_id'],
'launched_app': ['web_interface_id'],
'update_info': ['web_interface_id', 'info_type', 'info'],
'app_output_file_ready': ['web_interface_id'],
'deleted_app': ['web_interface_id'],
'new_job': ['web_interface_id', 'app_type', 'app_params'],
}
#get IP address of the specified interface
def getIPAdreess(interface='eth0'):
addr = ni.ifaddresses(interface)[ni.AF_INET][0]['addr']
return addr
#builds a RabbitMQ connection link
def buildRabbitMQConnectionLink(address=getIPAdreess(), protocol='amqp', user='rabbitmq', password='rabbitmq', port='5672'):
connection_link = protocol + '://' + user + ':' + password + '@' + address + ':' + port + '//'
return connection_link
#TODO: add warnings about extra parameters
def createMessage(msg_type, return_queue=None, interface='eth0', **kwargs):
try:
args = msg_args[msg_type]
except KeyError:
print 'Unexpected message type "%s". List of available types: %s' % (msg_type, msg_args.keys())
raise KeyError
try:
for element in args:
kwargs[element]
except KeyError:
print 'Missing "%s" element in kwargs' % element
raise KeyError
return_address = getIPAdreess(interface)
message = {
'msg_type': msg_type,
'return_params': {
'return_address': return_address,
'return_queue': return_queue
},
'kwargs': kwargs,
}
return message
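# Illustrative call (hypothetical values): building an 'update_app_status'
# message. Shown as a comment because createMessage() looks up the local
# interface address at call time.
#
#   msg = createMessage('update_app_status', return_queue='status_q',
#                       app_id='42', status='RUNNING')
#   # msg == {'msg_type': 'update_app_status',
#   #         'return_params': {'return_address': <local IP>,
#   #                           'return_queue': 'status_q'},
#   #         'kwargs': {'app_id': '42', 'status': 'RUNNING'}}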
def messageCheck(message):
if ('msg_type' in message) and ('kwargs' in message):
msg_type = message['msg_type']
kwargs = message['kwargs']
try:
args = msg_args[msg_type]
except KeyError:
print 'Unexpected message type "%s". List of available types: %s' % (msg_type, msg_args.keys())
raise KeyError
try:
for element in args:
kwargs[element]
except KeyError:
print 'Missing "%s" element in kwargs' % element
raise KeyError
return True
else:
return False
class MessageConsumer(object):
def __init__(self, connection_link, queue, callback):
self.queue = queue
self.connection_link = connection_link
self.callback = callback
self.logger = auxiliary.getLogger()
self.logger.info('Connection link: ' + connection_link)
def consumeOneMsg(self):
ret = True
with Connection(self.connection_link) as conn:
with conn.SimpleQueue(self.queue) as simple_queue:
try:
message = simple_queue.get_nowait()
self.logger.info('Message received')
self.callback(message)
message.ack()
except Empty:
ret = False
simple_queue.close()
return ret
def constantConsuming(self):
self.logger.info('Starting constant consuming')
while True:
if not self.consumeOneMsg():
time.sleep(1)
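# Illustrative wiring of MessageProducer/MessageConsumer (queue name and
# callback are hypothetical):
#
#   link = buildRabbitMQConnectionLink()
#   producer = MessageProducer(link, 'claude_jobs')
#   producer.publish(createMessage('launch_app', app_manager_id='am-1'))
#   consumer = MessageConsumer(link, 'claude_jobs', callback=handle_message)
#   consumer.constantConsuming()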
class MessageProducer(object):
def __init__(self, connection_link, queue, logger=None):
self.queue = queue
self.connection_link = connection_link
self.logger = logger
#logger.info('Connection link: ' + connection_link)
def publish(self, message):
with Connection(self.connection_link) as conn:
with conn.SimpleQueue(self.queue) as simple_queue:
simple_queue.put(message)
#self.logger.info('Message sent')
simple_queue.close() | gpl-2.0 | -2,158,500,554,645,272,000 | 35.509804 | 129 | 0.506714 | false |
zjutjsj1004/third | boost/tools/build/src/tools/builtin.py | 1 | 32270 | # Status: minor updates by Steven Watanabe to make gcc work
#
# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
""" Defines standard features and rules.
"""
import b2.build.targets as targets
import sys
from b2.build import feature, property, virtual_target, generators, type, property_set, scanner
from b2.util.utility import *
from b2.util import path, regex, bjam_signature, is_iterable_typed
import b2.tools.types
from b2.manager import get_manager
# Records explicit properties for a variant.
# The key is the variant name.
__variant_explicit_properties = {}
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __variant_explicit_properties
__variant_explicit_properties = {}
@bjam_signature((["name"], ["parents_or_properties", "*"], ["explicit_properties", "*"]))
def variant (name, parents_or_properties, explicit_properties = []):
""" Declares a new variant.
First determines explicit properties for this variant, by
refining parents' explicit properties with the passed explicit
properties. The result is remembered and will be used if
this variant is used as parent.
Second, determines the full property set for this variant by
adding to the explicit properties default values for all properties
which neither present nor are symmetric.
Lastly, makes appropriate value of 'variant' property expand
to the full property set.
name: Name of the variant
parents_or_properties: Specifies parent variants, if
'explicit_properties' are given,
and explicit_properties otherwise.
explicit_properties: Explicit properties.
"""
parents = []
if not explicit_properties:
explicit_properties = parents_or_properties
else:
parents = parents_or_properties
inherited = property_set.empty()
if parents:
        # If we allowed multiple parents, we'd have to check for conflicts
        # between base variants, and there has been no demand to justify the effort.
if len (parents) > 1:
raise BaseException ("Multiple base variants are not yet supported")
p = parents[0]
# TODO: the check may be stricter
if not feature.is_implicit_value (p):
raise BaseException ("Invalid base variant '%s'" % p)
inherited = __variant_explicit_properties[p]
explicit_properties = property_set.create_with_validation(explicit_properties)
explicit_properties = inherited.refine(explicit_properties)
# Record explicitly specified properties for this variant
# We do this after inheriting parents' properties, so that
# they affect other variants, derived from this one.
__variant_explicit_properties[name] = explicit_properties
feature.extend('variant', [name])
feature.compose ("<variant>" + name, explicit_properties.all())
__os_names = """
amiga aix bsd cygwin darwin dos emx freebsd hpux iphone linux netbsd
openbsd osf qnx qnxnto sgi solaris sun sunos svr4 sysv ultrix unix unixware
vms windows
""".split()
# Translates from bjam current OS to the os tags used in host-os and target-os,
# i.e. returns the running host-os.
#
def default_host_os():
host_os = os_name()
if host_os not in (x.upper() for x in __os_names):
if host_os == 'NT': host_os = 'windows'
elif host_os == 'AS400': host_os = 'unix'
elif host_os == 'MINGW': host_os = 'windows'
elif host_os == 'BSDI': host_os = 'bsd'
elif host_os == 'COHERENT': host_os = 'unix'
elif host_os == 'DRAGONFLYBSD': host_os = 'bsd'
elif host_os == 'IRIX': host_os = 'sgi'
elif host_os == 'MACOSX': host_os = 'darwin'
elif host_os == 'KFREEBSD': host_os = 'freebsd'
elif host_os == 'LINUX': host_os = 'linux'
elif host_os == 'HAIKU': host_os = 'haiku'
else: host_os = 'unix'
return host_os.lower()
def register_globals ():
""" Registers all features and variants declared by this module.
"""
# This feature is used to determine which OS we're on.
# In future, this may become <target-os> and <host-os>
# TODO: check this. Compatibility with bjam names? Subfeature for version?
os = sys.platform
feature.feature ('os', [os], ['propagated', 'link-incompatible'])
# The two OS features define a known set of abstract OS names. The host-os is
# the OS under which bjam is running. Even though this should really be a fixed
# property we need to list all the values to prevent unknown value errors. Both
# set the default value to the current OS to account for the default use case of
# building on the target OS.
feature.feature('host-os', __os_names)
feature.set_default('host-os', default_host_os())
feature.feature('target-os', __os_names, ['propagated', 'link-incompatible'])
feature.set_default('target-os', default_host_os())
feature.feature ('toolset', [], ['implicit', 'propagated' ,'symmetric'])
feature.feature ('stdlib', ['native'], ['propagated', 'composite'])
feature.feature ('link', ['shared', 'static'], ['propagated'])
feature.feature ('runtime-link', ['shared', 'static'], ['propagated'])
feature.feature ('runtime-debugging', ['on', 'off'], ['propagated'])
feature.feature ('optimization', ['off', 'speed', 'space'], ['propagated'])
feature.feature ('profiling', ['off', 'on'], ['propagated'])
feature.feature ('inlining', ['off', 'on', 'full'], ['propagated'])
feature.feature ('threading', ['single', 'multi'], ['propagated'])
feature.feature ('rtti', ['on', 'off'], ['propagated'])
feature.feature ('exception-handling', ['on', 'off'], ['propagated'])
# Whether there is support for asynchronous EH (e.g. catching SEGVs).
feature.feature ('asynch-exceptions', ['on', 'off'], ['propagated'])
# Whether all extern "C" functions are considered nothrow by default.
feature.feature ('extern-c-nothrow', ['off', 'on'], ['propagated'])
feature.feature ('debug-symbols', ['on', 'off'], ['propagated'])
feature.feature ('define', [], ['free'])
feature.feature ('undef', [], ['free'])
feature.feature ('include', [], ['free', 'path']) #order-sensitive
feature.feature ('cflags', [], ['free'])
feature.feature ('cxxflags', [], ['free'])
feature.feature ('asmflags', [], ['free'])
feature.feature ('linkflags', [], ['free'])
feature.feature ('archiveflags', [], ['free'])
feature.feature ('version', [], ['free'])
feature.feature ('location-prefix', [], ['free'])
feature.feature ('action', [], ['free'])
# The following features are incidental, since
    # in themselves they have no effect on build products.
# Not making them incidental will result in problems in corner
# cases, for example:
#
# unit-test a : a.cpp : <use>b ;
# lib b : a.cpp b ;
#
# Here, if <use> is not incidental, we'll decide we have two
# targets for a.obj with different properties, and will complain.
#
# Note that making feature incidental does not mean it's ignored. It may
# be ignored when creating the virtual target, but the rest of build process
# will use them.
feature.feature ('use', [], ['free', 'dependency', 'incidental'])
feature.feature ('dependency', [], ['free', 'dependency', 'incidental'])
feature.feature ('implicit-dependency', [], ['free', 'dependency', 'incidental'])
feature.feature('warnings', [
'on', # Enable default/"reasonable" warning level for the tool.
'all', # Enable all possible warnings issued by the tool.
'off'], # Disable all warnings issued by the tool.
['incidental', 'propagated'])
feature.feature('warnings-as-errors', [
'off', # Do not fail the compilation if there are warnings.
'on'], # Fail the compilation if there are warnings.
['incidental', 'propagated'])
feature.feature('c++-template-depth',
[str(i) for i in range(64,1024+1,64)] +
[str(i) for i in range(20,1000+1,10)] +
# Maximum template instantiation depth guaranteed for ANSI/ISO C++
# conforming programs.
['17'],
['incidental', 'optional', 'propagated'])
feature.feature ('source', [], ['free', 'dependency', 'incidental'])
feature.feature ('library', [], ['free', 'dependency', 'incidental'])
feature.feature ('file', [], ['free', 'dependency', 'incidental'])
feature.feature ('find-shared-library', [], ['free']) #order-sensitive ;
feature.feature ('find-static-library', [], ['free']) #order-sensitive ;
feature.feature ('library-path', [], ['free', 'path']) #order-sensitive ;
# Internal feature.
feature.feature ('library-file', [], ['free', 'dependency'])
feature.feature ('name', [], ['free'])
feature.feature ('tag', [], ['free'])
feature.feature ('search', [], ['free', 'path']) #order-sensitive ;
feature.feature ('location', [], ['free', 'path'])
feature.feature ('dll-path', [], ['free', 'path'])
feature.feature ('hardcode-dll-paths', ['true', 'false'], ['incidental'])
    # This is an internal feature which holds the paths of all dependency
    # dynamic libraries. On Windows, it's needed so that we can add all
    # those paths to PATH when running applications.
# On Linux, it's needed to add proper -rpath-link command line options.
feature.feature ('xdll-path', [], ['free', 'path'])
    # Provides means to specify a def-file for Windows DLLs.
feature.feature ('def-file', [], ['free', 'dependency'])
# This feature is used to allow specific generators to run.
# For example, QT tools can only be invoked when QT library
# is used. In that case, <allow>qt will be in usage requirement
# of the library.
feature.feature ('allow', [], ['free'])
# The addressing model to generate code for. Currently a limited set only
# specifying the bit size of pointers.
feature.feature('address-model', ['16', '32', '64'], ['propagated', 'optional'])
# Type of CPU architecture to compile for.
feature.feature('architecture', [
# x86 and x86-64
'x86',
# ia64
'ia64',
# Sparc
'sparc',
# RS/6000 & PowerPC
'power',
# MIPS/SGI
'mips1', 'mips2', 'mips3', 'mips4', 'mips32', 'mips32r2', 'mips64',
# HP/PA-RISC
'parisc',
# Advanced RISC Machines
'arm',
# Combined architectures for platforms/toolsets that support building for
# multiple architectures at once. "combined" would be the default multi-arch
# for the toolset.
'combined',
'combined-x86-power'],
['propagated', 'optional'])
# The specific instruction set in an architecture to compile.
feature.feature('instruction-set', [
# x86 and x86-64
'native', 'i486', 'i586', 'i686', 'pentium', 'pentium-mmx', 'pentiumpro', 'pentium2', 'pentium3',
'pentium3m', 'pentium-m', 'pentium4', 'pentium4m', 'prescott', 'nocona', 'core2', 'corei7', 'corei7-avx', 'core-avx-i',
'conroe', 'conroe-xe', 'conroe-l', 'allendale', 'merom', 'merom-xe', 'kentsfield', 'kentsfield-xe', 'penryn', 'wolfdale',
'yorksfield', 'nehalem', 'sandy-bridge', 'ivy-bridge', 'haswell', 'k6', 'k6-2', 'k6-3', 'athlon', 'athlon-tbird', 'athlon-4', 'athlon-xp',
'athlon-mp', 'k8', 'opteron', 'athlon64', 'athlon-fx', 'k8-sse3', 'opteron-sse3', 'athlon64-sse3', 'amdfam10', 'barcelona',
'bdver1', 'bdver2', 'bdver3', 'btver1', 'btver2', 'winchip-c6', 'winchip2', 'c3', 'c3-2', 'atom',
# ia64
'itanium', 'itanium1', 'merced', 'itanium2', 'mckinley',
# Sparc
'v7', 'cypress', 'v8', 'supersparc', 'sparclite', 'hypersparc', 'sparclite86x', 'f930', 'f934',
'sparclet', 'tsc701', 'v9', 'ultrasparc', 'ultrasparc3',
# RS/6000 & PowerPC
'401', '403', '405', '405fp', '440', '440fp', '505', '601', '602',
'603', '603e', '604', '604e', '620', '630', '740', '7400',
'7450', '750', '801', '821', '823', '860', '970', '8540',
'power-common', 'ec603e', 'g3', 'g4', 'g5', 'power', 'power2',
'power3', 'power4', 'power5', 'powerpc', 'powerpc64', 'rios',
'rios1', 'rsc', 'rios2', 'rs64a',
# MIPS
'4kc', '4kp', '5kc', '20kc', 'm4k', 'r2000', 'r3000', 'r3900', 'r4000',
'r4100', 'r4300', 'r4400', 'r4600', 'r4650',
'r6000', 'r8000', 'rm7000', 'rm9000', 'orion', 'sb1', 'vr4100',
'vr4111', 'vr4120', 'vr4130', 'vr4300',
'vr5000', 'vr5400', 'vr5500',
# HP/PA-RISC
'700', '7100', '7100lc', '7200', '7300', '8000',
# Advanced RISC Machines
'armv2', 'armv2a', 'armv3', 'armv3m', 'armv4', 'armv4t', 'armv5',
'armv5t', 'armv5te', 'armv6', 'armv6j', 'iwmmxt', 'ep9312'],
['propagated', 'optional'])
feature.feature('conditional', [], ['incidental', 'free'])
# The value of 'no' prevents building of a target.
feature.feature('build', ['yes', 'no'], ['optional'])
# Windows-specific features
feature.feature ('user-interface', ['console', 'gui', 'wince', 'native', 'auto'], [])
feature.feature ('variant', [], ['implicit', 'composite', 'propagated', 'symmetric'])
variant ('debug', ['<optimization>off', '<debug-symbols>on', '<inlining>off', '<runtime-debugging>on'])
variant ('release', ['<optimization>speed', '<debug-symbols>off', '<inlining>full',
'<runtime-debugging>off', '<define>NDEBUG'])
variant ('profile', ['release'], ['<profiling>on', '<debug-symbols>on'])
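    # Illustrative only: a project-specific variant could be declared the same
    # way, e.g. a hypothetical 'release-symbols' variant derived from 'release':
    #
    #   variant('release-symbols', ['release'], ['<debug-symbols>on'])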
reset ()
register_globals ()
class SearchedLibTarget (virtual_target.AbstractFileTarget):
def __init__ (self, name, project, shared, search, action):
virtual_target.AbstractFileTarget.__init__ (self, name, 'SEARCHED_LIB', project, action)
self.shared_ = shared
self.search_ = search
def shared (self):
return self.shared_
def search (self):
return self.search_
def actualize_location (self, target):
bjam.call("NOTFILE", target)
def path (self):
#FIXME: several functions rely on this not being None
return ""
class CScanner (scanner.Scanner):
def __init__ (self, includes):
scanner.Scanner.__init__ (self)
self.includes_ = []
for i in includes:
self.includes_.extend(i.split("&&"))
def pattern (self):
return r'#[ \t]*include[ ]*(<(.*)>|"(.*)")'
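    # Illustrative matches for the pattern above: '#include <vector>' captures
    # 'vector' through the angle-bracket group, while '#include "util.h"'
    # captures 'util.h' through the quoted group.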
def process (self, target, matches, binding):
angle = regex.transform (matches, "<(.*)>")
quoted = regex.transform (matches, '"(.*)"')
g = str(id(self))
b = os.path.normpath(os.path.dirname(binding[0]))
# Attach binding of including file to included targets.
# When target is directly created from virtual target
# this extra information is unnecessary. But in other
        # cases, it allows us to distinguish between two headers of the
# same name included from different places.
# We don't need this extra information for angle includes,
        # since they should not depend on the including file (we can't
# get literal "." in include path).
g2 = g + "#" + b
g = "<" + g + ">"
g2 = "<" + g2 + ">"
angle = [g + x for x in angle]
quoted = [g2 + x for x in quoted]
all = angle + quoted
bjam.call("mark-included", target, all)
engine = get_manager().engine()
engine.set_target_variable(angle, "SEARCH", get_value(self.includes_))
engine.set_target_variable(quoted, "SEARCH", [b] + get_value(self.includes_))
# Just propagate current scanner to includes, in a hope
# that includes do not change scanners.
get_manager().scanners().propagate(self, angle + quoted)
scanner.register (CScanner, 'include')
type.set_scanner ('CPP', CScanner)
type.set_scanner ('C', CScanner)
# Ported to trunk@47077
class LibGenerator (generators.Generator):
""" The generator class for libraries (target type LIB). Depending on properties it will
request building of the approapriate specific type -- SHARED_LIB, STATIC_LIB or
SHARED_LIB.
"""
def __init__(self, id, composing = True, source_types = [], target_types_and_names = ['LIB'], requirements = []):
generators.Generator.__init__(self, id, composing, source_types, target_types_and_names, requirements)
def run(self, project, name, prop_set, sources):
assert isinstance(project, targets.ProjectTarget)
assert isinstance(name, basestring) or name is None
assert isinstance(prop_set, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
        # The lib generator is composing, and can only be invoked with an
        # explicit name. This check is present in generator.run (and so in
        # builtin.LinkingGenerator), but we duplicate it here to avoid doing
# extra work.
if name:
properties = prop_set.raw()
# Determine the needed target type
actual_type = None
properties_grist = get_grist(properties)
if '<source>' not in properties_grist and \
('<search>' in properties_grist or '<name>' in properties_grist):
actual_type = 'SEARCHED_LIB'
elif '<file>' in properties_grist:
# The generator for
actual_type = 'LIB'
elif '<link>shared' in properties:
actual_type = 'SHARED_LIB'
else:
actual_type = 'STATIC_LIB'
prop_set = prop_set.add_raw(['<main-target-type>LIB'])
# Construct the target.
return generators.construct(project, name, actual_type, prop_set, sources)
def viable_source_types(self):
return ['*']
generators.register(LibGenerator("builtin.lib-generator"))
generators.override("builtin.prebuilt", "builtin.lib-generator")
def lib(names, sources=[], requirements=[], default_build=[], usage_requirements=[]):
"""The implementation of the 'lib' rule. Beyond standard syntax that rule allows
simplified: 'lib a b c ;'."""
assert is_iterable_typed(names, basestring)
assert is_iterable_typed(sources, basestring)
assert is_iterable_typed(requirements, basestring)
assert is_iterable_typed(default_build, basestring)
assert is_iterable_typed(usage_requirements, basestring)
if len(names) > 1:
if any(r.startswith('<name>') for r in requirements):
get_manager().errors()("When several names are given to the 'lib' rule\n" +
"it is not allowed to specify the <name> feature.")
if sources:
get_manager().errors()("When several names are given to the 'lib' rule\n" +
"it is not allowed to specify sources.")
project = get_manager().projects().current()
result = []
for name in names:
r = requirements[:]
# Support " lib a ; " and " lib a b c ; " syntax.
if not sources and not any(r.startswith("<name>") for r in requirements) \
and not any(r.startswith("<file") for r in requirements):
r.append("<name>" + name)
result.append(targets.create_typed_metatarget(name, "LIB", sources,
r,
default_build,
usage_requirements))
return result
get_manager().projects().add_rule("lib", lib)
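# Illustrative Jamfile usage handled by the rule above (library names are
# examples only): 'lib z png ;' declares two searched libraries at once, while
# 'lib util : util.cpp ;' builds a library from sources.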
# Updated to trunk@47077
class SearchedLibGenerator (generators.Generator):
def __init__ (self, id = 'SearchedLibGenerator', composing = False, source_types = [], target_types_and_names = ['SEARCHED_LIB'], requirements = []):
# TODO: the comment below looks strange. There are no requirements!
# The requirements cause the generators to be tried *only* when we're building
# lib target and there's 'search' feature. This seems ugly --- all we want
# is make sure SearchedLibGenerator is not invoked deep in transformation
# search.
generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements)
def run(self, project, name, prop_set, sources):
assert isinstance(project, targets.ProjectTarget)
assert isinstance(name, basestring) or name is None
assert isinstance(prop_set, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
if not name:
return None
        # If name is empty, it means we're not being called from the top level.
# In this case, we just fail immediately, because SearchedLibGenerator
# cannot be used to produce intermediate targets.
properties = prop_set.raw ()
shared = '<link>shared' in properties
a = virtual_target.NullAction (project.manager(), prop_set)
real_name = feature.get_values ('<name>', properties)
if real_name:
real_name = real_name[0]
else:
real_name = name
search = feature.get_values('<search>', properties)
usage_requirements = property_set.create(['<xdll-path>' + p for p in search])
t = SearchedLibTarget(real_name, project, shared, search, a)
# We return sources for a simple reason. If there's
# lib png : z : <name>png ;
# the 'z' target should be returned, so that apps linking to
# 'png' will link to 'z', too.
return(usage_requirements, [b2.manager.get_manager().virtual_targets().register(t)] + sources)
generators.register (SearchedLibGenerator ())
class PrebuiltLibGenerator(generators.Generator):
def __init__(self, id, composing, source_types, target_types_and_names, requirements):
generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements)
def run(self, project, name, properties, sources):
assert isinstance(project, targets.ProjectTarget)
assert isinstance(name, basestring)
assert isinstance(properties, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
f = properties.get("file")
return f + sources
generators.register(PrebuiltLibGenerator("builtin.prebuilt", False, [],
["LIB"], ["<file>"]))
generators.override("builtin.prebuilt", "builtin.lib-generator")
class CompileAction (virtual_target.Action):
def __init__ (self, manager, sources, action_name, prop_set):
virtual_target.Action.__init__ (self, manager, sources, action_name, prop_set)
def adjust_properties (self, prop_set):
""" For all virtual targets for the same dependency graph as self,
i.e. which belong to the same main target, add their directories
to include path.
"""
assert isinstance(prop_set, property_set.PropertySet)
s = self.targets () [0].creating_subvariant ()
return prop_set.add_raw (s.implicit_includes ('include', 'H'))
class CCompilingGenerator (generators.Generator):
""" Declare a special compiler generator.
The only thing it does is changing the type used to represent
'action' in the constructed dependency graph to 'CompileAction'.
That class in turn adds additional include paths to handle a case
    when a source file includes headers which are themselves generated.
"""
def __init__ (self, id, composing, source_types, target_types_and_names, requirements):
# TODO: (PF) What to do with optional_properties? It seemed that, in the bjam version, the arguments are wrong.
generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements)
def action_class (self):
return CompileAction
def register_c_compiler (id, source_types, target_types, requirements, optional_properties = []):
g = CCompilingGenerator (id, False, source_types, target_types, requirements + optional_properties)
return generators.register (g)
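# Illustrative toolset registration (toolset name and properties are
# hypothetical); toolset modules call this to plug their compile actions in:
#
#   register_c_compiler('mytool.compile.c++', ['CPP'], ['OBJ'],
#                       ['<toolset>mytool'])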
class LinkingGenerator (generators.Generator):
""" The generator class for handling EXE and SHARED_LIB creation.
"""
def __init__ (self, id, composing, source_types, target_types_and_names, requirements):
generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements)
def run (self, project, name, prop_set, sources):
assert isinstance(project, targets.ProjectTarget)
assert isinstance(name, basestring) or name is None
assert isinstance(prop_set, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
sources.extend(prop_set.get('<library>'))
# Add <library-path> properties for all searched libraries
extra = []
for s in sources:
if s.type () == 'SEARCHED_LIB':
search = s.search()
extra.extend(property.Property('<library-path>', sp) for sp in search)
        # It's possible that we have libraries in sources which did not come
        # from a 'lib' target. For example, libraries which are specified
        # just as filenames as sources. We don't have xdll-path properties
        # for such targets, but still need to add proper dll-path properties.
extra_xdll_path = []
for s in sources:
if type.is_derived (s.type (), 'SHARED_LIB') and not s.action ():
# Unfortunately, we don't have a good way to find the path
# to a file, so use this nasty approach.
p = s.project()
location = path.root(s.name(), p.get('source-location')[0])
extra_xdll_path.append(os.path.dirname(location))
# Hardcode DLL paths only when linking executables.
# Pros: do not need to relink libraries when installing.
# Cons: "standalone" libraries (plugins, python extensions) can not
# hardcode paths to dependent libraries.
if prop_set.get('<hardcode-dll-paths>') == ['true'] \
and type.is_derived(self.target_types_ [0], 'EXE'):
xdll_path = prop_set.get('<xdll-path>')
extra.extend(property.Property('<dll-path>', sp) \
for sp in extra_xdll_path)
extra.extend(property.Property('<dll-path>', sp) \
for sp in xdll_path)
if extra:
prop_set = prop_set.add_raw (extra)
result = generators.Generator.run(self, project, name, prop_set, sources)
if result:
ur = self.extra_usage_requirements(result, prop_set)
ur = ur.add(property_set.create(['<xdll-path>' + p for p in extra_xdll_path]))
else:
return None
return (ur, result)
def extra_usage_requirements (self, created_targets, prop_set):
assert is_iterable_typed(created_targets, virtual_target.VirtualTarget)
assert isinstance(prop_set, property_set.PropertySet)
result = property_set.empty ()
extra = []
# Add appropriate <xdll-path> usage requirements.
raw = prop_set.raw ()
if '<link>shared' in raw:
paths = []
# TODO: is it safe to use the current directory? I think we should use
# another mechanism to allow this to be run from anywhere.
pwd = os.getcwd()
for t in created_targets:
if type.is_derived(t.type(), 'SHARED_LIB'):
paths.append(path.root(path.make(t.path()), pwd))
extra += replace_grist(paths, '<xdll-path>')
# We need to pass <xdll-path> features that we've got from sources,
# because if shared library is built, exe which uses it must know paths
# to other shared libraries this one depends on, to be able to find them
# all at runtime.
            # Just pass all features in property_set. It's theoretically possible
            # that we'll propagate <xdll-path> features explicitly specified by
            # the user, but then the user is to blame for using an internal feature.
values = prop_set.get('<xdll-path>')
extra += replace_grist(values, '<xdll-path>')
if extra:
result = property_set.create(extra)
return result
def generated_targets (self, sources, prop_set, project, name):
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
assert isinstance(prop_set, property_set.PropertySet)
assert isinstance(project, targets.ProjectTarget)
assert isinstance(name, basestring)
# sources to pass to inherited rule
sources2 = []
# sources which are libraries
libraries = []
        # Searched libraries are not passed as arguments to the linker
        # but via some option. So, we pass them to the action
        # via a property.
fsa = []
fst = []
for s in sources:
if type.is_derived(s.type(), 'SEARCHED_LIB'):
n = s.name()
if s.shared():
fsa.append(n)
else:
fst.append(n)
else:
sources2.append(s)
add = []
if fsa:
add.append("<find-shared-library>" + '&&'.join(fsa))
if fst:
add.append("<find-static-library>" + '&&'.join(fst))
spawn = generators.Generator.generated_targets(self, sources2, prop_set.add_raw(add), project, name)
return spawn
def register_linker(id, source_types, target_types, requirements):
g = LinkingGenerator(id, True, source_types, target_types, requirements)
generators.register(g)
class ArchiveGenerator (generators.Generator):
""" The generator class for handling STATIC_LIB creation.
"""
def __init__ (self, id, composing, source_types, target_types_and_names, requirements):
generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements)
def run (self, project, name, prop_set, sources):
sources += prop_set.get ('<library>')
result = generators.Generator.run (self, project, name, prop_set, sources)
return result
def register_archiver(id, source_types, target_types, requirements):
g = ArchiveGenerator(id, True, source_types, target_types, requirements)
generators.register(g)
class DummyGenerator(generators.Generator):
"""Generator that accepts everything and produces nothing. Useful as a general
fallback for toolset-specific actions like PCH generation.
"""
def run (self, project, name, prop_set, sources):
return (property_set.empty(), [])
get_manager().projects().add_rule("variant", variant)
import stage
import symlink
import message
| mit | 3,333,964,424,337,424,400 | 40.741722 | 153 | 0.603843 | false |
zak-k/iris | lib/iris/tests/test_coding_standards.py | 1 | 17046 | # (C) British Crown Copyright 2013 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
from datetime import datetime
from fnmatch import fnmatch
from glob import glob
from itertools import chain
import os
import re
import subprocess
import pep8
import iris
LICENSE_TEMPLATE = """
# (C) British Crown Copyright {YEARS}, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.""".strip()
LICENSE_RE_PATTERN = re.escape(LICENSE_TEMPLATE).replace('\{YEARS\}', '(.*?)')
# Add shebang possibility to the LICENSE_RE_PATTERN
LICENSE_RE_PATTERN = r'(\#\!.*\n)?' + LICENSE_RE_PATTERN
LICENSE_RE = re.compile(LICENSE_RE_PATTERN, re.MULTILINE)
# Guess iris repo directory of Iris - realpath is used to mitigate against
# Python finding the iris package via a symlink.
IRIS_DIR = os.path.realpath(os.path.dirname(iris.__file__))
REPO_DIR = os.path.dirname(os.path.dirname(IRIS_DIR))
DOCS_DIR = os.path.join(REPO_DIR, 'docs', 'iris')
DOCS_DIR = iris.config.get_option('Resources', 'doc_dir', default=DOCS_DIR)
exclusion = ['Makefile', 'build']
DOCS_DIRS = glob(os.path.join(DOCS_DIR, '*'))
DOCS_DIRS = [DOC_DIR for DOC_DIR in DOCS_DIRS if os.path.basename(DOC_DIR) not
in exclusion]
# pycodestyle / pep8 error codes that should be ignored:
PYCODESTYLE_IGNORE_OPTIONS = (
# "Module level import not at top of file" - due to conditional imports
'E402',
)
class StandardReportWithExclusions(pep8.StandardReport):
expected_bad_files = [
'*/iris/std_names.py',
'*/iris/analysis/_interpolate_private.py',
'*/iris/fileformats/cf.py',
'*/iris/fileformats/dot.py',
'*/iris/fileformats/grib/__init__.py',
'*/iris/fileformats/grib/_grib_cf_map.py',
'*/iris/fileformats/grib/load_rules.py',
'*/iris/fileformats/pp_rules.py',
'*/iris/fileformats/rules.py',
'*/iris/fileformats/um_cf_map.py',
'*/iris/fileformats/_pyke_rules/compiled_krb/compiled_pyke_files.py',
'*/iris/fileformats/_pyke_rules/compiled_krb/fc_rules_cf_fc.py',
'*/iris/io/__init__.py',
'*/iris/io/format_picker.py',
'*/iris/tests/__init__.py',
'*/iris/tests/pp.py',
'*/iris/tests/stock.py',
'*/iris/tests/system_test.py',
'*/iris/tests/test_analysis.py',
'*/iris/tests/test_analysis_calculus.py',
'*/iris/tests/test_basic_maths.py',
'*/iris/tests/test_cartography.py',
'*/iris/tests/test_cdm.py',
'*/iris/tests/test_cell.py',
'*/iris/tests/test_cf.py',
'*/iris/tests/test_constraints.py',
'*/iris/tests/test_coord_api.py',
'*/iris/tests/test_coord_categorisation.py',
'*/iris/tests/test_coordsystem.py',
'*/iris/tests/test_cube_to_pp.py',
'*/iris/tests/test_file_load.py',
'*/iris/tests/test_file_save.py',
'*/iris/tests/test_grib_save.py',
'*/iris/tests/test_grib_save_rules.py',
'*/iris/tests/test_hybrid.py',
'*/iris/tests/test_interpolation.py',
'*/iris/tests/test_intersect.py',
'*/iris/tests/test_io_init.py',
'*/iris/tests/test_iterate.py',
'*/iris/tests/test_load.py',
'*/iris/tests/test_merge.py',
'*/iris/tests/test_pickling.py',
'*/iris/tests/test_pp_cf.py',
'*/iris/tests/test_pp_module.py',
'*/iris/tests/test_pp_stash.py',
'*/iris/tests/test_pp_to_cube.py',
'*/iris/tests/test_quickplot.py',
'*/iris/tests/test_regrid.py',
'*/iris/tests/test_std_names.py',
'*/iris/tests/test_unit.py',
'*/iris/tests/test_uri_callback.py',
'*/iris/tests/test_util.py']
# Auto-generated by install process, though not always.
optional_bad_files = ['*/iris/fileformats/_old_pp_packing.py']
expected_bad_files += optional_bad_files
if DOCS_DIRS:
expected_bad_docs_files = [
'*/src/conf.py',
'*/src/developers_guide/gitwash_dumper.py']
expected_bad_files += expected_bad_docs_files
matched_exclusions = set()
def get_file_results(self):
# If the file had no errors, return self.file_errors (which will be 0)
if not self._deferred_print:
return self.file_errors
        # Iterate over all of the patterns, to find a possible exclusion. If
        # the filename is to be excluded, go ahead and remove the counts that
# self.error added.
for pattern in self.expected_bad_files:
if fnmatch(self.filename, pattern):
self.matched_exclusions.add(pattern)
# invert the error method's counters.
for _, _, code, _, _ in self._deferred_print:
self.counters[code] -= 1
if self.counters[code] == 0:
self.counters.pop(code)
self.messages.pop(code)
self.file_errors -= 1
self.total_errors -= 1
return self.file_errors
# Otherwise call the superclass' method to print the bad results.
return super(StandardReportWithExclusions,
self).get_file_results()
class TestCodeFormat(tests.IrisTest):
def test_pep8_conformance(self):
#
# Tests the iris codebase against the "pep8" tool.
#
# Users can add their own excluded files (should files exist in the
# local directory which is not in the repository) by adding a
# ".pep8_test_exclude.txt" file in the same directory as this test.
# The file should be a line separated list of filenames/directories
# as can be passed to the "pep8" tool's exclude list.
# To get a list of bad files, rather than the specific errors, add
# "reporter=pep8.FileReport" to the StyleGuide constructor.
pep8style = pep8.StyleGuide(quiet=False,
reporter=StandardReportWithExclusions)
pep8style.options.ignore += PYCODESTYLE_IGNORE_OPTIONS
# Allow users to add their own exclude list.
extra_exclude_file = os.path.join(os.path.dirname(__file__),
'.pep8_test_exclude.txt')
if os.path.exists(extra_exclude_file):
with open(extra_exclude_file, 'r') as fh:
extra_exclude = [line.strip() for line in fh if line.strip()]
pep8style.options.exclude.extend(extra_exclude)
check_paths = [os.path.dirname(iris.__file__)]
if DOCS_DIRS:
check_paths.extend(DOCS_DIRS)
result = pep8style.check_files(check_paths)
self.assertEqual(result.total_errors, 0, "Found code syntax "
"errors (and warnings).")
reporter = pep8style.options.reporter
# If we've been using the exclusions reporter, check that we didn't
# exclude files unnecessarily.
if reporter is StandardReportWithExclusions:
unexpectedly_good = sorted(set(reporter.expected_bad_files) -
set(reporter.optional_bad_files) -
reporter.matched_exclusions)
if unexpectedly_good:
self.fail('Some exclude patterns were unnecessary as the '
'files they pointed to either passed the PEP8 tests '
'or do not point to a file:\n '
'{}'.format('\n '.join(unexpectedly_good)))
class TestLicenseHeaders(tests.IrisTest):
@staticmethod
def years_of_license_in_file(fh):
"""
Using :data:`LICENSE_RE` look for the years defined in the license
header of the given file handle.
If the license cannot be found in the given fh, None will be returned,
else a tuple of (start_year, end_year) will be returned.
"""
license_matches = LICENSE_RE.match(fh.read())
if not license_matches:
# no license found in file.
return None
years = license_matches.groups()[-1]
if len(years) == 4:
start_year = end_year = int(years)
elif len(years) == 11:
start_year, end_year = int(years[:4]), int(years[7:])
else:
fname = getattr(fh, 'name', 'unknown filename')
raise ValueError("Unexpected year(s) string in {}'s copyright "
"notice: {!r}".format(fname, years))
return (start_year, end_year)
@staticmethod
def whatchanged_parse(whatchanged_output):
"""
Returns a generator of tuples of data parsed from
"git whatchanged --pretty='TIME:%at". The tuples are of the form
``(filename, last_commit_datetime)``
Sample input::
['TIME:1366884020', '',
':000000 100644 0000000... 5862ced... A\tlib/iris/cube.py']
"""
dt = None
for line in whatchanged_output:
if not line.strip():
continue
elif line.startswith('TIME:'):
dt = datetime.fromtimestamp(int(line[5:]))
else:
# Non blank, non date, line -> must be the lines
# containing the file info.
fname = ' '.join(line.split('\t')[1:])
yield fname, dt
@staticmethod
def last_change_by_fname():
"""
Return a dictionary of all the files under git which maps to
the datetime of their last modification in the git history.
.. note::
This function raises a ValueError if the repo root does
not have a ".git" folder. If git is not installed on the system,
or cannot be found by subprocess, an IOError may also be raised.
"""
# Check the ".git" folder exists at the repo dir.
if not os.path.isdir(os.path.join(REPO_DIR, '.git')):
raise ValueError('{} is not a git repository.'.format(REPO_DIR))
# Call "git whatchanged" to get the details of all the files and when
# they were last changed.
output = subprocess.check_output(['git', 'whatchanged',
"--pretty=TIME:%ct"],
cwd=REPO_DIR)
output = output.decode().split('\n')
res = {}
for fname, dt in TestLicenseHeaders.whatchanged_parse(output):
if fname not in res or dt > res[fname]:
res[fname] = dt
return res
def test_license_headers(self):
exclude_patterns = ('setup.py',
'build/*',
'dist/*',
'docs/iris/example_code/*/*.py',
'docs/iris/src/developers_guide/documenting/*.py',
'docs/iris/src/sphinxext/gen_gallery.py',
'docs/iris/src/userguide/plotting_examples/*.py',
'docs/iris/src/userguide/regridding_plots/*.py',
'docs/iris/src/developers_guide/gitwash_dumper.py',
'docs/iris/build/*',
'lib/iris/analysis/_scipy_interpolate.py',
'lib/iris/fileformats/_pyke_rules/*',
'lib/iris/fileformats/grib/_grib_cf_map.py')
try:
last_change_by_fname = self.last_change_by_fname()
except ValueError:
# Caught the case where this is not a git repo.
return self.skipTest('Iris installation did not look like a '
'git repo.')
failed = False
for fname, last_change in sorted(last_change_by_fname.items()):
full_fname = os.path.join(REPO_DIR, fname)
if full_fname.endswith('.py') and os.path.isfile(full_fname) and \
not any(fnmatch(fname, pat) for pat in exclude_patterns):
with open(full_fname) as fh:
years = TestLicenseHeaders.years_of_license_in_file(fh)
if years is None:
print('The file {} has no valid header license and '
'has not been excluded from the license header '
'test.'.format(fname))
failed = True
elif last_change.year > years[1]:
print('The file header at {} is out of date. The last'
' commit was in {}, but the copyright states it'
' was {}.'.format(fname, last_change.year,
years[1]))
failed = True
if failed:
raise ValueError('There were license header failures. See stdout.')
class TestFutureImports(tests.IrisTest):
excluded = (
'*/iris/fileformats/_old_pp_packing.py',
'*/iris/fileformats/_pyke_rules/__init__.py',
'*/iris/fileformats/_pyke_rules/compiled_krb/__init__.py',
'*/iris/fileformats/_pyke_rules/compiled_krb/compiled_pyke_files.py',
'*/iris/fileformats/_pyke_rules/compiled_krb/fc_rules_cf_fc.py',
'*/docs/iris/example_code/*/*.py',
'*/docs/iris/src/examples/*/*.py',
'*/docs/iris/src/developers_guide/documenting/*.py',
)
future_imports_pattern = re.compile(
r"^from __future__ import \(absolute_import,\s*division,\s*"
r"print_function(,\s*unicode_literals)?\)$",
flags=re.MULTILINE)
six_import_pattern = re.compile(
r"^from six.moves import \(filter, input, map, range, zip\) # noqa$",
flags=re.MULTILINE)
def test_future_imports(self):
# Tests that every single Python file includes the appropriate
# __future__ import to enforce consistent behaviour.
check_paths = [os.path.dirname(iris.__file__)]
if DOCS_DIRS:
check_paths.extend(DOCS_DIRS)
failed = False
for dirpath, _, files in chain.from_iterable(os.walk(path)
for path in check_paths):
for fname in files:
full_fname = os.path.join(dirpath, fname)
if not full_fname.endswith('.py'):
continue
if not os.path.isfile(full_fname):
continue
if any(fnmatch(full_fname, pat) for pat in self.excluded):
continue
with open(full_fname, "r") as fh:
content = fh.read()
if re.search(self.future_imports_pattern, content) is None:
print('The file {} has no valid __future__ imports '
'and has not been excluded from the imports '
'test.'.format(full_fname))
failed = True
if re.search(self.six_import_pattern, content) is None:
print('The file {} has no valid six import '
'and has not been excluded from the imports '
'test.'.format(full_fname))
failed = True
if failed:
raise AssertionError('There were Python 3 compatibility import '
'check failures. See stdout.')
if __name__ == '__main__':
tests.main()
| gpl-3.0 | 2,708,981,606,005,563,000 | 40.373786 | 79 | 0.571688 | false |
maikodaraine/EnlightenmentUbuntu | bindings/python/python-efl/examples/elementary/test_hoversel.py | 1 | 3123 | #!/usr/bin/env python
# encoding: utf-8
import os
from efl.evas import EVAS_HINT_EXPAND
from efl import elementary
from efl.elementary.window import StandardWindow
from efl.elementary.box import Box
from efl.elementary.hoversel import Hoversel, ELM_ICON_STANDARD, ELM_ICON_FILE
from efl.elementary.icon import Icon
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
WEIGHT_ZERO = 0.0, 0.0
ALIGN_CENTER = 0.5, 0.5
script_path = os.path.dirname(os.path.abspath(__file__))
img_path = os.path.join(script_path, "images")
def hoversel_clicked(obj):
win = StandardWindow("hoversel", "Hoversel", autodel=True, size=(320, 320))
if obj is None:
win.callback_delete_request_add(lambda o: elementary.exit())
bx = Box(win, size_hint_weight=EXPAND_BOTH)
win.resize_object_add(bx)
bx.show()
bt = Hoversel(win, hover_parent=win, text="Labels",
size_hint_weight=WEIGHT_ZERO, size_hint_align=ALIGN_CENTER)
bt.item_add("Item 1")
bt.item_add("Item 2")
bt.item_add("Item 3")
bt.item_add("Item 4 - Long Label Here")
bx.pack_end(bt)
bt.show()
bt = Hoversel(win, hover_parent=win, text="Some Icons",
size_hint_weight=WEIGHT_ZERO, size_hint_align=ALIGN_CENTER)
bt.item_add("Item 1")
bt.item_add("Item 2")
bt.item_add("Item 3", "home", ELM_ICON_STANDARD)
bt.item_add("Item 4", "close", ELM_ICON_STANDARD)
bx.pack_end(bt)
bt.show()
bt = Hoversel(win, hover_parent=win, text="All Icons",
size_hint_weight=WEIGHT_ZERO, size_hint_align=ALIGN_CENTER)
bt.item_add("Item 1", "apps", ELM_ICON_STANDARD)
bt.item_add("Item 2", "arrow_down", ELM_ICON_STANDARD)
bt.item_add("Item 3", "home", ELM_ICON_STANDARD)
bt.item_add("Item 4", "close", ELM_ICON_STANDARD)
bx.pack_end(bt)
bt.show()
bt = Hoversel(win, hover_parent=win, text="All Icons",
size_hint_weight=WEIGHT_ZERO, size_hint_align=ALIGN_CENTER)
bt.item_add("Item 1", "apps", ELM_ICON_STANDARD)
bt.item_add("Item 2", os.path.join(img_path, "logo_small.png"),
ELM_ICON_FILE)
bt.item_add("Item 3", "home", ELM_ICON_STANDARD)
bt.item_add("Item 4", "close", ELM_ICON_STANDARD)
bx.pack_end(bt)
bt.show()
bt = Hoversel(win, hover_parent=win, text="Disabled Hoversel",
disabled=True, size_hint_weight=WEIGHT_ZERO,
size_hint_align=ALIGN_CENTER)
bt.item_add("Item 1", "apps", ELM_ICON_STANDARD)
bt.item_add("Item 2", "close", ELM_ICON_STANDARD)
bx.pack_end(bt)
bt.show()
ic = Icon(win, file=os.path.join(img_path, "sky_03.jpg"))
bt = Hoversel(win, hover_parent=win, text="Icon + Label", content=ic,
size_hint_weight=WEIGHT_ZERO, size_hint_align=ALIGN_CENTER)
ic.show()
bt.item_add("Item 1", "apps", ELM_ICON_STANDARD)
bt.item_add("Item 2", "arrow_down", ELM_ICON_STANDARD)
bt.item_add("Item 3", "home", ELM_ICON_STANDARD)
bt.item_add("Item 4", "close", ELM_ICON_STANDARD)
bx.pack_end(bt)
bt.show()
win.show()
if __name__ == "__main__":
elementary.init()
hoversel_clicked(None)
elementary.run()
elementary.shutdown()
| unlicense | 7,743,831,754,863,671,000 | 31.53125 | 79 | 0.649376 | false |
StackStorm/mistral | mistral/utils/__init__.py | 1 | 13815 | # Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import datetime
import functools
import json
import os
from os import path
import shutil
import socket
import sys
import tempfile
import threading
import eventlet
from eventlet import corolocal
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import pkg_resources as pkg
import random
from mistral import exceptions as exc
# Thread local storage.
_th_loc_storage = threading.local()
ACTION_TASK_TYPE = 'ACTION'
WORKFLOW_TASK_TYPE = 'WORKFLOW'
def generate_unicode_uuid():
return uuidutils.generate_uuid()
def is_valid_uuid(uuid_string):
return uuidutils.is_uuid_like(uuid_string)
def _get_greenlet_local_storage():
greenlet_id = corolocal.get_ident()
greenlet_locals = getattr(_th_loc_storage, "greenlet_locals", None)
if not greenlet_locals:
greenlet_locals = {}
_th_loc_storage.greenlet_locals = greenlet_locals
if greenlet_id in greenlet_locals:
return greenlet_locals[greenlet_id]
else:
return None
def has_thread_local(var_name):
gl_storage = _get_greenlet_local_storage()
return gl_storage and var_name in gl_storage
def get_thread_local(var_name):
if not has_thread_local(var_name):
return None
return _get_greenlet_local_storage()[var_name]
def set_thread_local(var_name, val):
if val is None and has_thread_local(var_name):
gl_storage = _get_greenlet_local_storage()
# Delete variable from greenlet local storage.
if gl_storage:
del gl_storage[var_name]
# Delete the entire greenlet local storage from thread local storage.
if gl_storage and len(gl_storage) == 0:
del _th_loc_storage.greenlet_locals[corolocal.get_ident()]
if val is not None:
gl_storage = _get_greenlet_local_storage()
if not gl_storage:
gl_storage = _th_loc_storage.greenlet_locals[
corolocal.get_ident()] = {}
gl_storage[var_name] = val
def log_exec(logger, level=logging.DEBUG):
"""Decorator for logging function execution.
By default, target function execution is logged with DEBUG level.
"""
def _decorator(func):
@functools.wraps(func)
def _logged(*args, **kw):
params_repr = ("[args=%s, kw=%s]" % (str(args), str(kw))
if args or kw else "")
func_repr = ("Called method [name=%s, doc='%s', params=%s]" %
(func.__name__, func.__doc__, params_repr))
logger.log(level, func_repr)
return func(*args, **kw)
_logged.__doc__ = func.__doc__
return _logged
return _decorator
def merge_dicts(left, right, overwrite=True):
"""Merges two dictionaries.
Values of right dictionary recursively get merged into left dictionary.
:param left: Left dictionary.
:param right: Right dictionary.
    :param overwrite: If False, an existing left value will not be overwritten.
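    Example (illustrative values):
    merge_dicts({'a': 1, 'b': {'c': 2}}, {'b': {'d': 3}}) returns
    {'a': 1, 'b': {'c': 2, 'd': 3}}, since nested dicts are merged recursively.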
"""
if left is None:
return right
if right is None:
return left
for k, v in right.items():
if k not in left:
left[k] = v
else:
left_v = left[k]
if isinstance(left_v, dict) and isinstance(v, dict):
merge_dicts(left_v, v, overwrite=overwrite)
elif overwrite:
left[k] = v
return left
def update_dict(left, right):
"""Updates left dict with content from right dict
:param left: Left dict.
:param right: Right dict.
:return: the updated left dictionary.
"""
if left is None:
return right
if right is None:
return left
left.update(right)
return left
def get_file_list(directory):
base_path = pkg.resource_filename("mistral", directory)
return [path.join(base_path, f) for f in os.listdir(base_path)
if path.isfile(path.join(base_path, f))]
def cut_dict(d, length=100):
"""Removes dictionary entries according to the given length.
This method removes a number of entries, if needed, so that a
string representation would fit into the given length.
The intention of this method is to optimize truncation of string
representation for dictionaries where the exact precision is not
critically important. Otherwise, we'd always have to convert a dict
    into a string first and then shrink it to the needed size, which would
    increase the memory footprint and reduce performance in the case of large
    dictionaries (i.e. tens of thousands of entries).
    Note that the method, due to the complexity of the algorithm, has some
    non-zero error margin which depends on the exact keys and values placed into
the dict. So for some dicts their reduced string representations will
be only approximately equal to the given value (up to around several
chars difference).
:param d: A dictionary.
:param length: A length limiting the dictionary string representation.
:return: A dictionary which is a subset of the given dictionary.
"""
if not isinstance(d, dict):
raise ValueError("A dictionary is expected, got: %s" % type(d))
res = "{"
idx = 0
for key, value in d.items():
k = unicode(key)
v = unicode(value)
# Processing key.
new_len = len(res) + len(k)
is_str = isinstance(key, str)
if is_str:
new_len += 2
if new_len >= length:
res += "'%s..." % k[:length - new_len] if is_str else "%s..." % k
break
else:
res += "'%s'" % k if is_str else k
res += ": "
# Processing value.
new_len = len(res) + len(v)
is_str = isinstance(value, str)
if is_str:
new_len += 2
if new_len >= length:
res += "'%s..." % v[:length - new_len] if is_str else "%s..." % v
break
else:
res += "'%s'" % v if is_str else v
res += ', ' if idx < len(d) - 1 else '}'
if len(res) >= length:
res += '...'
break
idx += 1
return res
def cut_list(l, length=100):
if not isinstance(l, list):
raise ValueError("A list is expected, got: %s" % type(l))
res = '['
for idx, item in enumerate(l):
s = str(item)
new_len = len(res) + len(s)
is_str = isinstance(item, str)
if is_str:
new_len += 2
if new_len >= length:
res += "'%s..." % s[:length - new_len] if is_str else "%s..." % s
break
else:
res += "'%s'" % s if is_str else s
res += ', ' if idx < len(l) - 1 else ']'
return res
def cut_string(s, length=100):
if len(s) > length:
return "%s..." % s[:length]
return s
def cut(data, length=100):
if not data:
return data
if isinstance(data, list):
return cut_list(data, length=length)
if isinstance(data, dict):
return cut_dict(data, length=length)
return cut_string(str(data), length=length)
def cut_by_kb(data, kilobytes):
if kilobytes <= 0:
return cut(data)
length = get_number_of_chars_from_kilobytes(kilobytes)
return cut(data, length)
def cut_by_char(data, length):
return cut(data, length)
def iter_subclasses(cls, _seen=None):
"""Generator over all subclasses of a given class in depth first order."""
if not isinstance(cls, type):
raise TypeError('iter_subclasses must be called with new-style class'
', not %.100r' % cls)
_seen = _seen or set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for _sub in iter_subclasses(sub, _seen):
yield _sub
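# Illustrative sketch, not part of the original module: the class names below
# are made up to show the depth-first order produced by iter_subclasses().
def _example_iter_subclasses():
    class Base(object):
        pass

    class Child(Base):
        pass

    class GrandChild(Child):
        pass

    # Yields Child first, then GrandChild.
    return [c.__name__ for c in iter_subclasses(Base)]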
def random_sleep(limit=1):
"""Sleeps for a random period of time not exceeding the given limit.
Mostly intended to be used by tests to emulate race conditions.
:param limit: Float number of seconds that a sleep period must not exceed.
"""
seconds = random.Random().randint(0, limit * 1000) * 0.001
print("Sleep: %s sec..." % seconds)
eventlet.sleep(seconds)
class NotDefined(object):
"""Marker of an empty value.
In a number of cases None can't be used to express the semantics of
a not defined value because None is just a normal value rather than
a value set to denote that it's not defined. This class can be used
in such cases instead of None.
"""
pass
def get_number_of_chars_from_kilobytes(kilobytes):
bytes_per_char = sys.getsizeof('s') - sys.getsizeof('')
total_number_of_chars = int(kilobytes * 1024 / bytes_per_char)
return total_number_of_chars
def get_dict_from_string(string, delimiter=','):
if not string:
return {}
kv_dicts = []
for kv_pair_str in string.split(delimiter):
kv_str = kv_pair_str.strip()
kv_list = kv_str.split('=')
if len(kv_list) > 1:
try:
value = json.loads(kv_list[1])
except ValueError:
value = kv_list[1]
kv_dicts += [{kv_list[0]: value}]
else:
kv_dicts += [kv_list[0]]
return get_dict_from_entries(kv_dicts)
def get_dict_from_entries(entries):
"""Transforms a list of entries into dictionary.
:param entries: A list of entries.
If an entry is a dictionary the method simply updates the result
dictionary with its content.
        If an entry is not a dict, adds {entry: NotDefined} to the result.
"""
result = {}
for e in entries:
if isinstance(e, dict):
result.update(e)
else:
# NOTE(kong): we put NotDefined here as the value of
# param without value specified, to distinguish from
# the valid values such as None, ''(empty string), etc.
result[e] = NotDefined
return result
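# Illustrative sketch, not part of the original module: the key/value pairs
# below are made up to show how get_dict_from_string() parses its input.
def _example_get_dict_from_string():
    parsed = get_dict_from_string('timeout=30, name=my_task, force')
    # 'timeout' is JSON-decoded to the integer 30, 'name' stays a plain
    # string, and the bare key 'force' maps to the NotDefined marker.
    return parsed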
def get_process_identifier():
"""Gets current running process identifier."""
return "%s_%s" % (socket.gethostname(), os.getpid())
@contextlib.contextmanager
def tempdir(**kwargs):
argdict = kwargs.copy()
if 'dir' not in argdict:
argdict['dir'] = '/tmp/'
tmpdir = tempfile.mkdtemp(**argdict)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
raise exc.DataAccessException(
"Failed to delete temp dir %(dir)s (reason: %(reason)s)" %
{'dir': tmpdir, 'reason': e}
)
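# Illustrative sketch, not part of the original module: shows the intended
# use of the tempdir() context manager; the file name is made up.
def _example_tempdir():
    with tempdir(prefix='mistral-example-') as path:
        target = os.path.join(path, 'example.txt')
        with open(target, 'w') as f:
            f.write('example')
    # The directory (and the file in it) is removed once the block exits.
    return target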
def save_text_to(text, file_path, overwrite=False):
if os.path.exists(file_path) and not overwrite:
raise exc.DataAccessException(
"Cannot save data to file. File %s already exists."
)
with open(file_path, 'w') as f:
f.write(text)
def generate_key_pair(key_length=2048):
"""Create RSA key pair with specified number of bits in key.
Returns tuple of private and public keys.
"""
with tempdir() as tmpdir:
keyfile = os.path.join(tmpdir, 'tempkey')
args = [
'ssh-keygen',
'-q', # quiet
'-N', '', # w/o passphrase
'-t', 'rsa', # create key of rsa type
'-f', keyfile, # filename of the key file
'-C', 'Generated-by-Mistral' # key comment
]
if key_length is not None:
args.extend(['-b', key_length])
processutils.execute(*args)
if not os.path.exists(keyfile):
raise exc.DataAccessException(
"Private key file hasn't been created"
)
private_key = open(keyfile).read()
public_key_path = keyfile + '.pub'
if not os.path.exists(public_key_path):
raise exc.DataAccessException(
"Public key file hasn't been created"
)
public_key = open(public_key_path).read()
return private_key, public_key
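# Illustrative sketch, not part of the original module: generate_key_pair()
# shells out to ssh-keygen, so that binary must be available on the host.
def _example_generate_key_pair():
    private_key, public_key = generate_key_pair(key_length=2048)
    # private_key holds the PEM text, public_key the 'ssh-rsa ...' line.
    return private_key, public_key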
def utc_now_sec():
"""Returns current time and drops microseconds."""
return timeutils.utcnow().replace(microsecond=0)
def datetime_to_str(val, sep=' '):
"""Converts datetime value to string.
If the given value is not an instance of datetime then the method
returns the same value.
:param val: datetime value.
:param sep: Separator between date and time.
:return: Datetime as a string.
"""
if isinstance(val, datetime.datetime):
return val.isoformat(sep)
return val
def datetime_to_str_in_dict(d, key, sep=' '):
"""Converts datetime value in te given dict to string.
:param d: A dictionary.
:param key: The key for which we need to convert the value.
:param sep: Separator between date and time.
"""
val = d.get(key)
if val is not None:
d[key] = datetime_to_str(d[key], sep=sep)
| apache-2.0 | 2,932,834,918,021,687,000 | 25.314286 | 78 | 0.603909 | false |
lavizhao/sentiment | nn.py | 1 | 3170 | #coding: utf-8
'''
Planning to try LDA; for sentiment analysis LDA may not work well, but it can be used for text classification. Could try KNN first.
'''
import csv
import numpy as np
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from sklearn.feature_extraction.text import TfidfVectorizer
def read_csv():
f = open("train.csv","U")
reader = csv.reader(f)
train,label = [],[]
a = 0
for row in reader:
if a == 0:
a = a + 1
else:
train.append(row[1])
sub_row = row[4:]
sub_row = [float(i) for i in sub_row]
label.append(sub_row)
f.close()
f = open("test.csv","U")
reader = csv.reader(f)
test,ans = [],[]
a = 0
for row in reader:
if a == 0:
a = a + 1
else:
ans.append(int(row[0]))
test.append(row[1])
f.close()
return train,label,test,ans
def remain(a,n):
a = 1.0*a/np.sum(a)
return a
if __name__ == "__main__":
print "读文件"
train,label,test,ans = read_csv()
vectorizer = TfidfVectorizer(max_features=None,min_df=30,max_df=1.0,sublinear_tf=True,ngram_range=(1,1),smooth_idf=True,token_pattern=r'\w{1,}',analyzer='word',strip_accents='unicode',use_idf=True,binary=False)
length_train = len(train)
x_all = train + test
print "转化成tf-idf矩阵"
x_all = vectorizer.fit_transform(x_all)
x = x_all[:length_train]
t = x_all[length_train:]
label = np.array(label)
length_test = len(test)
n = label.shape[1]
print "x shape",x.shape
print "t shape",t.shape
x = x.toarray()
t = t.toarray()
ran = [[0.0,1.0] for i in range(100)]
print "建立神经网络"
#建立神经网络
fnn = buildNetwork(x.shape[1],1000,20,24,bias=True)
print "建立数据集"
#建立数据集
ds = SupervisedDataSet(x.shape[1], 24)
for i in range(len(train)):
ds.addSample(x[i],label[i])
print "构造bp"
#构造bp训练集
trainer = BackpropTrainer( fnn, ds, momentum=0.1, verbose=True, weightdecay=0.01)
print "开始训练"
trainer.trainEpochs(epochs=100)
print "开始返回结果"
out = SupervisedDataSet(x.shape[1], 24)
for i in range(len(test)):
temp = [0 for j in range(24)]
out.addSample(t[i],temp)
out = fnn.activateOnDataset(out)
s = out[:,0:5]
w = out[:,5:9]
k = out[:,9:24]
print "write"
head = "id,s1,s2,s3,s4,s5,w1,w2,w3,w4,k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15"
t = open("new_nn.csv","w")
t.write(head+"\n")
for i in xrange(len(test)):
ts,tw,tk = s[i],w[i],k[i]
#ts = remain(ts,2)
#tw = remain(tw,2)
#tk = remain(tk,12)
str_s = [str(j) for j in ts]
str_w = [str(j) for j in tw]
str_k = [str(j) for j in tk]
str_s = ','.join(str_s)
str_w = ','.join(str_w)
str_k = ','.join(str_k)
t.write("%s,%s,%s,%s\n"%(ans[i],str_s,str_w,str_k))
| apache-2.0 | 992,082,911,486,289,500 | 21.526316 | 214 | 0.553071 | false |
stefanaspect/lovetz | orig/sc_parse_history.py | 1 | 2221 | import os
from xml.etree.ElementTree import parse
import re
import sys
if len(sys.argv) == 2:
tree = parse(sys.argv[1])
else:
sys.exit(0)
target = ""
domre = re.compile('HOST')
secre = re.compile('[Ss][Ee][Cc][Uu][Rr][Ee];?')
htore = re.compile('[Hh][Tt]{2}[Pp]-[Oo][Nn][Ll][Yy];?')
class HeaderDict(object):
def __init__(self, allow_multiple=False):
self._storage = {}
self.allow_multiple = allow_multiple
def __getitem__(self, name):
name = name.lower()
return self._storage.get(name)
def __setitem__(self, name, value):
name = name.lower()
if self.allow_multiple and name in self._storage:
tmp = self._storage.get(name)
if isinstance(tmp, list):
self._storage[name].append(value)
else:
self._storage[name] = [tmp, value]
else:
self._storage[name] = value
return None
def __contains__(self, key):
key = key.lower()
return key in self._storage
def get(self, key, value=None):
key = key.lower()
if key in self._storage:
return self._storage[key]
return value
def keys(self):
return self._storage.keys()
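# Illustrative sketch, not part of the original script: the header values
# below are made up to show the case-insensitive, multi-value behaviour of
# HeaderDict when allow_multiple=True.
def _example_header_dict():
    headers = HeaderDict(allow_multiple=True)
    headers['Set-Cookie'] = 'a=1'
    headers['set-cookie'] = 'b=2'
    headers['Content-Type'] = 'text/html'
    # headers['Set-Cookie'] is now ['a=1', 'b=2']; lookups ignore case.
    return headers['Set-Cookie'], 'content-type' in headers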
htcookies = set()
sccookies = set()
for item in tree.iterfind('./item'):
url, response = "", ""
for c in item.getchildren():
if c.tag == "url":
url = c.text
elif c.tag == "response":
try:
response = c.text.decode('base64')
except:
response = c.text
if domre.search(url) is None:
continue
if response is None:
continue
tmp = response.split('\r\n\r\n')
tmp = tmp[0].split('\r\n')
headers = HeaderDict(allow_multiple=True)
for t in tmp:
if ':' in t:
k,v = t.split(': ', 1)
headers[k] = v
if 'set-cookie' in headers:
v = headers['set-cookie']
if isinstance(v, list):
for value in v:
if secre.search(value) is None:
sccookies.add(value)
if htore.search(value) is None:
htcookies.add(value)
else:
if secre.search(v) is None:
sccookies.add(v)
if htore.search(v) is None:
htcookies.add(v)
for cookie in sccookies:
print "Cookie missing 'secure' flag: {0}".format(cookie)
for cookie in htcookies:
print "Cookie missing 'http-only' flag: {0}".format(cookie)
| mit | 2,034,688,851,428,894,700 | 20.21 | 60 | 0.607384 | false |
qtproject/pyside-shiboken | tests/samplebinding/objecttypeoperators_test.py | 1 | 2129 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import unittest
from sample import *
class ObjectTypeOperatorsTest(unittest.TestCase):
def testIt(self):
a = ObjectTypeOperators("a")
b = ObjectTypeOperators("b")
self.assertFalse(a == b)
self.assertEqual(a, a < b)
# this should change a.key() and return nothing.
self.assertEqual(None, a > b)
self.assertEqual(a.key(), "aoperator>")
def testPointerOpeators(self):
a = ObjectTypeOperators("a")
b = ObjectTypeOperators("b")
self.assertEqual(a + "bc", "abc")
self.assertEqual("bc" + a, "bca")
self.assertEqual("a", a)
self.assertEqual(a, "a")
def testOperatorInjection(self):
a = ObjectTypeOperators("a")
self.assertNotEqual(a, "b")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 8,073,784,716,059,793,000 | 34.483333 | 77 | 0.631752 | false |
JervyShi/python-utils | fileEncodingChange/change.py | 1 | 1853 | # -*- coding:utf-8 -*-
__author__ = 'jervyshi'
import chardet
import os
class Change(object):
def __init__(self, readPath, writePath, fromEncoding, toEncoding):
self.readPath = readPath
self.writePath = writePath
self.fromEncoding = fromEncoding
self.toEncoding = toEncoding
def change(self, file_path):
if file_path.find('.svn') > 0:
return
if file_path.find('.idea') > 0:
return
if os.path.isfile(file_path):
self.copy_file(file_path)
elif os.path.isdir(file_path):
to_path = self.get_to_path(file_path)
if not os.path.exists(to_path):
os.mkdir(to_path)
file_list = [file_path+os.sep+x for x in os.listdir(file_path)]
for x in file_list:
self.change(x)
def get_to_path(self, file_path):
return file_path.replace(self.readPath, self.writePath)
def copy_file(self, file_path):
to_path = self.get_to_path(file_path)
with open(file_path, 'r') as f:
content = f.read()
coding = chardet.detect(content)
with open(to_path, 'w') as w:
if coding['encoding'].lower() == self.fromEncoding:
print 'path:%s,encoding change' % file_path
w.write(content.decode(self.fromEncoding).encode(self.toEncoding))
else:
print 'copy:%s, encoding:%s' % (file_path, coding['encoding'])
w.write(content)
def work(self):
self.change(self.readPath)
if __name__ == '__main__':
change = Change('/home/jervyshi/workspace/branches/pop-order-work20140702-encoding-change', '/home/jervyshi/workspace/branches/change/pop-order-work20140702-encoding-change', 'gb2312', 'utf-8')
change.work() | gpl-2.0 | 5,217,071,650,522,374,000 | 33.981132 | 197 | 0.571506 | false |
jmakov/ggrc-core | src/ggrc_basic_permissions/__init__.py | 1 | 24214 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
import datetime
from flask import Blueprint, session, g
import sqlalchemy.orm
from sqlalchemy.orm import aliased
from sqlalchemy.orm.attributes import get_history
from sqlalchemy import and_, or_, case, literal
from ggrc import db, settings
from ggrc.login import get_current_user, login_required
from ggrc.models import all_models
from ggrc.models.person import Person
from ggrc.models.audit import Audit
from ggrc.models.response import Response
from ggrc.models.relationship import Relationship
from ggrc.models.context import Context
from ggrc.models.program import Program
from ggrc.rbac import permissions
from ggrc.rbac.permissions_provider import DefaultUserPermissions
from ggrc.services.registry import service
from ggrc.services.common import Resource
from ggrc.services.common import _get_cache_manager
from . import basic_roles
from ggrc.utils import benchmark
from .contributed_roles import lookup_role_implications
from .models import Role, UserRole, ContextImplication
from ggrc_basic_permissions.converters.handlers import COLUMN_HANDLERS
from ggrc_basic_permissions.models import get_ids_related_to
blueprint = Blueprint(
'permissions',
__name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/ggrc_basic_permissions',
)
def get_public_config(current_user):
"""Expose additional permissions-dependent config to client.
Specifically here, expose GGRC_BOOTSTRAP_ADMIN values to ADMIN users.
"""
public_config = {}
if permissions.is_admin():
if hasattr(settings, 'BOOTSTRAP_ADMIN_USERS'):
public_config['BOOTSTRAP_ADMIN_USERS'] = settings.BOOTSTRAP_ADMIN_USERS
return public_config
def objects_via_relationships_query(user_id, context_not_role=False):
"""Creates a query that returns objects a user can access via program.
Args:
user_id: id of the user
context_not_role: use context instead of the role for the third column
in the search api we need to return (obj_id, obj_type, context_id),
but in ggrc_basic_permissions we need a role instead of a
context_id (obj_id, obj_type, role_name)
Returns:
db.session.query object that selects the following columns:
| id | type | role_name or context |
"""
_role = aliased(all_models.Role, name="r")
_program = aliased(all_models.Program, name="p")
_relationship = aliased(all_models.Relationship, name="rl")
_user_role = aliased(all_models.UserRole, name="ur")
def _add_relationship_join(query):
return query.join(_program, or_(
and_(_relationship.source_type == 'Program',
_program.id == _relationship.source_id),
and_(_relationship.destination_type == 'Program',
_program.id == _relationship.destination_id))).\
join(_user_role, _program.context_id == _user_role.context_id).\
join(_role, _user_role.role_id == _role.id).\
filter(and_(_user_role.person_id == user_id, _role.name.in_(
('ProgramEditor', 'ProgramOwner', 'ProgramReader'))))
objects = _add_relationship_join(db.session.query(
case([
(_relationship.destination_type == "Program",
_relationship.source_id.label('id'))
], else_=_relationship.destination_id.label('id')),
case([
(_relationship.destination_type == "Program",
_relationship.source_type.label('type'))
], else_=_relationship.destination_type.label('id')),
literal(None).label('context_id') if context_not_role else _role.name))
# We also need to return relationships themselves:
relationships = _add_relationship_join(db.session.query(_relationship.id, literal("Relationship"), _relationship.context_id))
return objects.union(relationships)
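# Illustrative usage, not part of the original module; assumes an application
# and database context is already set up and that user 42 exists:
#
#   rows = objects_via_relationships_query(user_id=42).all()
#   for obj_id, obj_type, role_name in rows:
#       pass  # each row is (id, type, role_name) as described in the docstring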
class CompletePermissionsProvider(object):
def __init__(self, settings):
pass
def permissions_for(self, user):
ret = UserPermissions()
# force the permissions to be loaded into session, otherwise templates
# that depend on the permissions being available in session may assert
# the user has no permissions!
ret.check_permissions()
return ret
def handle_admin_user(self, user):
pass
class BasicUserPermissions(DefaultUserPermissions):
"""User permissions that aren't kept in session."""
def __init__(self, user):
self.user = user
with benchmark('BasicUserPermissions > load permissions for user'):
self.permissions = load_permissions_for(user)
def _permissions(self):
return self.permissions
class UserPermissions(DefaultUserPermissions):
@property
def _request_permissions(self):
return getattr(g, '_request_permissions', None)
@_request_permissions.setter
def _request_permissions(self, value):
setattr(g, '_request_permissions', value)
def _permissions(self):
self.check_permissions()
return self._request_permissions
def check_permissions(self):
if not self._request_permissions:
self.load_permissions()
def get_email_for(self, user):
return user.email if hasattr(user, 'email') else 'ANONYMOUS'
def load_permissions(self):
user = get_current_user()
email = self.get_email_for(user)
self._request_permissions = {}
self._request_permissions['__user'] = email
if user is None or user.is_anonymous():
self._request_permissions = {}
else:
with benchmark('load_permissions > load permissions for user'):
self._request_permissions = load_permissions_for(user)
def collect_permissions(src_permissions, context_id, permissions):
for action, resource_permissions in src_permissions.items():
if not resource_permissions:
permissions.setdefault(action, dict())
for resource_permission in resource_permissions:
if type(resource_permission) in [str, unicode]:
resource_type = str(resource_permission)
condition = None
else:
resource_type = str(resource_permission['type'])
condition = resource_permission.get('condition', None)
terms = resource_permission.get('terms', [])
permissions.setdefault(action, {})\
.setdefault(resource_type, dict())\
.setdefault('contexts', list())
if context_id is not None:
permissions[action][resource_type]['contexts'].append(context_id)
elif condition is None:
permissions[action][resource_type]['contexts'].append(context_id)
if condition:
permissions[action][resource_type]\
.setdefault('conditions', dict())\
.setdefault(context_id, list())\
.append({
'condition': condition,
'terms': terms,
})
def load_permissions_for(user):
"""Permissions is dictionary that can be exported to json to share with
clients. Structure is:
..
permissions[action][resource_type][contexts]
[conditions][context][context_conditions]
'action' is one of 'create', 'read', 'update', 'delete'.
'resource_type' is the name of a valid gGRC resource type.
'contexts' is a list of context_id where the action is allowed.
'conditions' is a dictionary of 'context_conditions' indexed by 'context'
where 'context' is a context_id.
'context_conditions' is a list of dictionaries with 'condition' and 'terms'
keys.
'condition' is the string name of a conditional operator, such as 'contains'.
'terms' are the arguments to the 'condition'.
"""
PERMISSION_CACHE_TIMEOUT = 1800 # 30 minutes
permissions = {}
key = 'permissions:{}'.format(user.id)
cache = None
if getattr(settings, 'MEMCACHE_MECHANISM', False):
cache = _get_cache_manager().cache_object.memcache_client
cached_keys_set = cache.get('permissions:list') or set()
if key not in cached_keys_set:
# We set the permissions:list variable so that we are able to batch
# remove all permissions related keys from memcache
cached_keys_set.add(key)
cache.set('permissions:list', cached_keys_set, PERMISSION_CACHE_TIMEOUT)
else:
permissions_cache = cache.get(key)
if permissions_cache:
# If the key is both in permissions:list and in memcache itself
# it is safe to return the cached permissions
return permissions_cache
# Add default `Help` and `NotificationConfig` permissions for everyone
# FIXME: This should be made into a global base role so it can be extended
# from extensions
default_permissions = {
"read": [
"Help",
"CustomAttributeDefinition",
{
"type": "CustomAttributeValue",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "NotificationConfig",
"terms": {
"property_name": "person",
"value": "$current_user"
},
"condition": "is"
},
],
"create": [
{
"type": "NotificationConfig",
"terms": {
"property_name": "person",
"value": "$current_user"
},
"condition": "is"
},
],
"update": [
{
"type": "NotificationConfig",
"terms": {
"property_name": "person",
"value": "$current_user"
},
"condition": "is"
},
]
}
collect_permissions(default_permissions, None, permissions)
# Add `ADMIN_PERMISSION` for "bootstrap admin" users
if hasattr(settings, 'BOOTSTRAP_ADMIN_USERS') \
and user.email in settings.BOOTSTRAP_ADMIN_USERS:
admin_permissions = {
DefaultUserPermissions.ADMIN_PERMISSION.action: [
DefaultUserPermissions.ADMIN_PERMISSION.resource_type
]
}
collect_permissions(
admin_permissions,
DefaultUserPermissions.ADMIN_PERMISSION.context_id,
permissions)
# Now add permissions from all DB-managed roles
user_roles = db.session.query(UserRole)\
.options(
sqlalchemy.orm.undefer_group('UserRole_complete'),
sqlalchemy.orm.undefer_group('Role_complete'),
sqlalchemy.orm.joinedload('role'))\
.filter(UserRole.person_id == user.id)\
.order_by(UserRole.updated_at.desc())\
.all()
source_contexts_to_rolenames = {}
for user_role in user_roles:
source_contexts_to_rolenames.setdefault(
user_role.context_id, list()).append(user_role.role.name)
if isinstance(user_role.role.permissions, dict):
collect_permissions(
user_role.role.permissions, user_role.context_id, permissions)
# apply role implications per context implication
all_context_implications = db.session.query(ContextImplication)
keys = [k for k in source_contexts_to_rolenames.keys() if k is not None]
if keys and None in source_contexts_to_rolenames:
all_context_implications = all_context_implications.filter(
or_(
ContextImplication.source_context_id == None,
ContextImplication.source_context_id.in_(keys),
)).all()
elif keys:
all_context_implications = all_context_implications.filter(
ContextImplication.source_context_id.in_(keys)).all()
elif None in source_contexts_to_rolenames:
all_context_implications = all_context_implications.filter(
ContextImplication.source_context_id == None).all()
else:
all_context_implications = []
# Gather all roles required by context implications
implied_context_to_implied_roles = {}
all_implied_roles_set = set()
for context_implication in all_context_implications:
for rolename in source_contexts_to_rolenames.get(
context_implication.source_context_id, []):
implied_role_names_list = implied_context_to_implied_roles.setdefault(
context_implication.context_id, list())
implied_role_names = lookup_role_implications(
rolename, context_implication)
all_implied_roles_set.update(implied_role_names)
implied_role_names_list.extend(implied_role_names)
# If some roles are required, query for them in bulk
all_implied_roles_by_name = {}
if implied_context_to_implied_roles and all_implied_roles_set:
implied_roles = db.session.query(Role)\
.filter(Role.name.in_(all_implied_roles_set))\
.options(sqlalchemy.orm.undefer_group('Role_complete'))\
.all()
for implied_role in implied_roles:
all_implied_roles_by_name[implied_role.name] = implied_role
# Now aggregate permissions resulting from these roles
for implied_context_id, implied_rolenames \
in implied_context_to_implied_roles.items():
if implied_context_id is None:
continue
for implied_rolename in implied_rolenames:
implied_role = all_implied_roles_by_name[implied_rolename]
collect_permissions(
implied_role.permissions, implied_context_id, permissions)
# Agregate from owners:
for object_owner in user.object_owners:
for action in ["read", "create", "update", "delete", "view_object_page"]:
permissions.setdefault(action, {})\
.setdefault(object_owner.ownable_type, {})\
.setdefault('resources', list())\
.append(object_owner.ownable_id)
for res in objects_via_relationships_query(user.id).all():
id_, type_, role_name = res
actions = ["read", "view_object_page"]
if role_name in ("ProgramEditor", "ProgramOwner"):
actions += ["create", "update", "delete"]
for action in actions:
permissions.setdefault(action, {})\
.setdefault(type_, {})\
.setdefault('resources', list())\
.append(id_)
personal_context = _get_or_create_personal_context(user)
permissions.setdefault('__GGRC_ADMIN__', {})\
.setdefault('__GGRC_ALL__', dict())\
.setdefault('contexts', list())\
.append(personal_context.id)
if cache is not None:
cached_keys_set = cache.get('permissions:list') or set()
if key in cached_keys_set:
# We only add the permissions to the cache if the
# key still exists in the permissions:list after
# the query has executed.
cache.set(key, permissions, PERMISSION_CACHE_TIMEOUT)
return permissions
def _get_or_create_personal_context(user):
personal_context = user.get_or_create_object_context(
context=1,
name='Personal Context for {0}'.format(user.id),
description='')
personal_context.modified_by = get_current_user()
db.session.add(personal_context)
db.session.flush()
return personal_context
@Resource.model_posted.connect_via(Program)
def handle_program_post(sender, obj=None, src=None, service=None):
db.session.flush()
# get the personal context for this logged in user
user = get_current_user()
personal_context = _get_or_create_personal_context(user)
context = obj.build_object_context(
context=personal_context,
name='{object_type} Context {timestamp}'.format(
object_type=service.model.__name__,
timestamp=datetime.datetime.now()),
description='',
)
context.modified_by = get_current_user()
db.session.add(obj)
db.session.flush()
db.session.add(context)
db.session.flush()
obj.contexts.append(context)
obj.context = context
# add a user_roles mapping assigning the user creating the program
# the ProgramOwner role in the program's context.
program_owner_role = basic_roles.program_owner()
user_role = UserRole(
person=get_current_user(),
role=program_owner_role,
context=context,
modified_by=get_current_user(),
)
#pass along a temporary attribute for logging the events.
user_role._display_related_title = obj.title
db.session.add(user_role)
db.session.flush()
#Create the context implication for Program roles to default context
db.session.add(ContextImplication(
source_context=context,
context=None,
source_context_scope='Program',
context_scope=None,
modified_by=get_current_user(),
))
if not src.get('private'):
# Add role implication - all users can read a public program
add_public_program_context_implication(context)
def add_public_program_context_implication(context, check_exists=False):
if check_exists and db.session.query(ContextImplication)\
.filter(
and_(
ContextImplication.context_id == context.id,
ContextImplication.source_context_id == None))\
.count() > 0:
return
db.session.add(ContextImplication(
source_context=None,
context=context,
source_context_scope=None,
context_scope='Program',
modified_by=get_current_user(),
))
# When adding a private program to an Audit Response, ensure Auditors
# can read it
@Resource.model_posted.connect_via(Relationship)
def handle_relationship_post(sender, obj=None, src=None, service=None):
db.session.flush()
db.session.add(obj)
db.session.flush()
if isinstance(obj.source, Response) \
and isinstance(obj.destination, Program) \
and obj.destination.private \
and db.session.query(ContextImplication) \
.filter(
and_(
ContextImplication.context_id == obj.destination.context.id,
ContextImplication.source_context_id == obj.source.context.id))\
.count() < 1:
#Create the audit -> program implication for the Program added to the Response
parent_program = obj.source.request.audit.program
if parent_program != obj.destination:
db.session.add(ContextImplication(
source_context=obj.source.context,
context=obj.destination.context,
source_context_scope='Audit',
context_scope='Program',
modified_by=get_current_user(),
))
db.session.add(ContextImplication(
source_context=parent_program.context,
context=obj.destination.context,
source_context_scope='Program',
context_scope='Program',
modified_by=get_current_user(),
))
# When adding a private program to an Audit Response, ensure Auditors
# can read it
@Resource.model_deleted.connect_via(Relationship)
def handle_relationship_delete(sender, obj=None, src=None, service=None):
db.session.flush()
if isinstance(obj.source, Response) \
and isinstance(obj.destination, Program) \
and obj.destination.private:
    # figure out if any other responses in this audit are still mapped to the same program
responses = [r for req in obj.source.request.audit.requests for r in req.responses]
relationships = [rel for resp in responses for rel in resp.related_destinations
if rel != obj.destination]
matching_programs = [p.destination for p in relationships
if p.destination == obj.destination]
#Delete the audit -> program implication for the Program removed from the Response
if len(matching_programs) < 1:
db.session.query(ContextImplication)\
.filter(
ContextImplication.context_id == obj.destination.context_id,
ContextImplication.source_context_id == obj.source.context_id)\
.delete()
db.session.query(ContextImplication)\
.filter(
ContextImplication.context_id == obj.destination.context_id,
ContextImplication.source_context_id == obj.source.context_id)\
.delete()
@Resource.model_put.connect_via(Program)
def handle_program_put(sender, obj=None, src=None, service=None):
#Check to see if the private property of the program has changed
if get_history(obj, 'private').has_changes():
if obj.private:
# Ensure that any implications from null context are removed
db.session.query(ContextImplication)\
.filter(
ContextImplication.context_id == obj.context_id,
ContextImplication.source_context_id == None)\
.delete()
db.session.flush()
else:
#ensure that implications from null are present
add_public_program_context_implication(obj.context, check_exists=True)
db.session.flush()
@Resource.model_posted.connect_via(Audit)
def handle_audit_post(sender, obj=None, src=None, service=None):
db.session.flush()
#Create an audit context
context = obj.build_object_context(
context=obj.context,
name='Audit Context {timestamp}'.format(
timestamp=datetime.datetime.now()),
description='',
)
context.modified_by = get_current_user()
db.session.add(context)
db.session.flush()
#Create the program -> audit implication
db.session.add(ContextImplication(
source_context=obj.context,
context=context,
source_context_scope='Program',
context_scope='Audit',
modified_by=get_current_user(),
))
#Create the audit -> program implication
db.session.add(ContextImplication(
source_context=context,
context=obj.context,
source_context_scope='Audit',
context_scope='Program',
modified_by=get_current_user(),
))
db.session.add(obj)
#Create the role implication for Auditor from Audit for default context
db.session.add(ContextImplication(
source_context=context,
context=None,
source_context_scope='Audit',
context_scope=None,
modified_by=get_current_user(),
))
db.session.flush()
#Place the audit in the audit context
obj.context = context
@Resource.model_deleted.connect
def handle_resource_deleted(sender, obj=None, service=None):
if obj.context \
and obj.context.related_object_id \
and obj.id == obj.context.related_object_id \
and obj.__class__.__name__ == obj.context.related_object_type:
db.session.query(UserRole) \
.filter(UserRole.context_id == obj.context_id) \
.delete()
db.session.query(ContextImplication) \
.filter(
or_(
ContextImplication.context_id == obj.context_id,
ContextImplication.source_context_id == obj.context_id
))\
.delete()
# Deleting the context itself is problematic, because unattached objects
# may still exist and cause a database error. Instead of implicitly
# cascading to delete those, just leave the `Context` object in place.
# It and its objects will be visible *only* to Admin users.
#db.session.delete(obj.context)
# Removed because this is now handled purely client-side, but kept
# here as a reference for the next one.
# @BaseObjectView.extension_contributions.connect_via(Program)
def contribute_to_program_view(sender, obj=None, context=None):
if obj.context_id != None and \
permissions.is_allowed_read('Role', None, 1) and \
permissions.is_allowed_read('UserRole', None, obj.context_id) and \
permissions.is_allowed_create('UserRole', None, obj.context_id) and \
permissions.is_allowed_update('UserRole', None, obj.context_id) and \
permissions.is_allowed_delete('UserRole', None, obj.context_id):
return 'permissions/programs/_role_assignments.haml'
return None
from ggrc.app import app
@app.context_processor
def authorized_users_for():
return {'authorized_users_for': UserRole.role_assignments_for,}
def contributed_services():
"""The list of all collections provided by this extension."""
return [
service('roles', Role),
service('user_roles', UserRole),
]
def contributed_object_views():
from ggrc.views.registry import object_view
return [
object_view(Role)
]
def contributed_column_handlers():
return COLUMN_HANDLERS
from .contributed_roles import BasicRoleDeclarations, BasicRoleImplications
ROLE_DECLARATIONS = BasicRoleDeclarations()
ROLE_IMPLICATIONS = BasicRoleImplications()
contributed_get_ids_related_to = get_ids_related_to
| apache-2.0 | -688,696,619,076,752,400 | 35.577039 | 127 | 0.669076 | false |
Buntworthy/jowr | jowr/calibration.py | 1 | 10814 | import os
import zipfile
import pickle
import glob
import jowr
import cv2
import numpy as np
# TODO method to write the calibration to plain text
class Calibrator(object):
""" Class to help with camera calibration.
Run calibration using a chequerboard pattern calibration can be performed
on a jowr reader (typically a CameraReader).
Examples:
Calibrate an attached camera, save the raw image files, and save the
calibration result:
>>> calibrator = jowr.Calibrator()
>>> calibration = calibrator.calibrate(jowr.CameraReader(0),
... save_name='my_images.zip')
>>> calibrator.save('my_calibration.p')
Load an existing calibration from a file:
>>> calibrator = jowr.Calibrator()
>>> calibration = calibrator.load('my_calibration.p')
Run calibration from existing image zip file
>>> calibrator = jowr.Calibrator()
>>> calibration = calibrator.calibrate('my_images.zip')
"""
def __init__(self,
chequer_size=(9, 6),
chequer_scale=25.0):
""" Create the Calibrator object.
Optionally specify the chequerboard size to be used. Download one here:
http://docs.opencv.org/2.4/_downloads/pattern.png
Args:
chequer_size (Tuple[int]): The (columns, rows) of the chequerboard
chequer_scale (int): The size of a square in mm
"""
# TODO show result option
self.calibration = {}
self.object_points = [] # 3d point in real world space
self.img_points = [] # 2d points in image plane.
self.resolution = None # resolution of the images used for calibration
self.showFrames = True # Display frames being processed
self.chequer_size = chequer_size
self.chequer_scale = chequer_scale
self.chequer_points = self.generate_chequer_points(self.chequer_size,
self.chequer_scale)
def calibrate(self, cam, save_name=''):
""" Calibrate a camera, video, zipfile, or directory of images.
Args:
cam: Source for calibration, this could be a jowr.BaseReader, path
to a zipfile, or a directory.
save_name (Optional[str]): Path to zipfile to save images. If empty
no images are saved.
"""
# A camera/video
# TODO think about a consistent interface to image collections
if isinstance(cam, jowr.Capture):
self.calibrate_reader(cam, save_name)
# An existing zip file of images
elif zipfile.is_zipfile(cam):
self.calibrate_zip(cam)
# An existing folder of images
elif os.path.isdir(cam):
self.calibrate_folder(cam)
# I don't know what this is
else:
raise TypeError("Unknown input type, "
"not a camera, video, zipfile or directory.")
return self.calibration
def calibrate_zip(self, cam):
""" Calibrate all the png files in a zip archive.
Args:
cam (str): Path to the zipfile.
"""
with zipfile.ZipFile(cam, 'r') as zip_file:
zip_members = [f.filename for f in zip_file.filelist]
# TODO add other extension
is_png = [this_file.endswith('.png')
for this_file in zip_members]
# Check there are some png files
if not any(is_png):
raise TypeError("No png files found in zip")
# Loop over files in zip file
for zipinfo, filename, png in \
zip(zip_file.filelist, zip_members, is_png):
if png:
# cv2's imread expect a file, so we extract
zip_file.extract(zipinfo)
image = cv2.imread(filename)
# TODO be careful here!
os.remove(filename)
self.check_resolution(image)
self.process(image, '')
self.calculate_calibration()
def calibrate_reader(self, reader, save_name):
""" Calibrate images selected from a camera or video.
Args:
reader (jowr.BaseReader): Image source.
save_name (str): Path to zipfile to save images.
"""
print("Press s key to capture an image. Press Esc to finish.")
self.resolution = reader.resolution
with reader.open_frames() as frames:
for frame in frames():
# Detect corners for each image during acquisition
stop = jowr.show(frame, 'Camera',
wait_time=1,
callbacks={
# Process a frame on s key pressed
's': lambda: self.process(frame,
save_name)
},
auto_close=False)
if stop:
break
self.calculate_calibration()
def calibrate_folder(self, folder):
""" Calibrate all the png files in a directory.
Args:
folder (str): directory to search for images (not including
subdirectories).
"""
for filename in jowr.find_images(folder):
image = cv2.imread(filename)
self.check_resolution(image)
self.process(image, '')
self.calculate_calibration()
def save(self, filename):
""" Save the current calibration to a file.
Args:
filename (str): path to save file.
"""
# I'd like to use json to make it readable, but numpy arrays are awkward
with open(filename, 'wb') as cal_file:
pickle.dump(self.calibration, cal_file)
# TODO some sort of validation
def load(self, filename):
""" Load a calibration from file.
Args:
filename (str): path to the previously pickled file.
"""
with open(filename, 'rb') as cal_file:
self.calibration = pickle.load(cal_file)
if not isinstance(self.calibration, dict):
raise TypeError("Loaded calibation is not a dictionary")
elif not all([this_key in self.calibration.keys()
for this_key in ('error', 'matrix', 'distortion')]):
raise TypeError("Calibration dictionary "
"doesn't have all the information I need")
return self.calibration
def process(self, frame, save_name):
""" Find the chessboard corners in a single image.
Args:
            frame: Colour image with channel ordering BGR.
            save_name: Name of zip file to save image to.
"""
if jowr.channels(frame) is 3:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
else:
gray = frame.copy()
# Find the chess board corners
ret, corners = \
cv2.findChessboardCorners(gray,
(self.chequer_size[0],
self.chequer_size[1]),
cv2.CALIB_CB_FAST_CHECK)
# If found, add object points, image points (after refining them)
if not ret:
print("Failed to find chequerboard points")
else:
self.object_points.append(self.chequer_points)
self.img_points.append(corners)
# Draw and display the corners
modified_frame = frame.copy()
modified_frame = cv2.drawChessboardCorners(modified_frame,
(self.chequer_size[0],
self.chequer_size[1]),
corners, ret)
if self.showFrames:
jowr.show(modified_frame,
window_name='Detected_corners',
wait_time=500)
if save_name:
# Add to the zip file
jowr.add_to_zip(frame, save_name)
return ret
def check_resolution(self, image):
resolution = jowr.resolution(image)
if self.resolution and self.resolution != resolution:
raise IOError(
"Calibration images are different resolutions.")
self.resolution = resolution
def calculate_calibration(self):
# Check we have everything we need
if not self.object_points:
raise ValueError("No chessboard points to work with.")
if not self.img_points:
raise ValueError("No image points to work with.")
if not self.resolution:
raise ValueError("Image resolution not detected")
# Could use a namedtuple, but then a simple dict is a bit more
# convenient for external use?
(self.calibration['error'],
self.calibration['matrix'],
self.calibration['distortion'],
_, _) = cv2.calibrateCamera(self.object_points,
self.img_points,
self.resolution,
None, None)
self.calibration['resolution'] = self.resolution
def print_to_file(self, filename):
# if not self.calibration:
# TODO raise an error
with open(filename, 'w') as cal_file:
for key, val in self.calibration.items():
cal_file.write('{}:\n'.format(key))
cal_file.write('{}\n'.format(val))
@staticmethod
def generate_chequer_points(chequer_size, chequer_scale):
"""Generate an array of corner point positions."""
chequer_points = np.zeros((chequer_size[0] * chequer_size[1], 3),
np.float32)
chequer_points[:, :2] = np.mgrid[0:chequer_size[0],
0:chequer_size[1]].T.reshape(-1, 2)
# adjust scale
chequer_points *= chequer_scale
return chequer_points
def undistort(frame, calibration):
if not jowr.resolution(frame) == calibration['resolution']:
raise ValueError("Resolution of image not equal to that of calibration")
return cv2.undistort(frame,
calibration['matrix'],
calibration['distortion'])
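# A minimal sketch, not part of the original module: load a saved calibration
# and undistort a frame of matching resolution ('my_calibration.p' is a
# made-up file name).
def _example_undistort(frame):
    calibrator = Calibrator()
    calibration = calibrator.load('my_calibration.p')
    return undistort(frame, calibration)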
if __name__ == '__main__':
reader = jowr.Camera(0)
c = Calibrator(chequer_scale=50)
c.calibrate(reader, 'test.zip')
c.save('test_cal.p')
# c.calibrate('test.zip')
| agpl-3.0 | 4,572,051,952,089,296,000 | 35.657627 | 80 | 0.539023 | false |
kevroy314/msl-iposition-pipeline | cogrecon/misc/heatmap_transform.py | 1 | 1398 | from matplotlib import pyplot as plt
from matplotlib import cm as cm
from matplotlib import mlab as ml
import numpy as np
from draggable_points import DraggablePoint
import matplotlib.patches as patches
def test_heatmap_transform():
"""
    This function tests the dynamic heatmap module used to help perform 2D histograms in a hex grid.
    This module is not currently used in typical analysis or visualization.
"""
circles = [patches.Circle((0, 0), 0.25, fc='r', alpha=0.5)]
drs = []
fig = plt.figure()
ax = fig.add_subplot(111)
x = y = np.linspace(-5, 5, 100)
X, Y = np.meshgrid(x, y)
Z1 = ml.bivariate_normal(X, Y, 2, 2, 0, 0)
z = Z1.ravel()
x = X.ravel()
y = Y.ravel()
gridsize = 30
hex_plt = plt.hexbin(x, y, C=z, gridsize=gridsize, cmap=cm.jet, bins=None)
plt.axis([x.min(), x.max(), y.min(), y.max()])
# noinspection PyUnusedLocal
def update(*args):
global fig, ax, drs, hex_plt, x, y, z, gridsize
Z = ml.bivariate_normal(X, Y, 2, 2, drs[0].point.center[0], drs[0].point.center[1])
z = Z.ravel()
hex_plt = plt.hexbin(x, y, C=z, gridsize=gridsize, cmap=cm.jet, bins=None)
for circ in circles:
ax.add_patch(circ)
dr = DraggablePoint(circ)
dr.connect(update)
drs.append(dr)
# anim = animation.FuncAnimation(fig, update, interval=10)
plt.show()
| gpl-3.0 | -8,197,172,101,991,800,000 | 28.744681 | 99 | 0.620887 | false |
AlexanderSk/fail2ban | config/action.d/smtp.py | 1 | 6020 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import socket
import smtplib
from email.mime.text import MIMEText
from email.utils import formatdate, formataddr
from fail2ban.server.actions import ActionBase, CallingMap
messages = {}
messages['start'] = \
"""Hi,
The jail %(jailname)s has been started successfully.
Regards,
Fail2Ban"""
messages['stop'] = \
"""Hi,
The jail %(jailname)s has been stopped.
Regards,
Fail2Ban"""
messages['ban'] = {}
messages['ban']['head'] = \
"""Hi,
The IP %(ip)s has just been banned for %(bantime)s seconds
by Fail2Ban after %(failures)i attempts against %(jailname)s.
"""
messages['ban']['tail'] = \
"""
Regards,
Fail2Ban"""
messages['ban']['matches'] = \
"""
Matches for this ban:
%(matches)s
"""
messages['ban']['ipmatches'] = \
"""
Matches for %(ip)s:
%(ipmatches)s
"""
messages['ban']['ipjailmatches'] = \
"""
Matches for %(ip)s for jail %(jailname)s:
%(ipjailmatches)s
"""
class SMTPAction(ActionBase):
"""Fail2Ban action which sends emails to inform on jail starting,
stopping and bans.
"""
def __init__(
self, jail, name, host="localhost", user=None, password=None,
sendername="Fail2Ban", sender="fail2ban", dest="root", matches=None):
"""Initialise action.
Parameters
----------
jail : Jail
The jail which the action belongs to.
name : str
Named assigned to the action.
host : str, optional
SMTP host, of host:port format. Default host "localhost" and
port "25"
user : str, optional
Username used for authentication with SMTP server.
password : str, optional
Password used for authentication with SMTP server.
sendername : str, optional
Name to use for from address in email. Default "Fail2Ban".
sender : str, optional
Email address to use for from address in email.
Default "fail2ban".
dest : str, optional
Email addresses of intended recipient(s) in comma space ", "
delimited format. Default "root".
matches : str, optional
Type of matches to be included from ban in email. Can be one
of "matches", "ipmatches" or "ipjailmatches". Default None
(see man jail.conf.5).
"""
super(SMTPAction, self).__init__(jail, name)
self.host = host
#TODO: self.ssl = ssl
self.user = user
        self.password = password
self.fromname = sendername
self.fromaddr = sender
self.toaddr = dest
self.matches = matches
self.message_values = CallingMap(
jailname = self._jail.name,
hostname = socket.gethostname,
bantime = self._jail.actions.getBanTime,
)
def _sendMessage(self, subject, text):
"""Sends message based on arguments and instance's properties.
Parameters
----------
subject : str
Subject of the email.
text : str
Body of the email.
Raises
------
SMTPConnectionError
Error on connecting to host.
SMTPAuthenticationError
Error authenticating with SMTP server.
SMTPException
See Python `smtplib` for full list of other possible
exceptions.
"""
msg = MIMEText(text)
msg['Subject'] = subject
msg['From'] = formataddr((self.fromname, self.fromaddr))
msg['To'] = self.toaddr
msg['Date'] = formatdate()
smtp = smtplib.SMTP()
try:
self._logSys.debug("Connected to SMTP '%s', response: %i: %s",
self.host, *smtp.connect(self.host))
if self.user and self.password:
smtp.login(self.user, self.password)
failed_recipients = smtp.sendmail(
self.fromaddr, self.toaddr.split(", "), msg.as_string())
except smtplib.SMTPConnectError:
self._logSys.error("Error connecting to host '%s'", self.host)
raise
except smtplib.SMTPAuthenticationError:
self._logSys.error(
"Failed to authenticate with host '%s' user '%s'",
self.host, self.user)
raise
except smtplib.SMTPException:
self._logSys.error(
"Error sending mail to host '%s' from '%s' to '%s'",
self.host, self.fromaddr, self.toaddr)
raise
else:
if failed_recipients:
self._logSys.warning(
"Email to '%s' failed to following recipients: %r",
self.toaddr, failed_recipients)
self._logSys.debug("Email '%s' successfully sent", subject)
finally:
try:
self._logSys.debug("Disconnected from '%s', response %i: %s",
self.host, *smtp.quit())
except smtplib.SMTPServerDisconnected:
pass # Not connected
def start(self):
"""Sends email to recipients informing that the jail has started.
"""
self._sendMessage(
"[Fail2Ban] %(jailname)s: started on %(hostname)s" %
self.message_values,
messages['start'] % self.message_values)
def stop(self):
"""Sends email to recipients informing that the jail has stopped.
"""
self._sendMessage(
"[Fail2Ban] %(jailname)s: stopped on %(hostname)s" %
self.message_values,
messages['stop'] % self.message_values)
def ban(self, aInfo):
"""Sends email to recipients informing that ban has occurred.
Parameters
----------
aInfo : dict
Dictionary which includes information in relation to
the ban.
"""
aInfo.update(self.message_values)
message = "".join([
messages['ban']['head'],
messages['ban'].get(self.matches, ""),
messages['ban']['tail']
])
self._sendMessage(
"[Fail2Ban] %(jailname)s: banned %(ip)s from %(hostname)s" %
aInfo,
message % aInfo)
Action = SMTPAction
| gpl-2.0 | 407,124,162,790,699,600 | 25.875 | 81 | 0.683887 | false |
OCA/event | website_event_questions_template/models/event_question_template.py | 1 | 1480 | # Copyright 2017 Tecnativa - Sergio Teruel
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo import fields, models
class TemplateEventQuestion(models.Model):
_name = 'event.question.template'
_description = 'Event questions template'
name = fields.Char(required=True)
question_ids = fields.One2many(
comodel_name='event.question.template.question',
inverse_name='template_id',
required=True,
string='Questions',
)
class EventQuestionTemplateQuestion(models.Model):
_inherit = 'event.question'
_name = 'event.question.template.question'
_description = 'Questions for event template'
# Field not required for a template
event_id = fields.Many2one(required=False)
answer_ids = fields.One2many(
comodel_name='event.question.template.answer',
inverse_name='question_id',
string="Answers",
required=True,
)
template_id = fields.Many2one(
comodel_name='event.question.template',
string='Event Question Template',
required=True,
ondelete='cascade',
)
class EventQuestionTemplateAnswer(models.Model):
_inherit = 'event.answer'
_name = 'event.question.template.answer'
_description = 'Answers for question template'
_order = 'sequence,id'
question_id = fields.Many2one(
comodel_name='event.question.template.question',
required=True,
ondelete='cascade',
)
| agpl-3.0 | -1,958,689,535,332,021,500 | 28.019608 | 63 | 0.666892 | false |
jucacrispim/toxicbuild | tests/unit/master/test_slave.py | 1 | 29183 | # -*- coding: utf-8 -*-
# Copyright 2016-2019 Juca Crispim <[email protected]>
# This file is part of toxicbuild.
# toxicbuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# toxicbuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with toxicbuild. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import datetime
from unittest import TestCase
from unittest.mock import Mock, MagicMock, patch
from uuid import uuid4
from toxicbuild.core.utils import datetime2string
from toxicbuild.master import slave, build, repository, users
from tests import async_test, AsyncMagicMock
@patch.object(slave, 'build_started', Mock())
@patch.object(slave, 'build_finished', Mock())
@patch.object(slave, 'step_started', Mock())
@patch.object(slave, 'step_finished', Mock())
@patch.object(slave, 'step_output_arrived', Mock())
class SlaveTest(TestCase):
@async_test
async def setUp(self):
super().setUp()
self.owner = users.User(email='[email protected]', password='adsf')
await self.owner.save()
self.slave = slave.Slave(name='slave', host='127.0.0.1', port=7777,
token='asdf', owner=self.owner)
@async_test
async def tearDown(self):
await slave.Slave.drop_collection()
await build.BuildSet.drop_collection()
await build.Builder.drop_collection()
await repository.RepositoryRevision.drop_collection()
await repository.Repository.drop_collection()
await users.User.drop_collection()
super().tearDown()
@async_test
async def test_create(self):
slave_inst = await slave.Slave.create(name='name',
host='somewhere.net',
port=7777,
token='asdf',
owner=self.owner)
self.assertTrue(slave_inst.id)
@async_test
async def test_to_dict(self):
slave_inst = await slave.Slave.create(name='name',
host='somewhere.net',
port=7777,
token='asdf',
owner=self.owner)
slave_dict = slave_inst.to_dict()
self.assertTrue(slave_dict['id'])
self.assertTrue(slave_dict['full_name'])
@async_test
async def test_to_dict_id_as_str(self):
slave_inst = await slave.Slave.create(name='name',
host='somewhere.net',
port=7777,
token='asdf',
owner=self.owner)
slave_dict = slave_inst.to_dict(id_as_str=True)
self.assertIsInstance(slave_dict['id'], str)
@async_test
async def test_get(self):
slave_inst = await slave.Slave.create(name='name',
host='somewhere.net',
port=7777,
token='asdf',
owner=self.owner)
slave_id = slave_inst.id
slave_inst = await slave.Slave.get(name='name',
host='somewhere.net',
port=7777)
self.assertEqual(slave_id, slave_inst.id)
@patch('toxicbuild.master.client.BuildClient.connect',
AsyncMagicMock(spec='toxicbuild.master.client.BuildClient.connect'))
@async_test
async def test_get_client(self, *a, **kw):
client = await self.slave.get_client()
self.assertTrue(client.connect.called)
@async_test
async def test_healthcheck(self):
async def gc():
client = MagicMock()
            async def hc():  # quick stub
return True
client.__enter__.return_value.healthcheck = hc
return client
self.slave.get_client = gc
r = await self.slave.healthcheck()
self.assertTrue(r)
@patch.object(slave.asyncio, 'sleep', AsyncMagicMock())
@patch.object(slave.Slave, 'healthcheck', AsyncMagicMock(
side_effect=ConnectionRefusedError))
@async_test
async def test_wait_service_start_timeout(self):
with self.assertRaises(TimeoutError):
await self.slave.wait_service_start()
@patch.object(slave.Slave, 'healthcheck', AsyncMagicMock())
@async_test
async def test_wait_service_start(self):
r = await self.slave.wait_service_start()
self.assertIs(r, True)
@patch.object(slave.Slave, 'healthcheck',
AsyncMagicMock(side_effect=slave.ToxicClientException))
@async_test
async def test_wait_service_client_exception(self):
with self.assertRaises(slave.ToxicClientException):
await self.slave.wait_service_start()
@patch.object(build.BuildSet, 'notify', AsyncMagicMock(
spec=build.BuildSet.notify))
@async_test
async def test_list_builders(self):
await self._create_test_data()
async def gc():
client = MagicMock()
async def lb(repo_url, vcs_type, branch, named_tree):
return ['builder-1', 'builder-2']
client.__enter__.return_value.list_builders = lb
return client
self.slave.get_client = gc
builders = await self.slave.list_builders(self.revision)
self.assertEqual(builders, [self.builder, self.other_builder])
@patch.object(build.BuildSet, 'notify', AsyncMagicMock(
spec=build.BuildSet.notify))
@async_test
async def test_finish_build_start_exception(self):
await self._create_test_data()
await self.slave._finish_build_start_exception(
self.build, self.repo, '')
self.assertEqual(self.build.status, 'exception')
@patch.object(build.BuildSet, 'notify', AsyncMagicMock(
spec=build.BuildSet.notify))
@async_test
async def test_build_bad_start(self):
await self._create_test_data()
self.slave.start_instance = AsyncMagicMock(side_effect=Exception)
r = await self.slave.build(self.build)
self.assertIs(r, False)
@patch.object(build.BuildSet, 'notify', AsyncMagicMock(
spec=build.BuildSet.notify))
@async_test
async def test_build(self):
await self._create_test_data()
client = MagicMock()
async def gc():
async def b(build, envvars, process_coro):
client.build()
return []
client.__enter__.return_value.build = b
return client
self.slave.get_client = gc
await self.slave.build(self.build)
self.assertTrue(client.build.called)
@patch.object(build.BuildSet, 'notify', AsyncMagicMock(
spec=build.BuildSet.notify))
@async_test
async def test_build_with_exception(self):
await self._create_test_data()
client = MagicMock()
async def gc():
async def b(build, envvars, process_coro):
raise slave.ToxicClientException
client.__enter__.return_value.build = b
return client
self.slave.get_client = gc
build_info = await self.slave.build(self.build)
self.assertEqual(self.build.status, 'exception')
self.assertTrue(self.build.finished)
self.assertEqual(len(build_info['steps']), 1)
@patch.object(build.BuildSet, 'notify', AsyncMagicMock(
spec=build.BuildSet.notify))
@patch.object(slave, 'build_started', Mock())
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_process_info_with_build_started(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime2string(datetime.datetime.now(tz=tz))
build_info = {'status': 'running', 'steps': [],
'started': now, 'finished': None,
'info_type': 'build_info'}
await self.slave._process_info(self.build, self.repo, build_info)
self.assertTrue(slave.build_started.send.called)
self.assertTrue(slave.notifications.publish.called)
@patch.object(build.BuildSet, 'notify', AsyncMagicMock(
spec=build.BuildSet.notify))
@patch.object(slave, 'build_finished', Mock())
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_process_info_with_build_finished(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime.datetime.now(tz=tz)
formate_now = datetime2string(now)
future_now = now + datetime.timedelta(seconds=2)
future_formated_now = datetime2string(future_now)
self.build.steps = [
build.BuildStep(repository=self.repo, command='ls', name='ls')]
build_info = {
'status': 'running', 'steps': [
{'status': 'success',
'finished': future_formated_now}],
'started': formate_now, 'finished': future_formated_now,
'info_type': 'build_info',
'total_time': 2}
await self.slave._process_info(self.build, self.repo, build_info)
self.assertEqual(self.build.total_time, 2)
self.assertTrue(slave.build_finished.send.called)
self.assertTrue(slave.notifications.publish.called)
@patch.object(build.BuildSet, 'notify', AsyncMagicMock(
spec=build.BuildSet.notify))
@async_test
async def test_process_info_with_step(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime.datetime.now(tz=tz)
build_info = {'status': 'running', 'cmd': 'ls', 'name': 'ls',
'started': now, 'finished': None, 'output': '',
'index': 0, 'info_type': 'step_info'}
process_step_info = MagicMock(spec=self.slave._process_step_info)
self.slave._process_step_info = asyncio.coroutine(
lambda *a, **kw: process_step_info())
await self.slave._process_info(self.build, self.repo, build_info)
self.assertTrue(process_step_info.called)
@patch.object(build.BuildSet, 'notify', AsyncMagicMock(
spec=build.BuildSet.notify))
@async_test
async def test_process_info_with_step_output(self):
await self._create_test_data()
info = {'info_type': 'step_output_info'}
process_step_info = MagicMock(spec=self.slave._process_step_info)
self.slave._process_step_output_info = asyncio.coroutine(
lambda *a, **kw: process_step_info())
await self.slave._process_info(self.build, self.repo, info)
self.assertTrue(process_step_info.called)
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_process_step_info_new(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime.datetime.now(tz=tz)
started = now.strftime('%w %m %d %H:%M:%S %Y %z')
finished = None
step_info = {'status': 'running', 'cmd': 'ls', 'name': 'run ls',
'output': '', 'started': started, 'finished': finished,
'index': 0, 'uuid': uuid4()}
await self.slave._process_step_info(self.build, self.repo, step_info)
self.assertEqual(len(self.build.steps), 1)
self.assertTrue(slave.notifications.publish.called)
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_process_step_info(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime.datetime.now(tz=tz)
started = now.strftime('%w %m %d %H:%M:%S %Y %z')
finished = (now + datetime.timedelta(seconds=2)).strftime(
'%w %m %d %H:%M:%S %Y %z')
a_uuid = str(uuid4())
other_uuid = str(uuid4())
info = {'cmd': 'ls', 'name': 'run ls', 'status': 'running',
'output': '', 'started': started, 'finished': None,
'index': 0, 'uuid': a_uuid}
await self.slave._process_step_info(self.build, self.repo, info)
info = {'cmd': 'echo "oi"', 'name': 'echo', 'status': 'running',
'output': '', 'started': started, 'finished': None,
'index': 1, 'uuid': other_uuid}
await self.slave._process_step_info(self.build, self.repo, info)
info = {'cmd': 'echo "oi"', 'name': 'echo', 'status': 'success',
'output': '', 'started': started, 'finished': finished,
'index': 1, 'uuid': other_uuid, 'total_time': 2}
await self.slave._process_step_info(self.build, self.repo, info)
info = {'cmd': 'ls', 'name': 'run ls', 'status': 'success',
'output': 'somefile.txt\n', 'started': started,
'finished': finished, 'total_time': 2,
'index': 0, 'uuid': a_uuid}
await self.slave._process_step_info(self.build, self.repo, info)
build = await type(self.build).get(self.build.uuid)
self.assertEqual(build.steps[1].status, 'success')
self.assertEqual(len(build.steps), 2)
self.assertTrue(build.steps[1].total_time)
self.assertTrue(slave.notifications.publish.called)
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_process_step_info_exception(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime.datetime.now(tz=tz)
started = now.strftime('%w %m %d %H:%M:%S %Y %z')
finished = (now + datetime.timedelta(seconds=2)).strftime(
'%w %m %d %H:%M:%S %Y %z')
a_uuid = str(uuid4())
info = {'cmd': 'ls', 'name': 'run ls', 'status': 'running',
'output': 'some-output', 'started': started, 'finished': None,
'index': 0, 'uuid': a_uuid}
await self.slave._process_step_info(self.build, self.repo, info)
info = {'cmd': 'ls', 'name': 'run ls', 'status': 'exception',
'output': 'shit happens', 'started': started,
'finished': finished, 'total_time': 2,
'index': 0, 'uuid': a_uuid}
await self.slave._process_step_info(self.build, self.repo, info)
build = await type(self.build).get(self.build.uuid)
self.assertEqual(build.steps[0].status, 'exception')
self.assertEqual(build.steps[0].output, 'some-outputshit happens')
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_process_step_info_exception_no_output(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime.datetime.now(tz=tz)
started = now.strftime('%w %m %d %H:%M:%S %Y %z')
finished = (now + datetime.timedelta(seconds=2)).strftime(
'%w %m %d %H:%M:%S %Y %z')
a_uuid = str(uuid4())
info = {'cmd': 'ls', 'name': 'run ls', 'status': 'running',
'output': None, 'started': started, 'finished': None,
'index': 0, 'uuid': a_uuid}
await self.slave._process_step_info(self.build, self.repo, info)
info = {'cmd': 'ls', 'name': 'run ls', 'status': 'exception',
'output': 'shit happens', 'started': started,
'finished': finished, 'total_time': 2,
'index': 0, 'uuid': a_uuid}
await self.slave._process_step_info(self.build, self.repo, info)
build = await type(self.build).get(self.build.uuid)
self.assertEqual(build.steps[0].status, 'exception')
self.assertEqual(build.steps[0].output, 'shit happens')
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_update_build_step_less_than_cache(self):
build = Mock()
step_info = {'uuid': 'some-uuid', 'output': 'bla'}
r = await self.slave._update_build_step_info(build, step_info)
self.assertFalse(r)
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_update_build_step_already_updating(self):
self.slave._step_output_cache_time['some-uuid'] = 10
build = Mock()
step_info = {'uuid': 'some-uuid', 'output': 'bla'}
self.slave._step_output_is_updating['some-uuid'] = True
r = await self.slave._update_build_step_info(build, step_info)
self.assertFalse(r)
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_process_step_output_info(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime.datetime.now(tz=tz)
started = now.strftime('%w %m %d %H:%M:%S %Y %z')
a_uuid = str(uuid4())
self.slave._step_output_cache_time[a_uuid] = 10
info = {'cmd': 'ls', 'name': 'run ls', 'status': 'running',
'output': '', 'started': started, 'finished': None,
'index': 0, 'uuid': a_uuid}
await self.slave._process_step_info(self.build, self.repo, info)
info = {'uuid': a_uuid, 'output': 'somefile.txt\n'}
await self.slave._process_step_output_info(self.build, self.repo, info)
step = await self.slave._get_step(self.build, a_uuid)
self.assertTrue(step.output)
self.assertTrue(slave.notifications.publish.called)
self.assertFalse(self.slave._step_output_is_updating[a_uuid])
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_process_step_output_info_step_finished(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime.datetime.now(tz=tz)
started = now.strftime('%w %m %d %H:%M:%S %Y %z')
a_uuid = str(uuid4())
info = {'cmd': 'ls', 'name': 'run ls', 'status': 'running',
'output': '', 'started': started, 'finished': None,
'index': 0, 'uuid': a_uuid}
self.slave._step_output_cache_time[a_uuid] = 10
self.slave._step_finished[a_uuid] = True
await self.slave._process_step_info(self.build, self.repo, info)
info = {'uuid': a_uuid, 'output': 'somefile.txt\n'}
slave.notifications.publish = AsyncMagicMock()
await self.slave._process_step_output_info(self.build, self.repo, info)
self.assertFalse(slave.notifications.publish.called)
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_get_step_wait(self):
await self._create_test_data()
build = self.buildset.builds[0]
step = await self.slave._get_step(build, 'dont-exist', wait=True)
self.assertIsNone(step)
@patch.object(slave.notifications, 'publish', AsyncMagicMock(
spec=slave.notifications.publish))
@async_test
async def test_fix_last_step_output(self):
await self._create_test_data()
tz = datetime.timezone(-datetime.timedelta(hours=3))
now = datetime.datetime.now(tz=tz)
started = now.strftime('%w %m %d %H:%M:%S %Y %z')
a_uuid = str(uuid4())
other_uuid = str(uuid4())
info = {'cmd': 'ls', 'name': 'run ls', 'status': 'running',
'output': '', 'started': started, 'finished': None,
'index': 0, 'uuid': a_uuid,
'last_step_status': None,
'last_step_finished': None}
await self.slave._process_step_info(self.build, self.repo, info)
info = {'cmd': 'echo "oi"', 'name': 'echo', 'status': 'running',
'output': '', 'started': started, 'finished': None,
'index': 1, 'uuid': other_uuid,
'last_step_status': 'success',
'last_step_finished': started}
await self.slave._process_step_info(self.build, self.repo, info)
b = await build.Build.get(self.build.uuid)
for step in b.steps:
if str(step.uuid) == a_uuid:
break
self.assertEqual(step.status, 'success')
@patch('toxicbuild.master.aws.settings')
def test_instance(self, *a, **kw):
self.slave.instance_type = 'ec2'
self.slave.instance_confs = {'instance_id': 'some-id',
'region': 'us-east-2'}
self.assertIsInstance(self.slave.instance, slave.EC2Instance)
@async_test
async def test_start_instance_not_on_demand(self):
self.slave.on_demand = False
r = await self.slave.start_instance()
self.assertFalse(r)
@patch.object(slave.EC2Instance, 'start', AsyncMagicMock())
@patch.object(slave.EC2Instance, 'is_running', AsyncMagicMock(
return_value=True))
@patch.object(slave.EC2Instance, 'get_ip', AsyncMagicMock(
return_value='192.168.0.1'))
@patch.object(slave.Slave, 'wait_service_start', AsyncMagicMock())
@patch('toxicbuild.master.aws.settings')
@async_test
async def test_start_instance_already_running(self, *a, **kw):
self.slave.on_demand = True
self.slave.instance_type = 'ec2'
self.slave.instance_confs = {'instance_id': 'some-id',
'region': 'us-east-2'}
r = await self.slave.start_instance()
self.assertEqual(r, '192.168.0.1')
self.assertFalse(slave.EC2Instance.start.called)
@patch.object(slave.EC2Instance, 'is_running', AsyncMagicMock(
return_value=False))
@patch.object(slave.EC2Instance, 'start', AsyncMagicMock())
@patch.object(slave.EC2Instance, 'get_ip', AsyncMagicMock(
return_value='192.168.0.1'))
@patch.object(slave.Slave, 'wait_service_start', AsyncMagicMock())
@patch('toxicbuild.master.aws.settings')
@async_test
async def test_start_instance_ok(self, *a, **kw):
self.slave.on_demand = True
self.slave.host = slave.Slave.DYNAMIC_HOST
self.slave.instance_type = 'ec2'
self.slave.instance_confs = {'instance_id': 'some-id',
'region': 'us-east-2'}
await self.slave.start_instance()
self.assertEqual(self.slave.host, '192.168.0.1')
@async_test
async def test_stop_instance_not_on_demand(self):
self.slave.on_demand = False
r = await self.slave.stop_instance()
self.assertFalse(r)
@patch.object(slave.EC2Instance, 'is_running', AsyncMagicMock(
return_value=False))
@patch('toxicbuild.master.aws.settings')
@async_test
async def test_stop_instance_already_stopped(self, *a, **kw):
self.slave.on_demand = True
self.slave.instance_type = 'ec2'
self.slave.instance_confs = {'instance_id': 'some-id',
'region': 'us-east-2'}
r = await self.slave.stop_instance()
self.assertFalse(r)
self.assertTrue(slave.EC2Instance.is_running.called)
@async_test
async def test_stop_instance_with_queue(self):
self.slave.on_demand = True
self.slave.queue_count = 1
r = await self.slave.stop_instance()
self.assertFalse(r)
@async_test
async def test_stop_instance_with_running(self):
self.slave.on_demand = True
self.slave.running_count = 1
r = await self.slave.stop_instance()
self.assertFalse(r)
@patch.object(slave.EC2Instance, 'is_running', AsyncMagicMock(
return_value=True))
@patch.object(slave.EC2Instance, 'stop', AsyncMagicMock())
@patch('toxicbuild.master.aws.settings')
@async_test
async def test_stop_instance_ok(self, *a, **kw):
self.slave.on_demand = True
self.slave.instance_type = 'ec2'
self.slave.instance_confs = {'instance_id': 'some-id',
'region': 'us-east-2'}
r = await self.slave.stop_instance()
self.assertTrue(r)
@async_test
async def test_save_dynamic_host(self):
self.slave.on_demand = True
self.slave.host = None
await self.slave.save()
self.assertEqual(self.slave.host, self.slave.DYNAMIC_HOST)
@async_test
async def test_add_running_repo(self):
await self.slave.save()
self.slave.host = 'a-host-that-shouldnt-be'
await self.slave.add_running_repo('some-repo')
await self.slave.reload()
self.assertTrue(self.slave.running_repos)
self.assertTrue(self.slave.running_count)
self.assertFalse(self.slave.host == 'a-host-that-shouldnt-be')
@async_test
async def test_rm_running_repo(self):
await self.slave.save()
self.slave.host = 'a-host-that-shouldnt-be'
await self.slave.add_running_repo('some-repo')
await self.slave.rm_running_repo('some-repo')
await self.slave.reload()
self.assertFalse(self.slave.running_repos)
self.assertFalse(self.slave.running_count)
self.assertFalse(self.slave.host == 'a-host-that-shouldnt-be')
@async_test
async def test_enqueue_build(self):
await self.slave.save()
build = Mock(uuid='asdf')
r = await self.slave.enqueue_build(build)
await self.slave.reload()
self.assertTrue(r)
self.assertEqual(len(self.slave.enqueued_builds), 1)
self.assertEqual(self.slave.queue_count, 1)
@async_test
async def test_enqueue_build_already_enqueued(self):
await self.slave.save()
build = Mock(uuid='asdf')
await self.slave.enqueue_build(build)
await self.slave.reload()
r = await self.slave.enqueue_build(build)
self.assertFalse(r)
self.assertEqual(len(self.slave.enqueued_builds), 1)
self.assertEqual(self.slave.queue_count, 1)
@async_test
async def test_dequeue_build(self):
await self.slave.save()
build = Mock(uuid='asdf')
await self.slave.enqueue_build(build)
r = await self.slave.dequeue_build(build)
self.assertTrue(r)
self.assertEqual(len(self.slave.enqueued_builds), 0)
self.assertEqual(self.slave.queue_count, 0)
@async_test
async def test_dequeue_build_not_enqueued(self):
await self.slave.save()
build = Mock(uuid='asdf')
r = await self.slave.dequeue_build(build)
self.assertFalse(r)
self.assertEqual(len(self.slave.enqueued_builds), 0)
self.assertEqual(self.slave.queue_count, 0)
async def _create_test_data(self):
await self.slave.save()
self.repo = repository.Repository(
name='reponame', url='git@somewhere', update_seconds=300,
vcs_type='git', slaves=[self.slave], owner=self.owner)
await self.repo.save()
self.revision = repository.RepositoryRevision(
repository=self.repo, branch='master', commit='bgcdf3123',
commit_date=datetime.datetime.now(),
author='tião', title='something'
)
await self.revision.save()
self.buildset = await build.BuildSet.create(
repository=self.repo, revision=self.revision)
await self.buildset.save()
self.builder = build.Builder(repository=self.repo, name='builder-1')
await self.builder.save()
self.other_builder = build.Builder(repository=self.repo,
name='builder-2')
await self.other_builder.save()
await self.builder.save()
self.build = build.Build(repository=self.repo, slave=self.slave,
branch='master', named_tree='v0.1',
builder=self.builder)
self.buildset.builds.append(self.build)
await self.buildset.save()
| agpl-3.0 | 1,354,189,133,162,162,000 | 38.648098 | 79 | 0.595867 | false |
prismskylabs/pycounters | example/munin_plugin.py | 1 | 1576 | #!/usr/bin/python
from pycounters.utils.munin import Plugin
config = [
{
"id" : "requests_per_sec",
"global" : {
# graph global options: http://munin-monitoring.org/wiki/protocol-config
"title" : "Request Frequency",
"category" : "PyCounters example"
},
"data" : [
{
"counter" : "requests_frequency",
"label" : "requests per second",
"draw" : "LINE2",
}
]
},
{
"id" : "requests_time",
"global" : {
# graph global options: http://munin-monitoring.org/wiki/protocol-config
"title" : "Request Average Handling Time",
"category" : "PyCounters example"
},
"data" : [
{
"counter" : "requests_time",
"label" : "Average time per request",
"draw" : "LINE2",
}
]
},
{
"id" : "requests_total_data",
"global" : {
# graph global options: http://munin-monitoring.org/wiki/protocol-config
"title" : "Total data processed",
"category" : "PyCounters example"
},
"data" : [
{
"counter" : "requests_data_len",
"label" : "total bytes",
"draw" : "LINE2",
}
]
}
]
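# A hedged usage note (assuming the standard munin plugin protocol): munin-node
# typically invokes a plugin twice, once with the "config" argument to print the
# graph definitions declared above, and once with no argument to print the
# current values, e.g.:
#
#   ./munin_plugin.py config   # emit graph/label definitions built from `config`
#   ./munin_plugin.py          # emit the latest counter values
#
# The JSON path handed to Plugin below should match the report file written by
# the PyCounters-instrumented server.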
p = Plugin("/tmp/server.counters.json",config) # initialize the plugin
p.process_cmd() # process munin command and output requested data or config | apache-2.0 | -5,098,438,413,878,477,000 | 27.160714 | 84 | 0.460025 | false |
graphql-python/graphene | graphene/types/interface.py | 1 | 2228 | from .base import BaseOptions, BaseType
from .field import Field
from .utils import yank_fields_from_attrs
# For static type checking with Mypy
MYPY = False
if MYPY:
from typing import Dict # NOQA
class InterfaceOptions(BaseOptions):
fields = None # type: Dict[str, Field]
class Interface(BaseType):
"""
Interface Type Definition
    When a field can return one of a heterogeneous set of types, an Interface type
is used to describe what types are possible, what fields are in common across
all types, as well as a function to determine which type is actually used
when the field is resolved.
.. code:: python
from graphene import Interface, String
class HasAddress(Interface):
class Meta:
description = "Address fields"
address1 = String()
address2 = String()
If a field returns an Interface Type, the ambiguous type of the object can be determined using
``resolve_type`` on Interface and an ObjectType with ``Meta.possible_types`` or ``is_type_of``.
Meta:
name (str): Name of the GraphQL type (must be unique in schema). Defaults to class
name.
description (str): Description of the GraphQL type in the schema. Defaults to class
docstring.
fields (Dict[str, graphene.Field]): Dictionary of field name to Field. Not recommended to
use (prefer class attributes).
"""
@classmethod
def __init_subclass_with_meta__(cls, _meta=None, **options):
if not _meta:
_meta = InterfaceOptions(cls)
fields = {}
for base in reversed(cls.__mro__):
fields.update(yank_fields_from_attrs(base.__dict__, _as=Field))
if _meta.fields:
_meta.fields.update(fields)
else:
_meta.fields = fields
super(Interface, cls).__init_subclass_with_meta__(_meta=_meta, **options)
@classmethod
def resolve_type(cls, instance, info):
from .objecttype import ObjectType
if isinstance(instance, ObjectType):
return type(instance)
def __init__(self, *args, **kwargs):
raise Exception("An Interface cannot be initialized")
| mit | -3,083,070,874,993,569,000 | 30.380282 | 99 | 0.639587 | false |
digitie/magneto | db/schema.py | 1 | 26888 | # -*- coding: utf-8 -*-
# to work around sqlalchemy's get_characterset_info bug, which only applies to py2k.
#import mysql.connector
#mysql.connector.MySQLConnection.get_characterset_info=lambda cls:cls.charset
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import DDLElement
from sqlalchemy.sql import table
from sqlalchemy.ext import compiler
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import event
from sqlalchemy.ext.hybrid import hybrid_property
#import pystaggrelite3
Base = declarative_base()
meta = MetaData()
class CreateView(DDLElement):
def __init__(self, name, selectable):
self.name = name
self.selectable = selectable
class DropView(DDLElement):
def __init__(self, name):
self.name = name
@compiler.compiles(CreateView)
def compile(element, compiler, **kw):
return "CREATE VIEW %s AS %s" % (element.name, compiler.sql_compiler.process(element.selectable))
@compiler.compiles(DropView)
def compile(element, compiler, **kw):
return "DROP VIEW %s" % (element.name)
def view(name, metadata, selectable):
t = table(name)
for c in selectable.c:
c._make_proxy(t)
CreateView(name, selectable).execute_at('after-create', metadata)
DropView(name).execute_at('before-drop', metadata)
return t
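# Note on the pattern above: CreateView/DropView are custom DDL elements and the
# @compiler.compiles hooks render them as "CREATE VIEW ... AS <select>" and
# "DROP VIEW ...". view() wraps a SELECT in a lightweight table clause and
# registers the DDL with execute_at(), so the views are emitted automatically by
# metadata create_all()/drop_all(). The exp_patch_info, exp_vna_info and exp_info
# views further down are all built through this helper.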
def on_connect(dbapi_conn, connection_rec):
'''
dbapi_conn.create_aggregate("stdev", 1, pystaggrelite3.stdev)
dbapi_conn.create_aggregate("stdevp", 1, pystaggrelite3.stdevp)
dbapi_conn.create_aggregate("var", 1, pystaggrelite3.var)
dbapi_conn.create_aggregate("varp", 1, pystaggrelite3.varp)
dbapi_conn.create_aggregate("median", 1, pystaggrelite3.median)
for (name,arity,func) in pystaggrelite3.getaggregators():
dbapi_conn.create_aggregate(name,arity,func)
'''
pass
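# Note: on_connect is currently a no-op. The commented-out body registered the
# pystaggrelite3 aggregates (stdev, median, ...) on each new SQLite connection,
# and the commented-out exp_info view at the end of this module is the consumer
# of those aggregates; with the MySQL engine configured below, both stay disabled.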
'''
class ExpDCCoilProp(Base):
__tablename__ = 'exp_dc_coil_prop'
id = Column(Integer, primary_key=True)
exp = relationship("Exp", backref="dc_coil")
radius = Column(Float, nullable=False)
wire_diameter = Column(Float, nullable=False)
turn = Column(Float, nullable=False)
comment = Column(String(1000))
def __init__(self, \
radius, \
wire_diameter, \
turn, \
comment = None):
self.radius = radius
self.wire_diameter = wire_diameter
self.turn = turn
self.comment = comment
'''
exp_smith_stats = Table('exp_smith_stats',meta,
Column('exp_id', Integer, ForeignKey('exp.id'), nullable=False, primary_key=True),
Column('max_imp_re', Float, nullable=True), #1
Column('max_adj_imp_re', Float, nullable=True), #1
Column('max_imp_re_freq', Float, nullable=True), #1
Column('max_imp_im', Float, nullable=True), #1
Column('max_imp_im_freq', Float, nullable=True), #1
Column('max_imp_mag', Float, nullable=True), #1
Column('max_adj_imp_mag', Float, nullable=True), #1
Column('max_imp_mag_freq', Float, nullable=True), #1
Column('max_adm_re', Float, nullable=True), #1
Column('max_adm_re_freq', Float, nullable=True), #1
Column('max_adm_im', Float, nullable=True), #1
Column('max_adm_im_freq', Float, nullable=True), #1
Column('max_adm_mag', Float, nullable=True), #1
Column('max_adm_mag_freq', Float, nullable=True), #1
Column('imp_q_freq0', Float, nullable=True), #1
Column('imp_q_freq1', Float, nullable=True), #1
Column('adj_imp_q_freq0', Float, nullable=True), #1
Column('adj_imp_q_freq1', Float, nullable=True), #1
Column('res_q_freq0', Float, nullable=True), #1
Column('res_q_freq1', Float, nullable=True), #1
Column('adj_res_q_freq0', Float, nullable=True), #1
Column('adj_res_q_freq1', Float, nullable=True), #1
Column('adm_q_freq0', Float, nullable=True), #1
Column('adm_q_freq1', Float, nullable=True), #1
Column('max_imp_parallel_ind', Float, nullable=True), #1
)
class ExpSmithStats(Base):
__table__ = exp_smith_stats
def __init__(self, \
exp_id, \
max_imp_re = None, \
max_adj_imp_re = None, \
max_imp_re_freq = None, \
max_imp_im = None, \
max_imp_im_freq = None, \
max_imp_mag = None, \
max_adj_imp_mag = None, \
max_imp_mag_freq = None, \
max_adm_re = None, \
max_adm_re_freq = None, \
max_adm_im = None, \
max_adm_im_freq = None, \
max_adm_mag = None, \
max_adm_mag_freq = None,\
imp_q_freq0 = None, \
imp_q_freq1 = None, \
adj_imp_q_freq0 = None, \
adj_imp_q_freq1 = None, \
res_q_freq0 = None, \
res_q_freq1 = None, \
adj_res_q_freq0 = None, \
adj_res_q_freq1 = None, \
adm_q_freq0 = None, \
adm_q_freq1 = None, \
max_imp_parallel_ind = None):
self.exp_id = exp_id
self.max_imp_re = max_imp_re
self.max_adj_imp_re = max_adj_imp_re
self.max_imp_re_freq = max_imp_re_freq
self.max_imp_im = max_imp_im
self.max_imp_im_freq = max_imp_im_freq
self.max_imp_mag = max_imp_mag
self.max_adj_imp_mag = max_adj_imp_mag
self.max_imp_mag_freq = max_imp_mag_freq
self.max_adm_re = max_adm_re
self.max_adm_re_freq = max_adm_re_freq
self.max_adm_im = max_adm_im
self.max_adm_im_freq = max_adm_im_freq
self.max_adm_mag = max_adm_mag
self.max_adm_mag_freq = max_adm_mag_freq
self.imp_q_freq0 = imp_q_freq0
self.imp_q_freq1 = imp_q_freq1
self.adj_imp_q_freq0 = adj_imp_q_freq0
self.adj_imp_q_freq1 = adj_imp_q_freq1
self.res_q_freq0 = res_q_freq0
self.res_q_freq1 = res_q_freq1
self.adj_res_q_freq0 = adj_res_q_freq0
self.adj_res_q_freq1 = adj_res_q_freq1
self.adm_q_freq0 = adm_q_freq0
self.adm_q_freq1 = adm_q_freq1
self.max_imp_parallel_ind = max_imp_parallel_ind
def __repr__(self):
return "<ExpSmithStats(ExpId = %d, Max Re(z) = %f@%fHz, Max Im(z) = %f@%fHz,, Max Mag(z) = %f@%fHz,, Max Re(y) = %f@%fHz,, Max Im(y) = %f@%fHz,, Max Mag(y) = %f@%fHz,)>" % \
(self.exp_id, \
self.max_imp_re, self.max_imp_re_freq, \
self.max_imp_im, self.max_imp_im_freq, \
self.max_imp_mag, self.max_imp_mag_freq, \
self.max_adm_re, self.max_adm_re_freq, \
self.max_adm_im, self.max_adm_im_freq, \
self.max_adm_mag, self.max_adm_mag_freq)
exp_dc_coil_prop = Table('exp_dc_coil_prop',meta,
Column('id', Integer, primary_key=True),
Column('radius', Float, nullable=False), #1
Column('wire_diameter', Float, nullable=False), #1
Column('turn', Float, nullable=False), #1
Column('comment', String(1000)), #1
)
class ExpDCCoilProp(Base):
__table__ = exp_dc_coil_prop
def __init__(self, \
radius, \
wire_diameter, \
turn, \
comment = None):
self.radius = radius
self.wire_diameter = wire_diameter
self.turn = turn
self.comment = comment
def __repr__(self):
return "<ExpDCCoilProp(Rad = %f, Wire Dia = %f, turn = %d, %s)>" % \
(self.radius, self.wire_diameter, self.turn, self.comment)
exp_ac_coil_prop = Table('exp_ac_coil_prop',meta,
Column('id', Integer, primary_key=True),
Column('type', Integer, nullable=False), #1
Column('width', Float, nullable=False), #1
Column('height', Float, nullable=False), #1
Column('length', Float, nullable=False), #1
Column('wire_diameter', Float, nullable=False), #1
Column('turn', Integer, nullable=False), #1
Column('comment', String(1000)), #1
)
class ExpACCoilProp(Base):
__table__ = exp_ac_coil_prop
def __init__(self, type, width, height, length, wire_diameter, turn, comment = None):
self.type = type
self.width = width
self.height = height
self.length = length
self.wire_diameter = wire_diameter
self.turn = turn
self.comment = comment
def __repr__(self):
return "<ExpACCoilProp(%d, WxHxL = %fx%fx%f, Wire Dia = %d, turn = %d, %s)>" % \
(self.type, self.width, self.height, self.length, self.wire_diameter, self.turn, self.comment)
@property
def typeAsString(self):
if self.type == 1:
return "Circle"
elif self.type == 2:
return "Ellipse"
elif self.type == 3:
return "Square"
'''
class ExpACCoilProp(Base):
__tablename__ = 'exp_ac_coil_prop'
id = Column(Integer, primary_key=True)
exp = relationship("Exp", backref="ac_coil")
type = Column(Enum('','','','','',''), nullable=False)
width = Column(Float, nullable=False)
height = Column(Float, nullable=False)
length = Column(Float, nullable=False)
wire_diameter = Column(Float, nullable=False)
turn = Column(Integer, nullable=False)
comment = Column(String(1000))
def __init__(self, type, width, height, length, wire_diameter, turn, comment = None):
self.type = type
self.width = width
self.height = height
self.length = length
self.wire_diameter = wire_diameter
self.turn = turn
self.comment = comment
'''
exp_material_prop = Table('exp_material_prop',meta,
Column('id', Integer, primary_key=True),
Column('name', String(30), nullable=False), #1
Column('youngs_modulus', Float, nullable=False), #1
Column('density', Float, nullable=False), #1
Column('poissons_ratio', Float, nullable=False), #1
Column('shear_modulus', Float, nullable=False), #1
Column('comment', String(1000)), #1
)
class ExpMaterialProp(Base):
__table__ = exp_material_prop
def __init__(self, name, youngs_modulus, density, poissons_ratio, shear_modulus, comment = None):
self.name = name
self.youngs_modulus = youngs_modulus
self.density = density
self.poissons_ratio = poissons_ratio
self.shear_modulus = shear_modulus
self.comment = comment
def __repr__(self):
return "<ExpMaterialProp(%s, %f, %f, %f, %f, %s)>" % \
(self.name, self.youngs_modulus, self.density, self.poissons_ratio, self.shear_modulus, self.comment)
'''
class ExpMaterialProp(Base):
__tablename__ = 'exp_material_prop'
id = Column(Integer, primary_key=True)
name = Column(String(30), nullable=False)
youngs_modulus = Column(Float, nullable=False)
density = Column(Float, nullable=False)
poissons_ratio = Column(Float, nullable=False)
shear_modulus = Column(Float, nullable=False)
patch = relationship("ExpPatchProp", backref="material")
comment = Column(String(1000))
def __init__(self, name, youngs_modulus, density, poissons_ratio, shear_modulus, comment = None):
self.name = name
self.youngs_modulus = youngs_modulus
self.density = density
self.poissons_ratio = poissons_ratio
self.shear_modulus = shear_modulus
self.comment = comment
'''
exp_patch_prop = Table('exp_patch_prop',meta,
Column('id', Integer, primary_key=True),
Column('material_id',Integer, ForeignKey('exp_material_prop.id'), nullable=False),
Column('width', Float, nullable=False), #1
Column('height', Float, nullable=False), #1
Column('grain_orientation', String(20), nullable=False), #1
Column('comment', String(1000)), #1
)
class ExpPatchProp(Base):
__table__ = exp_patch_prop
def __init__(self, material_id, width, height, grain_orientation, comment = None):
self.material_id = material_id
self.width = width
self.height = height
self.grain_orientation = grain_orientation
self.comment = comment
@hybrid_property
def aspect_ratio(self):
return self.width / self.height
def __repr__(self):
return "<ExpPatchProp(%d, WxL = %fx%f, %s, %s)>" % \
(self.material_id, self.width, self.height, self.grain_orientation, self.comment)
exp_vis_prop = Table('exp_vis_prop',meta,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False), #1
Column('kinetic_vis', Float, nullable=True), #1
Column('density', Float, nullable=True), #1
Column('weight_percent', Float, nullable=True), #1
Column('comment', String(1000)), #1
)
class ExpVisProp(Base):
__table__ = exp_vis_prop
def __init__(self, name, kinetic_vis = None, density = None, weight_percent = None, comment = None):
self.name = name
self.kinetic_vis = kinetic_vis
self.density = density
self.weight_percent = weight_percent
self.comment = comment
def __repr__(self):
return "<ExpVisProp(name = %s, kv = %f, comment = %s)>" % \
(self.name, self.kinetic_vis, self.comment)
'''
class ExpPatchProp(Base):
__tablename__ = 'exp_patch_prop'
id = Column(Integer, primary_key=True)
material_id = Column(Integer, ForeignKey('material.id'))
width = Column(Float, nullable=False)
length = Column(Float, nullable=False)
grain_orientation = Column(Enum(u'Horizontally', u'Vertically', name = 'grain_orientation'), nullable=False)
exp = relationship("Exp", uselist=False, backref="patch")
comment = Column(String(1000))
def __init__(self, material_id, width, length, grain_orientation, comment = None):
self.material_id = material_id
self.width = width
self.length = length
self.grain_orientation = grain_orientation
self.comment = comment
@hybrid_property
def aspect_ratio(self):
return self.width / self.length
'''
exp = Table('exp',meta,
Column('id', Integer, primary_key=True),
Column('ac_coil_id', Integer, ForeignKey('exp_ac_coil_prop.id'), nullable=False),
Column('dc_coil_id', Integer, ForeignKey('exp_dc_coil_prop.id'), nullable=False),
Column('patch_id', Integer, ForeignKey('exp_patch_prop.id'), nullable=False),
Column('vis_id', Integer, ForeignKey('exp_vis_prop.id'), nullable=False),
Column('dc_current', Float(precision=3), nullable=False), #1
Column('dc_field', Float, nullable=False), #1
Column('temperature', Float, nullable=False), #1
Column('comment', String(1000)), #1
Column('exp_date', Date, nullable=False),
Column('patch_included', Enum(u'Y', u'N'), nullable=True, default = 'Y'),
Column('subtract_exp_id', Integer, ForeignKey('exp.id'), nullable=True),
)
class Exp(Base):
__table__ = exp
def __init__(self, ac_coil_id, dc_coil_id, patch_id, vis_id, exp_date, dc_current, dc_field, temperature, comment = None, patch_included = 'Y', subtract_exp_id = None):
self.ac_coil_id = ac_coil_id
self.dc_coil_id = dc_coil_id
self.patch_id = patch_id
self.vis_id = vis_id
self.exp_date = exp_date
self.dc_current = dc_current
self.dc_field = dc_field
self.temperature = temperature
self.comment = comment
self.patch_included = patch_included
self.subtract_exp_id = subtract_exp_id
def __repr__(self):
if self.id is not None:
return "<Exp(#%d, AC#%d, DC#%d, P#%d, %s, %f, %f, %s)>" % \
(self.id, self.ac_coil_id, self.dc_coil_id, self.patch_id, self.exp_date, self.dc_current, self.dc_field, self.comment)
return "<Exp(#AC#%d, DC#%d, P#%d, %s, %f, %f, %s)>" % \
(self.ac_coil_id, self.dc_coil_id, self.patch_id, self.exp_date, self.dc_current, self.dc_field, self.comment)
'''
class Exp(Base):
__tablename__ = 'exp'
id = Column(Integer, primary_key=True)
ac_coil_id = Column(Integer, ForeignKey('ac_coil.id'))
dc_coil_id = Column(Integer, ForeignKey('dc_coil.id'))
patch_id = Column(Integer, ForeignKey('patch.id'))
exp_date = Column(Date, nullable=False)
dc_current = Column(Integer, nullable=False)
dc_field = Column(Integer, nullable=False)
comment = Column(String(1000))
exp_vna = relationship("ExpVNA", uselist=False, backref="exp", cascade="all, delete-orphan")
exp_smith = relationship("ExpSmith", backref="exp", cascade="all, delete-orphan")
def __init__(self, ac_coil_id, dc_coil_id, patch_id, exp_date, dc_current, dc_field, comment = None):
self.ac_coil_id = ac_coil_id
self.dc_coil_id = dc_coil_id
self.patch_id = patch_id
self.exp_date = exp_date
self.dc_current = dc_current
self.dc_field = dc_field
self.comment = comment
'''
exp_vna = Table('exp_vna',meta,
Column('id', Integer, primary_key=True),
Column('exp_id',Integer, ForeignKey('exp.id'), nullable=False),
Column('if_bandwidth', Float, nullable=False), #1
Column('number_of_points', Integer, nullable=False), #1
Column('format_type', String(40), nullable=False), #1
Column('sweep_type', String(40), nullable=False), #1
Column('channel', Integer, nullable=False), #1
Column('source_power', Float, nullable=False), #1
Column('measure_type', String(10), nullable=False), #1
Column('sweep_time', Float, nullable=False), #1
)
class ExpVNA(Base):
__table__ = exp_vna
def __init__(self, exp_id, \
if_bandwidth, number_of_points, format_type, sweep_type, channel, \
source_power, measure_type, sweep_time):
self.exp_id = exp_id
self.if_bandwidth = if_bandwidth
self.number_of_points = number_of_points
self.format_type = format_type
self.sweep_type = sweep_type
self.channel = channel
self.source_power = source_power
self.measure_type = measure_type
self.sweep_time = sweep_time
'''
class ExpVNA(Base):
__tablename__ = 'exp_vna'
id = Column(Integer, primary_key=True)
exp_id = Column(Integer, ForeignKey('exp.id'))
if_bandwidth = Column(Float, nullable=False)
number_of_points = Column(Integer, nullable=False)
format_type = Column(String(40), nullable=False)
sweep_type = Column(String(40), nullable=False)
channel = Column(Integer, nullable=False)
source_power = Column(Float, nullable=False)
measure_type = Column(String(10), nullable=False)
sweep_time = Column(Float, nullable=False)
def __init__(self, exp_id, if_bandwidth, number_of_points, format_type, sweep_type, channel, source_power, measure_type, sweep_time):
self.exp_id = exp_id
self.if_bandwidth = if_bandwidth
self.number_of_points = number_of_points
self.format_type = format_type
self.sweep_type = sweep_type
self.channel = channel
self.source_power = source_power
self.measure_type = measure_type
self.sweep_time = sweep_time
'''
exp_smith = Table('exp_smith',meta,
Column('id', Integer, primary_key=True),
Column('exp_id',Integer, ForeignKey("exp.id"), nullable=False),
Column('freq',Float, nullable=False),
Column('re',Float, nullable=False),
Column('im',Float, nullable=False), #1
Column('imp_re',Float, nullable=False),
Column('imp_im',Float, nullable=False), #1
)
class ExpSmith(Base):
__table__ = exp_smith
def __init__(self, exp_id, freq, re, im):
self.exp_id = exp_id
self.freq = freq
self.re = re
self.im = im
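        # The two lines below convert the measured reflection coefficient
        # gamma = re + j*im into normalised impedance z = (1 + gamma)/(1 - gamma):
        # Re(z) = (1 - re^2 - im^2)/((1 - re)^2 + im^2), Im(z) = 2*im/((1 - re)^2 + im^2).
        # __repr__ scales by 50 to report ohms against a 50-ohm reference.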
self.imp_re = (1-re**2-im**2)/((1-re)**2+im**2)
self.imp_im = 2*im/((1-re)**2+im**2)
def __repr__(self):
return "<ExpSmith(Exp#%d, %.3f+%.3fi @ %.2fHz)>" % (self.exp_id, self.imp_re * 50, self.imp_im * 50, self.freq)
exp_smith_filtered = Table('exp_smith_filtered',meta,
Column('id', Integer, primary_key=True),
Column('exp_id',Integer, ForeignKey("exp.id"), nullable=False),
Column('freq',Float, nullable=False),
Column('imp_re',Float, nullable=False),
Column('imp_im',Float, nullable=False), #1
)
class ExpSmithFiltered(Base):
__table__ = exp_smith_filtered
def __init__(self, exp_id, freq, imp_re, imp_im):
self.exp_id = exp_id
self.freq = freq
        self.imp_re = imp_re
        self.imp_im = imp_im
def __repr__(self):
return "<ExpSmith(Exp#%d, %.3f+%.3fi @ %.2fHz)>" % (self.exp_id, self.imp_re * 50, self.imp_im * 50, self.freq)
'''
class ExpSmith(Base):
__tablename__ = 'exp_smith'
id = Column(Integer, primary_key=True)
exp_id = Column(Integer, ForeignKey('exp.id'))
freq = Column(Float, nullable=False)
re = Column(Float, nullable=False)
im = Column(Float, nullable=False)
imp_re = Column(Float, nullable=False)
imp_im = Column(Float, nullable=False)
mem_re = Column(Float, nullable=False)
mem_im = Column(Float, nullable=False)
def __init__(self, exp_id, freq, re, im, mem_re, mem_im):
self.exp_id = exp_id
self.freq = freq
self.re = re
self.im = im
self.imp_re = (1-re**2-im**2)/((1-re)**2+im**2)
self.imp_im = 2*im/((1-re)**2+im**2)
def __repr__(self):
return "<ExpSmith(%d, %03f %03fi @ %f)>" % (self.exp_id, self.imp_re * 50, self.imp_im * 50, self.freq)
'''
'''
self.name = name
self.youngs_modulus = youngs_modulus
self.density = density
self.poissons_ratio = poissons_ratio
self.shear_modulus = shear_modulus
self.comment = comment
self.width = width
self.length = length
self.grain_orientation = grain_orientation
self.comment = comment
self.exp_id = exp_id
self.if_bandwidth = if_bandwidth
self.number_of_points = number_of_points
self.format_type = format_type
self.sweep_type = sweep_type
self.channel = channel
self.source_power = source_power
self.measure_type = measure_type
self.sweep_time = sweep_time
'''
patch_info = view("exp_patch_info", meta,
select([
exp_material_prop.c.id.label('material_id'),
exp_material_prop.c.name,
exp_material_prop.c.density,
exp_material_prop.c.poissons_ratio,
exp_material_prop.c.youngs_modulus,
exp_material_prop.c.shear_modulus,
exp_material_prop.c.comment.label('material_comment'),
exp_patch_prop.c.id.label('patch_id'),
exp_patch_prop.c.width,
exp_patch_prop.c.height,
exp_patch_prop.c.grain_orientation,
exp_patch_prop.c.comment.label('patch_comment'),
]).select_from(\
exp_material_prop.join(exp_patch_prop, exp_material_prop.c.id == exp_patch_prop.c.material_id)
)
)
class ExpPatchInfo(Base):
__table__ = patch_info
def __repr__(self):
return "<ExpPatchInfo(%s)>" % (self.name)
@property
def grainAsString(self):
if self.grain_orientation == 1:
return "Vertically"
elif self.grain_orientation == 2:
return "Horizontally"
else:
return "Unknown"
@property
def proper_ar(self):
ar = self.width / self.height
if ar < 1:
ar = 1/ar
return ar
exp_vna_info = view("exp_vna_info", meta,
select([
exp.c.id,
exp.c.patch_id,
exp.c.exp_date,
exp.c.dc_current,
exp.c.dc_field,
exp.c.comment.label('exp_comment'),
exp.c.ac_coil_id,
exp.c.dc_coil_id,
exp.c.patch_included,
exp.c.subtract_exp_id,
exp_vna.c.if_bandwidth,
exp_vna.c.number_of_points,
exp_vna.c.format_type,
exp_vna.c.sweep_type,
exp_vna.c.channel,
exp_vna.c.source_power,
exp_vna.c.measure_type,
exp_vna.c.sweep_time,
]).select_from(exp.\
join(exp_vna, exp.c.id == exp_vna.c.exp_id)
)
)
exp_info = view("exp_info", meta,
select([
exp_vna_info.c.id,
exp_vna_info.c.patch_id,
exp_vna_info.c.exp_date,
exp_vna_info.c.dc_current,
exp_vna_info.c.dc_field,
exp_vna_info.c.exp_comment,
exp_vna_info.c.ac_coil_id,
exp_vna_info.c.dc_coil_id,
exp_vna_info.c.patch_included,
exp_vna_info.c.subtract_exp_id,
patch_info.c.material_id,
patch_info.c.name,
patch_info.c.density,
patch_info.c.poissons_ratio,
patch_info.c.shear_modulus,
patch_info.c.material_comment,
patch_info.c.width,
patch_info.c.height,
patch_info.c.grain_orientation,
patch_info.c.patch_comment,
]).select_from(exp_vna_info.\
join(patch_info, exp_vna_info.c.patch_id == patch_info.c.patch_id).\
join(exp_ac_coil_prop, exp_vna_info.c.ac_coil_id == exp_ac_coil_prop.c.id).\
join(exp_dc_coil_prop, exp_vna_info.c.dc_coil_id == exp_dc_coil_prop.c.id)\
))
'''
exp_info = view("exp_info", meta,
select([
exp.c.id,
exp.c.engine_load,
exp.c.rpm,
exp.c.ign,
exp.c.volt,
exp.c.efstart,
exp.c.efend,
exp.c.comment,
func.avg(expcycle.c.rpm).label("rpm_avg"),
func.avg(expcycle.c.pmax).label("pmax_avg"),
func.avg(expcycle.c.pmax_pos).label("pmax_pos_avg"),
func.avg(expcycle.c.soc).label("soc_avg"),
func.avg(expcycle.c.i05).label("i05_avg"),
func.avg(expcycle.c.i10).label("i10_avg"),
func.avg(expcycle.c.i50).label("i50_avg"),
func.avg(expcycle.c.i90).label("i90_avg"),
func.avg(expcycle.c.eoc).label("eoc_avg"),
func.avg(expcycle.c.pmep).label("pmep_avg"),
(func.stdev(expcycle.c.pmep) / func.avg(expcycle.c.pmep)).label("pmep_cov"),
func.avg(expcycle.c.imepg).label("imepg_avg"),
(func.stdev(expcycle.c.imepg) / func.avg(expcycle.c.imepg)).label("imepg_cov"),
func.avg(expcycle.c.imepn).label("imepn_avg"),
(func.stdev(expcycle.c.imepn) / func.avg(expcycle.c.imepn)).label("imepn_cov"),
func.avg(expcycle.c.power).label("power_avg"),
func.avg(expcycle.c.torque).label("torque_avg"),
func.avg(expcycle.c.work).label("work_avg"),
]).select_from(exp.join(exp_vna, exp.c.id == exp_vna.c.exp_id)).group_by(exp.c.id)
#where(exp.c.rpm != None).
)
'''
engine = create_engine('mysql://magneto:[email protected]/magneto')
event.listen(engine, 'connect', on_connect)
meta.bind = engine
session = scoped_session(sessionmaker(
autoflush=False,
autocommit=False,
bind=engine))
| unlicense | 2,794,319,458,875,567,600 | 34.707835 | 181 | 0.600863 | false |
GianlucaBortoli/enhanced-clustering | src/graphics.py | 1 | 3378 | # coding: utf-8
from __future__ import print_function
import numpy.random as rnd
from dbb import get_EIA
from matplotlib import pyplot as plt, transforms
from matplotlib.patches import Ellipse
from pylab import figure
from scipy.interpolate import interp1d
FIGSIZE = (20, 14)
EXT = "png"
def extract_dname(dname):
return dname.split('/')[-1].split('.')[0]
def plot_cool_figure(xs, ys, hx, hy, centroids, px, py, dname, picbound):
# Whole figure
plt.figure(figsize=FIGSIZE, frameon=False)
# Extract boundaries
minx, maxx, miny, maxy = picbound
# Top bar
x_density = plt.subplot2grid((4, 4), (0, 1), colspan=3)
plot_density(minx, maxx, hx[0], hx[1], px, x_density)
x_density.tick_params(axis='x', which='both',
bottom='off', top='on',
labelbottom='off', labeltop='on')
x_density.tick_params(axis='y', which='both',
left='off', right='on',
labelleft='off', labelright='on')
plt.grid(which='major', axis='x')
# Left Bar
y_density = plt.subplot2grid((4, 4), (1, 0), rowspan=3)
plot_density(miny, maxy, hy[0], hy[1], py, y_density, rotation=90)
y_density.tick_params(axis='x', which='both',
bottom='on', top='off',
labelbottom='on', labeltop='off')
plt.xticks(rotation=90)
plt.grid(which='major', axis='y')
# Actual data
data = plt.subplot2grid((4, 4), (1, 1), rowspan=3, colspan=3)
data.scatter(xs, ys)
data.scatter(*zip(*centroids))
data.tick_params(axis='y', which='both',
left='off', right='on',
labelleft='off', labelright='on')
data.set_ylim([miny, maxy])
data.set_xlim([minx, maxx])
plt.grid()
plt.tight_layout()
plt.savefig('img/%s_coolfig.%s' % (extract_dname(dname), EXT),
transparent=True, bbox_inches='tight', pad_inches=0)
def plot_density(mins, maxs, hist_x, hist_y, peaks, ax, rotation=0):
# Rotation
base = ax.transData
rot = transforms.Affine2D().rotate_deg(rotation)
# Density interpolation
f = interp1d(hist_x, hist_y, kind=3, assume_sorted=False)
ax.plot(hist_x, f(hist_x), 'g--', transform=rot + base)
if rotation in [0, 180]:
ax.set_xlim([mins, maxs])
else:
ax.set_ylim([mins, maxs])
# peaks
peaks_x, peaks_y = zip(*[(hist_x[z], hist_y[z]) for z in peaks])
ax.plot(peaks_x, peaks_y, 'kD', transform=rot + base)
def plot_density_ellipses(xs, ys, ellipses, dname, i, picbound):
fig = figure(figsize=FIGSIZE, frameon=False)
ax = fig.add_subplot(111, aspect='equal')
# The points
ax.scatter(xs, ys)
# The ellipses
for (c, ((xmean, xstd, wx), (ymean, ystd, wy))) in ellipses:
loc = (xmean, ymean)
w, h = get_EIA((c, ((xmean, xstd, wx), (ymean, ystd, wy))))
ellipse = Ellipse(xy=loc, width=w, height=h, color='black')
ellipse.set_alpha(0.45)
ellipse.set_facecolor(rnd.rand(3))
ellipse.set_clip_box(ax.bbox)
ax.add_patch(ellipse)
ax.scatter(*loc, color='r')
ax.set_ylim(picbound[2:])
ax.set_xlim(picbound[:2])
plt.grid()
plt.savefig('img/%s_density_%d.%s' % (extract_dname(dname), i, EXT),
transparent=True, bbox_inches='tight', pad_inches=0)
| bsd-3-clause | -1,914,083,688,656,876,000 | 31.480769 | 73 | 0.583777 | false |
reed-college/lemur | lemur/tests/helper_random.py | 1 | 9341 | # This file consists of the functions that generate random values or
# create objects in db for testing
import sys
sys.path.append('../..')
# Libraries
# Standard library
from random import choice, randint, shuffle
from string import ascii_lowercase, ascii_uppercase, digits
# Local
from lemur import models as m
from lemur.utility_generate_and_convert import (generate_lab_id,
generate_experiment_id,
generate_observation_id,
generate_class_id)
from lemur.utility_find_and_get import (get_power,
get_role)
# --- Some helper functions used to add randomness into our tests ---
# generate a random combination of letters (both upper and lower) and digits
# with a random length within the interval
def randlength_word(min_len=5, max_len=10):
return ''.join(choice(ascii_lowercase + ascii_uppercase +
digits) for i in range(randint(min_len, max_len)))
# generate a random lab status
def rand_lab_status(status=('Activated', 'Unactivated', 'Downloadable')):
return status[randint(0, len(status)-1)]
# generate a random experiment order
def rand_order(min=1, max=100):
return randint(min, max)
# generate a random value type
def rand_value_type(value_types=('Text', 'Number')):
return value_types[randint(0, len(value_types)-1)]
# generate a random value range in required format
def rand_value_range(min=0, max=10000):
return (str(randint(min, max))+'.'+str(randint(min, max)) +
'-'+str(randint(min, max))+'.'+str(randint(min, max)))
# generate a list of random value candidates(the number of which is random)
# in required format
def rand_value_candidates(min_number=1, max_number=10):
candidates = randlength_word()
for _ in range(randint(min_number-1, max_number-1)):
candidates += (randlength_word()+',')
return candidates
# generate a random lab id in the required format
def rand_lab_id():
return randlength_word()+':'+randlength_word()
# generate a random experiment id in the required format
def rand_experiment_id():
return rand_lab_id()+':'+randlength_word()
# generate a random value for classTime attribute in Class class
def rand_classtime(classtime_list=['FALL2016', 'SPRING2017', 'FALL2017']):
return classtime_list[randint(0, len(classtime_list)-1)]
def rand_classid():
return randlength_word()+'_'+rand_classtime()
# generate a power lists
def rand_powers():
m.Role.insert_roles()
permission_list = [m.Permission.DATA_ENTRY,
m.Permission.DATA_EDIT,
m.Permission.LAB_SETUP,
m.Permission.ADMIN,
m.Permission.LAB_MANAGE,
m.Permission.USER_MANAGE,
m.Permission.SUPERADMIN]
shuffle(permission_list)
return [get_power(p) for p in
permission_list[0:randint(1, len(permission_list))]]
# Generate a random value for role_name attribute in User class
def rand_role(roles=['SuperAdmin', 'Admin', 'Student']):
return roles[randint(0, len(roles)-1)]
# Generate a random value for role_name attribute in User class
# among all the possible admin role names
def rand_admin(roles=['SuperAdmin', 'Admin']):
return roles[randint(0, len(roles)-1)]
# Generate a random integer which is the number of a loop in many testing
# functions
def rand_round(min_round=1, max_round=5):
return randint(min_round, max_round)
# Generate a random list of students in string format with comma as delimiter
def rand_student_names(number=5):
s = ''
for _ in range(number):
username = randlength_word()
# Avoid repetition of username
while s.find(username) != -1:
username = randlength_word()
s += (username+',')
s = s.rstrip(',')
return s
def rand_observations_group_by_experiment_name():
observations_group_by_experiment_name = []
lab_id = rand_lab_id()
student_num = rand_round()
student_name_list = [randlength_word() for _ in range(student_num)]
for i in range(rand_round()):
experiment_name = randlength_word()
experiment_id = generate_experiment_id(lab_id, experiment_name)
experiment = {'experiment_id': experiment_id, 'experiment_name': experiment_name,'observations':[]}
for i in range(student_num):
student_name = student_name_list[i]
observation_id = generate_observation_id(experiment_id, student_name)
observation_datum = randlength_word()
observation = {'observation_id': observation_id,
'student_name': student_name,
'observation_data': observation_datum,
'lab_id': lab_id,
'experiment_id': experiment_id}
experiment['observations'].append(observation)
observations_group_by_experiment_name.append(experiment)
return observations_group_by_experiment_name
# - A list of helper functions for creating a random object in database -
def create_class(db):
name = randlength_word()
time = randlength_word()
class_id = generate_class_id(name, time)
while db.session.query(m.Class).filter(m.Class.id == class_id).count() != 0:
name = randlength_word()
time = randlength_word()
class_id = generate_class_id(name, time)
the_class = m.Class(id=class_id, name=name, time=time)
db.session.add(the_class)
db.session.commit()
class_query = db.session.query(m.Class).filter(m.Class.id == class_id).first()
return class_query
def create_lab(db):
the_class = create_class(db)
name = randlength_word()
class_id = the_class.id
description = randlength_word()
status = 'Activated'
lab_id = generate_lab_id(name, class_id)
while db.session.query(m.Lab).filter(m.Lab.id == lab_id).count() != 0:
class_id = rand_classid()
description = randlength_word()
lab_id = generate_lab_id(name, class_id)
lab = m.Lab(id=lab_id, name=name, the_class=the_class,
description=description, status=status)
db.session.add(lab)
db.session.commit()
lab_query = db.session.query(m.Lab).filter(m.Lab.id == lab_id).first()
return lab_query
def create_experiment(db, lab_id=rand_lab_id()):
name = randlength_word()
description = randlength_word()
order = rand_order()
value_type = rand_value_type()
value_range = rand_value_range()
value_candidates = rand_value_candidates()
experiment_id = generate_experiment_id(lab_id, name)
while db.session.query(m.Experiment).filter(m.Experiment.id == experiment_id).count() != 0:
name = randlength_word()
lab_id = rand_lab_id()
experiment_id = generate_experiment_id(lab_id, name)
experiment = m.Experiment(id=experiment_id, name=name,
description=description, order=order,
value_type=value_type,
value_range=value_range,
value_candidates=value_candidates)
db.session.add(experiment)
db.session.commit()
experiment_query = db.session.query(m.Experiment).filter(m.Experiment.id == experiment_id).first()
return experiment_query
def create_observation(db, experiment_id=rand_experiment_id()):
student_name = randlength_word()
datum = randlength_word()
observation_id = generate_observation_id(experiment_id, student_name)
while db.session.query(m.Observation).filter(m.Observation.id == observation_id).count() != 0:
student_name = randlength_word()
experiment_id = rand_experiment_id()
observation_id = generate_observation_id(experiment_id,
student_name)
observation = m.Observation(id=observation_id,
student_name=student_name,
datum=datum)
db.session.add(observation)
db.session.commit()
observation_query = db.session.query(m.Observation).filter(m.Observation.id == observation_id).first()
return observation_query
def create_user(db):
username = randlength_word()
while db.session.query(m.User).filter(m.User.id == username).count() != 0:
username = randlength_word()
name = randlength_word()
user = m.User(id=username,
name=name)
db.session.add(user)
db.session.commit()
user_query = db.session.query(m.User).filter(m.User.id == username).first()
return user_query
def create_role(db):
name = randlength_word()
while db.session.query(m.Role).filter(m.Role.name == name).count() != 0:
name = randlength_word()
powers = rand_powers()
role = m.Role(name=name, powers=powers)
db.session.add(role)
db.session.commit()
role_query = get_role(name)
return role_query
def create_power(db):
id = randlength_word()
while db.session.query(m.Power).filter(m.Power.id == id).count() != 0:
id = randlength_word()
power = m.Power(id=id)
db.session.add(power)
db.session.commit()
power_query = get_power(id)
return power_query
| mit | -3,815,309,735,058,515,000 | 35.488281 | 107 | 0.632588 | false |
datawire/quark | quarkc/test/ffi/expected/py/signatures/inheritance/use_before_def/__init__.py | 1 | 1353 | from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import str as unicode
from quark_runtime import *
_lazyImport.plug("inheritance.use_before_def")
import quark.reflect
class Bar(_QObject):
def _init(self):
pass
def __init__(self): self._init()
def go(self):
pass
def _getClass(self):
return u"inheritance.use_before_def.Bar"
def _getField(self, name):
return None
def _setField(self, name, value):
pass
Bar.inheritance_use_before_def_Bar_ref = None
class Foo(_QObject):
def _init(self):
self.name = None
def __init__(self): self._init()
def _getClass(self):
return u"inheritance.use_before_def.Foo"
def _getField(self, name):
if ((name) == (u"name")):
return (self).name
return None
def _setField(self, name, value):
if ((name) == (u"name")):
(self).name = _cast(value, lambda: unicode)
Foo.inheritance_use_before_def_Foo_ref = None
def _lazy_import_quark_ffi_signatures_md():
import quark_ffi_signatures_md
globals().update(locals())
_lazyImport("import quark_ffi_signatures_md", _lazy_import_quark_ffi_signatures_md)
_lazyImport.pump("inheritance.use_before_def")
| apache-2.0 | -9,112,272,500,425,932,000 | 22.327586 | 83 | 0.63932 | false |
KEMPtechnologies/python-kemptech-api | python_kemptech_api/objects.py | 1 | 71363 | import re
import logging
from collections import OrderedDict
from python_kemptech_api.api_xml import (
get_data,
is_successful,
get_error_msg)
from python_kemptech_api.exceptions import (
KempTechApiException,
SubVsCannotCreateSubVs,
RealServerMissingVirtualServiceInfo,
RealServerMissingLoadmasterInfo,
VirtualServiceACLMissingVirtualServiceInfo,
ValidationError,
RuleMissingLoadmasterInfo,
RangeMissingLoadmasterInfo,
LoadMasterParameterError,
SiteMissingFQDNInfo,
SiteMissingLoadmasterInfo,
ClusterMissingLoadmasterInfo,
CertificateMissingLoadmasterInfo,
CipherListInvalid,
RangeMaskInvalid,
VirtualServiceMissingLoadmasterInfo)
from python_kemptech_api.generic import BaseKempObject
from python_kemptech_api.utils import (
validate_ip,
validate_port,
validate_protocol,
get_sub_vs_list_from_data,
send_response,
cast_to_list,
falsey_to_none, build_object)
log = logging.getLogger(__name__)
class VirtualService(BaseKempObject):
_API_ADD = "/addvs"
_API_MOD = "/modvs"
_API_DELETE = "/delvs"
_API_GET = "/showvs"
_API_EXPORT = "/exportvstmplt"
_API_LIST = "/listvs"
API_TAG = "VS"
API_INIT_PARAMS = {
"vs": "VSAddress",
"port": "VSPort",
"prot": "Protocol"
}
_API_BASE_PARAMS = [
"vs",
"port",
"prot"
]
_API_DEFAULT_ATTRIBUTES = {
"status": "Status",
"index": "Index",
"vs": "VSAddress",
"altaddress": "AltAddress",
"extraports": "ExtraPorts",
"enable": "Enable",
"vsaddress": "VSAddress",
"vstype": "VStype",
"mastervsid": "MasterVSID",
"nickname": "NickName",
# Scheduling and Persistence
"schedule": "Schedule",
"adaptive": "Adaptive",
"persist": "Persist",
"persisttimeout": "PersistTimeout",
"querytag": "QueryTag",
"cookie": "Cookie",
# Advanced
"standbyaddr": "StandbyAddr",
"standbyport": "StandbyPort",
"defaultgw": "DefaultGW",
# HTTP
"errorcode": "ErrorCode",
"errorurl": "ErrorUrl",
"errorpage": "ErrorPage",
# Healthcheck
"checktype": "CheckType",
"checkport": "CheckPort",
"checkurl": "CheckUrl",
"checkheaders": "CheckHeaders",
"checkuse1_1": "CheckUse1.1",
"checkuseget": "CheckUseGet",
"checkpostdata": "CheckPostData",
"checkpattern": "CheckPattern",
"checkcodes": "CheckCodes",
"matchlen": "MatchLen",
"enhancedhealthchecks": "EnhancedHealthChecks",
"rsminimum": "RsMinimum",
# L7
"forcel7": "ForceL7",
"transparent": "Transparent",
"subnetoriginating": "SubnetOriginating",
"useforsnat": "UseforSnat",
"localbindaddrs": "LocalBindAddrs",
"serverinit": "ServerInit",
"idletime": "Idletime",
"addvia": "AddVia",
"extrahdrkey": "ExtraHdrKey",
"extrahdrvalue": "ExtraHdrValue",
"qos": "QoS",
# Content Rules
"rsruleprecedence": "RSRulePrecedence",
"rsruleprecedencepos": "RSRulePrecedencePos",
# SSL
"sslacceleration": "SSLAcceleration",
"sslrewrite": "SSLRewrite",
"sslreverse": "SSLReverse",
"sslreencrypt": "SSLReencrypt",
"starttlsmode": "StartTLSMode",
"tlstype": "TlsType",
"cipherset": "CipherSet",
"certfile": "CertFile",
"clientcert": "ClientCert",
"ocspverify": "OCSPVerify",
"reversesnihostname": "ReverseSNIHostname",
"needhostname": "NeedHostName",
# AFE
"multiconnect": "MultiConnect",
"verify": "Verify",
"compress": "Compress",
"cache": "Cache",
"cachepercent": "CachePercent",
# WAF
"alertthreshold": "AlertThreshold",
"intercept": "Intercept",
# ESP
"espenabled": "EspEnabled",
}
_ESP_PARAMS = [
"espenabled"
]
_WAF_PARAMS = [
"alertthreshold",
"intercept"
]
_SSL_PARAMS = [
"sslrewrite",
"sslreverse",
"sslreencrypt",
"starttlsmode",
"tlstype",
"cipherset",
"certfile",
"clientcert",
"ocspverify",
"reversesnihostname",
"needhostname",
]
def __init__(self, loadmaster_info, vs, port=80, prot="tcp",
is_sub_vs=False):
"""Construct VirtualService object.
:param loadmaster_info: The loadmaster dict with the endpoint params.
:param vs: IP or index of the VS. When creating a subvs you
must pass the index and set the is_sub_vs flag to true in order
for createsubvs to behave correctly. The index will be
overwritten with the index of the newly created subvs on save().
:param port: Port of the virtual service.
:param prot: Protocol of the virtual service.
:param is_sub_vs: Whether or not it is a subvs, mark this as true and
pass the parent VS index as the ip_or_index parameter.
"""
self.index = None # to avoid AttributeErrors later
self._is_sub_vs = is_sub_vs
self.subvs_data = None
self.subvs_entries = []
self.real_servers = []
self.sslrewrite = None
self.certfile = None
self._waf = False
self._esp = False
self._ssl = False
if not is_sub_vs:
# Skip validation when it is a subvs as they do not have ip/port
self.vs = vs
self.port = port
self.prot = prot
validate_ip(vs)
validate_port(port)
validate_protocol(prot)
else:
self.index = self.vs = vs
try:
self.endpoint = loadmaster_info["endpoint"]
except KeyError:
raise VirtualServiceMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_info["ip_address"]
except KeyError:
raise VirtualServiceMissingLoadmasterInfo("ip_address")
super(VirtualService, self).__init__(loadmaster_info)
self.cert = loadmaster_info.get("cert")
def __str__(self):
try:
if int(self.vs):
return 'Sub Virtual Service {} on LoadMaster {}'.format(
self.vs, self.ip_address)
except ValueError:
return 'Virtual Service {} {}:{} on LoadMaster {}'.format(
self.prot.upper(), self.vs, self.port, self.ip_address)
@property
def servers(self):
return {int(rs.rsindex): rs for rs in self.get_real_servers()}
def to_api_dict(self):
api = super(VirtualService, self).to_api_dict()
def delete_non_existing_parameters(api_params, mode, params):
if not mode:
for entry in params:
try:
del api_params[entry]
except KeyError:
# If it doesn't exist don't do anything
pass
return api_params
api = delete_non_existing_parameters(api, self._waf, self._WAF_PARAMS)
api = delete_non_existing_parameters(api, self._esp, self._ESP_PARAMS)
api = delete_non_existing_parameters(api, self._ssl, self._SSL_PARAMS)
try:
if self._is_sub_vs:
del api['enable']
except KeyError:
pass
return api
def export(self):
return self._get(self._API_EXPORT, self._get_base_parameters())
def _get_base_parameters(self):
"""Returns the bare minimum VS parameters. IP, port and protocol"""
if self.index is None:
return {
"vs": self.vs,
"port": self.port,
"prot": self.prot,
}
else:
return {"vs": self.index}
def _subvs_to_dict(self):
return {
"vs": self.subvs_data['parentvs'],
"rs": "!{}".format(self.subvs_data['RsIndex']),
"name": self.subvs_data['Name'],
"forward": self.subvs_data['Forward'],
"weight": self.subvs_data['Weight'],
"limit": self.subvs_data['Limit'],
"critical": self.subvs_data['Critical'],
"enable": self.subvs_data['Enable']
}
@property
def checkuse1_1(self):
"""This property exists because . can not be in a variable name.
vs.checkuse1_1 is used for access to this variable, but internally the
data is stored as obj.checkuse1.1 - this is because in order to write
to the API, the parameter uses the string literal 'checkuse1.1' """
return self.__dict__.get('checkuse1.1', None)
@checkuse1_1.setter
def checkuse1_1(self, value):
"""This property exists because . can not be in a variable name.
vs.checkuse1_1 is used for access to this variable, but internally the
data is stored as obj.checkuse1.1 - this is because in order to write
to the API, the parameter uses the string literal 'checkuse1.1' """
self.__dict__['checkuse1.1'] = value
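    # Illustrative sketch (not part of the library): the property pair above maps
    # the Pythonic attribute name to the literal "checkuse1.1" key expected by the
    # API. Assuming "vs" is an existing VirtualService instance:
    #
    #     vs.checkuse1_1 = "Y"           # stored as vs.__dict__["checkuse1.1"]
    #     vs.checkuse1_1                 # -> "Y", sent to the API as checkuse1.1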
def save(self, update=False):
# Parse certfile field if SSL acceleration is enabled
if hasattr(self, "sslacceleration") and self.sslacceleration == "Y":
if hasattr(self, "certfile"):
if isinstance(self.certfile, list):
self.certfile = " ".join(self.certfile)
if isinstance(self.certfile, str):
self.certfile = self.certfile.strip()
else:
self.certfile = None
# Clear the persist timeout if persistence is not used
if hasattr(self, "persist") and self.persist is None:
self.persisttimeout = None
if self.subvs_entries:
self.enhancedhealthchecks = None
if not update:
if self._is_sub_vs:
# Hell, thy name be subvs
response = self._get("/showvs", self._get_base_parameters())
data = get_data(response)
existing_subvs_entries = get_sub_vs_list_from_data(data)[0]
params = self._get_base_parameters()
params["createsubvs"] = ""
response = self._get("/modvs", params)
data = get_data(response)
new_subvs_entries, subvs_data = get_sub_vs_list_from_data(data)
s = set(existing_subvs_entries)
                # Subtract the existing IDs from the new IDs to know which ID
                # to use when populating this instance's attributes
created_subvs_id = [x for x in new_subvs_entries
if x not in s]
newly_created_vs_params = {"vs": created_subvs_id}
self.subvs_data = subvs_data[created_subvs_id[0]]
self.subvs_data['parentvs'] = self.vs
response = self._get("/showvs", newly_created_vs_params)
else:
response = self._get("/addvs", self.to_api_dict())
else: # Update
if self._is_sub_vs:
# Update the underlying "Rs" part of the subvs as well
self._get("/modrs", self._subvs_to_dict())
response = self._get("/modvs", self.to_api_dict())
if is_successful(response):
vs_data = get_data(response)
self.populate_default_attributes(vs_data)
else:
raise KempTechApiException(get_error_msg(response))
def create_sub_virtual_service(self):
"""VirtualService factory with pre-configured LoadMaster connection
When creating a virtual service that is a sub virtual service you must
pass the parent index to the constructor and mark the is_sub_vs flag
as true. This will allow the save() method on the newly created subvs
instance to be able to create a subvs against the parent vs. The index
attribute will then be overwritten on save with the subvs's index.
"""
if self._is_sub_vs:
raise SubVsCannotCreateSubVs()
return VirtualService(self.access_info, self.index, is_sub_vs=True)
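    # Illustrative usage sketch (hedged; "lm" is a hypothetical LoadMaster whose
    # access_info dict carries the endpoint and ip_address keys required here):
    #
    #     parent = VirtualService(lm.access_info, "10.0.0.15", 443, "tcp")
    #     parent.save()
    #     sub_vs = parent.create_sub_virtual_service()
    #     sub_vs.nickname = "backend-a"
    #     sub_vs.save()   # index is overwritten with the newly created SubVS index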
def create_real_server(self, ip, port=80):
"""RealServer factory with pre-configured LoadMaster connection."""
return RealServer(self.access_info, ip, port)
def create_access_control(self, addvs, addr):
"""AccessControl factory with pre-configured LoadMaster connection and
ACL definition."""
log.info("This method has been deprecated, please manipualte ACL "
"objects directly")
acl = self.acl
if addvs == "black":
acl.blacklist[addr] = ""
elif addvs == "white":
acl.whitelist[addr] = ""
else:
log.warning("ACL List %s is not valid, ACLs have not been modified",
addvs)
acl.update()
return acl
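    # Illustrative sketch of the preferred, direct ACL manipulation (hedged;
    # assumes "vs" is a saved VirtualService instance):
    #
    #     acl = vs.acl                          # VirtualServiceACL for this service
    #     acl.blacklist["192.0.2.7"] = "scanner"
    #     acl.whitelist["198.51.100.0/24"] = ""
    #     acl.save()                            # syncs both lists to the LoadMaster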
@property
def acl(self):
return self.get_vs_acl()
def get_vs_acl(self):
access_info = self.access_info
access_info["ip"] = self.vs
access_info["port"] = self.port
access_info["prot"] = self.prot
return VirtualServiceACL(self.access_info)
def get_real_server(self, real_server_address=None, real_server_port=None):
validate_port(real_server_port)
if self.index is None:
server_id = {
"vs": self.vs,
"port": self.port,
"prot": self.prot,
"rs": real_server_address,
"rsport": real_server_port,
}
else:
server_id = {
"vs": self.index,
"rs": real_server_address,
"rsport": real_server_port,
}
response = self._get("/showrs", server_id)
response_data = get_data(response)
server = response_data.get("Rs", {})
# if there is no Rs key, the following will fail with a ValidationError
# which is the best we can do for now
real_server = self.build_real_server(server)
return real_server
def get_real_servers(self):
response = self._get("/showvs", self._get_base_parameters())
data = get_data(response)
real_servers = []
servers = data.get('Rs', [])
servers = cast_to_list(servers)
for server in servers:
real_server = self.build_real_server(server)
real_servers.append(real_server)
return real_servers
def build_real_server(self, server):
if "Addr" not in server:
raise ValidationError('"Addr" key not present {}'.format(server))
if "Port" not in server:
raise ValidationError('"Port" key not present {}'.format(server))
real_server = build_object(RealServer, self.access_info, server)
return real_server
def populate_default_attributes(self, service):
"""Populate VirtualService instance with standard defaults"""
# pylint: disable=too-many-branches,too-many-statements
#super(VirtualService, self).populate_default_attributes(dictionary)
self.status = service.get('Status', None)
self.index = service.get('Index', None)
self.enable = service.get('Enable', None)
self.forcel7 = service.get('ForceL7', None)
self.vstype = service.get('VStype', None)
self.schedule = service.get('Schedule', None)
self.nickname = service.get('NickName', None)
self.altaddress = service.get('AltAddress', None)
self.transparent = service.get('Transparent', None)
self.useforsnat = service.get('UseforSnat', None)
self.persist = service.get('Persist', None)
self.cookie = service.get('Cookie', None)
self.extraports = service.get('ExtraPorts', None)
self.qos = service.get('QoS', None)
self.idletime = service.get('Idletime', None)
self.mastervsid = service.get('MasterVSID', None)
self.querytag = service.get('QueryTag', None)
self.serverinit = service.get('ServerInit', None)
self.addvia = service.get('AddVia', None)
self.subnetoriginating = service.get('SubnetOriginating', None)
self.localbindaddrs = service.get('LocalBindAddrs', None)
self.defaultgw = service.get('DefaultGW', None)
#self.followvsid = falsey_to_none(int(service.get('FollowVSID', 0)))
self.standbyaddr = service.get('StandbyAddr', None)
self.standbyport = service.get('StandbyPort', None)
self.errorcode = service.get('ErrorCode', None)
self.errorurl = service.get('ErrorUrl', None)
self.errorpage = service.get('ErrorPage', None)
# WAF
self.alertthreshold = service.get('AlertThreshold', None)
self.intercept = service.get('Intercept', None)
# ESP
self.espenabled = service.get('EspEnabled', None)
# Set meta values for whether WAF and ESP are enabled
if self.alertthreshold is None or int(self.alertthreshold) == 0:
self._waf = False
else:
self._waf = True
if self.espenabled is None or self.espenabled == 'N':
self._esp = False
else:
self._esp = True
self.multiconnect = service.get('MultiConnect', None)
self.verify = service.get('Verify', None)
self.compress = service.get('Compress', None)
self.cache = service.get('Cache', None)
self.cachepercent = service.get('CachePercent', None)
self.sslacceleration = service.get('SSLAcceleration', None)
self.sslrewrite = service.get('SSLRewrite', None)
self.sslreverse = service.get('SSLReverse', None)
self.sslreencrypt = service.get('SSLReencrypt', None)
self.starttlsmode = service.get('StartTLSMode', None)
self.tlstype = service.get('TlsType', None)
self.cipherset = service.get('CipherSet', None)
self.certfile = service.get('CertFile', None)
self.clientcert = service.get('ClientCert', None)
self.ocspverify = service.get('OCSPVerify', None)
self.reversesnihostname = service.get('ReverseSNIHostname', None)
self.needhostname = service.get('NeedHostName', None)
if self.sslacceleration is None or self.sslacceleration == 'N':
self._ssl = False
else:
self._ssl = True
# If SSL Acceleration is not enabled, clear the TLS type and Ciphers
# These are not valid to set if Acceleration is off
if not self._ssl:
self.tlstype = None
self.cipherset = None
self.ciphers = None
self.needhostname = None
else:
# Rewrite the SSL Rewrite value based on the table:
# SSL Rewrite cannot be set as an integer, even
# though it outputs as an integer
            sslrewrite = {
                0: None,
                1: "http",
                2: "https"
            }
            try:
                # Try casting to an int in the case that the end user passes
                # the string version of the int.
                self.sslrewrite = sslrewrite[int(self.sslrewrite)]
            except (KeyError, TypeError, ValueError):
                self.sslrewrite = None
#
if self.certfile is not None:
log.info("Splitting certfile field into a list")
self.certfile = str(self.certfile).split()
else:
self.certfile = []
# If there's just one certificate, identified by a 32 character
# hex string, it's a self signed certificate and the list should
# be cleared since setting this value is invalid.
if len(self.certfile) == 1:
if re.match("[0-9a-f]{32}", self.certfile[0]) is not None:
self.certfile = []
# Real servers section
self.checktype = service.get('CheckType', None)
self.checkhost = service.get('CheckHost', None)
self.checkpattern = service.get('CheckPattern', None)
self.checkurl = service.get('CheckUrl', None)
self.checkcodes = service.get('CheckCodes', None)
self.checkheaders = service.get('CheckHeaders', None)
self.matchlen = service.get('MatchLen', None)
self.checkuse1_1 = service.get('CheckUse1.1', None)
self.checkport = falsey_to_none(int(service.get('CheckPort', 0)))
self.checkuseget = service.get('CheckUseGet', None)
self.extrahdrkey = service.get('ExtraHdrKey', None)
self.extrahdrvalue = service.get('ExtraHdrValue', None)
self.checkpostdata = service.get('CheckPostData', None)
self.rsruleprecedence = service.get('RSRulePrecedence', None)
self.rsruleprecedencepos = service.get('RSRulePrecedencePos', None)
self.enhancedhealthchecks = service.get('EnhancedHealthChecks', None)
# Handle non-standard behavior of Adaptive and Schedule parameters
self.adaptive = service.get('Adaptive', None)
if self.adaptive == 'http_rs':
self.adaptive = None
self.schedule = 'adaptive'
elif self.adaptive == 'sdn_gstats':
self.adaptive = None
self.schedule = 'sdn-adaptive'
# Disable enable argument if it's a SubVS
if self.vs is None:
self.enable = None
self.persisttimeout = falsey_to_none(int(service.get(
'PersistTimeout', 0)))
self.rsminimum = falsey_to_none(int(service.get('RsMinimum', 0)))
class RealServer(BaseKempObject):
_API_ADD = "/addrs"
_API_MOD = "/modrs"
_API_DELETE = "/delrs"
_API_GET = "/showrs"
_API_LIST = "/showvs"
API_TAG = "Rs"
API_INIT_PARAMS = {
"ip": "Addr",
"port": "Port"
}
_API_BASE_PARAMS = [
"vs",
"port",
"prot",
"rs",
"rsport"
]
_API_DEFAULT_ATTRIBUTES = {
"addr": "Addr",
"status": "Status",
"rsindex": "RsIndex",
"vsindex": "VsIndex",
"enable": "Enable",
"forward": "Forward",
"weight": "Weight",
"limit": "Limit",
"critical": "Critical",
"follow": "Follow",
"dnsname": "DnsName"
}
@property
def rs(self):
if hasattr(self, "dnsname") and self.dnsname is not None:
return self.dnsname
else:
return self.addr
@rs.setter
def rs(self, value):
try:
validate_ip(value)
except ValidationError:
self.dnsname = value
else:
self.addr = value
def to_api_dict(self):
# Populate RS field into dictionary manually
as_dict = super(RealServer, self).to_api_dict()
as_dict['rs'] = self.rs
as_dict.pop('addr')
return as_dict
def __init__(self, loadmaster_virt_service_info, ip, port=80):
self.rsindex = None
self.rs = ip
self.rsport = port
validate_port(port)
try:
self.vs = loadmaster_virt_service_info["vs"]
except KeyError:
raise RealServerMissingVirtualServiceInfo("vs")
self.port = loadmaster_virt_service_info.get("port", None)
self.prot = loadmaster_virt_service_info.get("prot", None)
try:
self.endpoint = loadmaster_virt_service_info["endpoint"]
except KeyError:
raise RealServerMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_virt_service_info["ip_address"]
except KeyError:
raise RealServerMissingLoadmasterInfo("ip_address")
super(RealServer, self).__init__(loadmaster_virt_service_info)
self.cert = loadmaster_virt_service_info.get("cert")
def __str__(self):
return 'Real Server {} on {}'.format(self.rs, self.vs)
def save(self, update=False):
super(RealServer, self).save(update)
# If a new port is set, update the assigned port value
# in order to correctly look up the updated RS
# If .save fails, this will never be performed
if update and getattr(self, 'newport', None):
self.rsport = self.newport
self.refresh()
class BaseACLObject(BaseKempObject):
_API = "/aclcontrol"
_API_ADD = ""
_API_DEL = ""
_API_LIST = ""
API_INIT_PARAMS = {
}
_API_BASE_PARAMS = [
]
_API_DEFAULT_ATTRIBUTES = {
}
def __init__(self, loadmaster_info):
# Attach to the LoadMaster
self.appliance = loadmaster_info['appliance']
self.blacklist = {}
self.whitelist = {}
super(BaseACLObject, self).__init__(loadmaster_info)
self.refresh()
def save(self, update=False):
self._sync()
self.refresh()
def _sync(self):
# Sync the blacklist and whitelist to the LoadMaster
# Grab the new data and save it before refreshing to get the old data
new_blacklist = self.blacklist
new_whitelist = self.whitelist
self.refresh()
old_blacklist = self.blacklist
old_whitelist = self.whitelist
# Handle the blacklist changes
for address, comment in old_blacklist.items():
if address not in new_blacklist.keys():
self._delete_entry('black', address)
else:
if new_blacklist[address] != comment:
self._delete_entry('black', address)
self._add_entry('black', address, new_blacklist[address])
for address, comment in {key: value for key, value in new_blacklist.items()
if key not in old_blacklist.keys()}.items():
self._add_entry('black', address, comment)
# Now handle the whitelist
for address, comment in old_whitelist.items():
if address not in new_whitelist.keys():
self._delete_entry('white', address)
else:
if new_whitelist[address] != comment:
self._delete_entry('white', address)
self._add_entry('white', address, new_whitelist[address])
for address, comment in {key: value for key, value in new_whitelist.items()
if key not in old_whitelist.keys()}.items():
self._add_entry('white', address, comment)
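    # Worked example of the diff above (illustrative data, not from a LoadMaster):
    #
    #     old = {"192.0.2.1": ""}
    #     new = {"192.0.2.1": "edge", "192.0.2.2": ""}
    #     # -> _delete_entry("black", "192.0.2.1") then
    #     #    _add_entry("black", "192.0.2.1", "edge"); _add_entry("black", "192.0.2.2", "")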
def _add_entry(self, list_type, address, comment=None):
parameters = self._get_base_parameters()
parameters[self._API_ADD] = list_type
parameters['addr'] = address
# Only valid on 7.2.37.0 and higher
if self.appliance['version'] >= "7.2.37.0":
parameters['comment'] = comment
response = self._get( # pylint: disable=protected-access
self._API,
parameters)
if not is_successful(response):
raise KempTechApiException(get_error_msg(response))
def _delete_entry(self, list_type, address):
parameters = self._get_base_parameters()
parameters[self._API_DEL] = list_type
parameters['addr'] = address
response = self._get( # pylint: disable=protected-access
self._API,
parameters)
if not is_successful(response):
raise KempTechApiException(get_error_msg(response))
def refresh(self):
# First handle whitelist
parameters = self._get_base_parameters()
parameters[self._API_LIST] = "white"
whitelist_response = self._get( # pylint: disable=protected-access
self._API,
parameters)
whitelist_data = get_data(whitelist_response)
if isinstance(self, VirtualServiceACL):
whitelist_xml = whitelist_data['VS']['Whitelist']
else:
whitelist_xml = whitelist_data['Whitelist']
if whitelist_xml is None:
self.whitelist = {}
# Handle pre-7.2.37.0 cases
elif "addr" in whitelist_xml.keys():
self.whitelist = {
address: "" for
address in cast_to_list(whitelist_xml['addr'])
}
else:
self.whitelist = {
ip['addr']: ip['comment'] or ""
for ip in cast_to_list(whitelist_xml['IP'])
}
# Next verse, same as the first!
parameters = self._get_base_parameters()
parameters[self._API_LIST] = "black"
blacklist_response = self._get( # pylint: disable=protected-access
self._API,
parameters)
blacklist_data = get_data(blacklist_response)
if isinstance(self, VirtualServiceACL):
blacklist_xml = blacklist_data['VS']['Blacklist']
else:
blacklist_xml = blacklist_data['Blacklist']
if blacklist_xml is None:
self.blacklist = {}
# Handle pre-7.2.37.0 cases
elif "addr" in blacklist_xml.keys():
self.blacklist = {
address: ""
for address in cast_to_list(blacklist_xml['addr'])
}
else:
self.blacklist = {
ip['addr']: ip['comment'] or ""
for ip in cast_to_list(blacklist_xml['IP'])
}
class GlobalACL(BaseACLObject):
_API_ADD = "add"
_API_DEL = "del"
_API_LIST = "list"
def __repr__(self):
return 'Global ACL on {}'.format(self.appliance)
class VirtualServiceACL(BaseACLObject):
_API_ADD = "addvs"
_API_DEL = "delvs"
_API_LIST = "listvs"
_API_BASE_PARAMS = [
"vsip",
"vsport",
"vsprot"
]
def __init__(self, loadmaster_virt_service_info):
try:
self.vsip = loadmaster_virt_service_info["vs"]
except KeyError:
raise VirtualServiceACLMissingVirtualServiceInfo("vs")
try:
self.vsport = loadmaster_virt_service_info.get("port", None)
except KeyError:
raise VirtualServiceACLMissingVirtualServiceInfo("port")
try:
self.vsprot = loadmaster_virt_service_info.get("prot", None)
except KeyError:
raise VirtualServiceACLMissingVirtualServiceInfo("prot")
super(VirtualServiceACL, self).__init__(loadmaster_virt_service_info)
def __repr__(self):
return 'Virtual Service ACL on {}/{}:{}'.format(
self.vsprot,
self.vsip,
self.vsport)
class Template(BaseKempObject):
_API_ADD = ""
_API_MOD = ""
_API_DELETE = "/deltemplate"
_API_GET = "/listtemplates"
_API_LIST = "/listtemplates"
_API_APPLY = "/addvs"
_API_UPLOAD = "/uploadtemplate"
API_TAG = "template"
API_INIT_PARAMS = {
"name": "name"
}
_API_BASE_PARAMS = {
"name": "name"
}
_API_DEFAULT_ATTRIBUTES = {
"name": "name",
"comment": "comment",
"certified": "certified"
}
def __init__(self, loadmaster_info, name):
self.name = name
self.file = None
super(Template, self).__init__(loadmaster_info)
def save(self, update=False):
raise KempTechApiException("Templates are read-only objects")
class Rule(BaseKempObject):
_API_ADD = "/addrule"
_API_MOD = "/modrule"
_API_DELETE = "/delrule"
_API_GET = "/showrule"
_API_LIST = "/showrule"
API_INIT_PARAMS = {
"name": "Name",
"pattern": "Pattern"
}
_API_BASE_PARAMS = {
"name": "Name",
"type": "Type",
"pattern": "Pattern"
}
_API_DEFAULT_ATTRIBUTES = {
"name": "Name",
"type": "Type",
"pattern": "Pattern",
"matchtype": "MatchType",
"addhost": "AddHost",
"negate": "Negate",
"caseindependant": "CaseIndependent",
"includequery": "IncludeQuery",
"header": "Header",
"mustfail": "MustFail",
"headervalue": "HeaderValue",
"replacement": "Replacement",
"setflagonmatch": "SetFlagOnMatch",
"onlyonflag": "OnlyOnFlag"
}
@property
def type_string(self):
types = {
"0": "MatchContentRule",
"1": "AddHeaderRule",
"2": "DeleteHeaderRule",
"3": "ReplaceHeaderRule",
"4": "ModifyURLRule"
}
if self.type is None:
return None
else:
return types[str(self.type)]
@type_string.setter
def type_string(self, value):
types = {
"MatchContentRule": "0",
"AddHeaderRule": "1",
"DeleteHeaderRule": "2",
"ReplaceHeaderRule": "3",
"ModifyURLRule": "4"
}
if value is None:
self.type = None
else:
self.type = types[value]
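    # Illustrative round-trip (not part of the module): the numeric Type code used
    # by the API and the symbolic rule name map through this property pair.
    #
    #     rule.type_string = "ReplaceHeaderRule"   # sets rule.type to "3"
    #     rule.type_string                         # -> "ReplaceHeaderRule"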
def __init__(self, loadmaster_info, name, pattern):
self.populate_default_attributes({})
self.name = name
self.pattern = pattern
try:
self.endpoint = loadmaster_info["endpoint"]
except KeyError:
raise RuleMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_info["ip_address"]
except KeyError:
raise RuleMissingLoadmasterInfo("ip_address")
super(Rule, self).__init__(loadmaster_info)
def __str__(self):
return 'Rule {} on LoadMaster {}'.format(
self.name, self.ip_address)
def _get_base_parameters(self):
base_parameters = super(Rule, self)._get_base_parameters()
# Pattern is not necessary for AddHeader rules
        if self.type in (1, "1"):
base_parameters.pop("pattern")
return base_parameters
def populate_default_attributes(self, parameters):
"""Populate object instance with standard defaults"""
# Get data from inside tag
# Tag is unknown since different rule types have
# different tag names. The generic code using API_TAG
# isn't usable in this case.
#parameters = parameters.popitem()[1]
for attribute, tag in self._API_DEFAULT_ATTRIBUTES.items():
setattr(self, attribute, parameters.get(tag, None))
self.type_string = self.type
class Sso(BaseKempObject):
def __init__(self, loadmaster_info, name):
self.name = name
try:
self.endpoint = loadmaster_info["endpoint"]
except KeyError:
raise RangeMissingLoadmasterInfo("endpoint")
super(Sso, self).__init__(loadmaster_info)
def __str__(self):
return 'SSO {} on LoadMaster {}'.format(
self.name, self.ip_address)
def _get_base_parameters(self):
"""Returns the bare minimum FQDN parameters."""
return {
"domain": self.name
}
def save(self, update=False):
if not update:
response = self._get("/adddomain", self._get_base_parameters())
if not is_successful(response):
raise KempTechApiException(get_error_msg(response))
response = self._get("/moddomain", self.to_api_dict())
if is_successful(response):
sso_data = get_data(response)
self.populate_default_attributes(sso_data)
else:
raise KempTechApiException(get_error_msg(response))
def delete(self):
response = self._get("/deldomain", self._get_base_parameters())
return send_response(response)
def populate_default_attributes(self, sso):
"""Populate SSO instance with standard defaults"""
self.id = sso.get('Id', None)
self.name = sso.get('Name', None)
self.testuser = sso.get('testuser', None)
self.ldap_version = sso.get('ldap_version', None)
self.server_side = sso.get('server_side', None)
self.auth_type = sso.get('auth_type', None)
self.logon_fmt = sso.get('logon_fmt', None)
self.logon_fmt2 = sso.get('logon_fmt2', None)
self.logon_transcode = sso.get('logon_transcode', None)
self.logon_domain = sso.get('logon_domain', None)
self.kerberos_domain = sso.get('kerberos_domain', None)
self.kerberos_kdc = sso.get('kerberos_kdc', None)
self.kcd_username = sso.get('kcd_username', None)
self.max_failed_auths = sso.get('max_failed_auths', None)
self.reset_fail_tout = sso.get('reset_fail_tout', None)
self.unblock_tout = sso.get('unblock_tout', None)
self.sess_tout_type = sso.get('sess_tout_type', None)
self.sess_tout_idle_pub = sso.get('sess_tout_idle_pub', None)
self.sess_tout_duration_pub = sso.get('sess_tout_duration_pub', None)
self.sess_tout_idle_priv = sso.get('sess_tout_idle_priv', None)
self.sess_tout_duration_priv = sso.get('sess_tout_duration_priv', None)
self.cert_check_asi = sso.get('cert_check_asi', None)
class Fqdn(BaseKempObject):
_API_ADD = "/addfqdn"
_API_MOD = "/modfqdn"
_API_DELETE = "/delfqdn"
_API_GET = "/showfqdn"
_API_LIST = "/listfqdns"
API_TAG = "fqdn"
API_INIT_PARAMS = {
"fqdn": "FullyQualifiedDomainName"
}
_API_BASE_PARAMS = [
"fqdn"
]
_API_DEFAULT_ATTRIBUTES = {
"fqdn": "FullyQualifiedDomainName",
"status": "Status",
"selectioncriteria": "SelectionCriteria",
"failtime": "FailTime",
"siterecoverymode": "SiteRecoveryMode",
"failover": "failover",
"publicrequestvalue": "publicRequestValue",
"privaterequestvalue": "privateRequestValue",
"localsettings": "LocalSettings",
"localttl": "LocalTTL",
"localsticky": "LocalSticky",
"unanimouschecks": "UnanimousChecks"
}
def __init__(self, loadmaster_info, fqdn):
self.fqdn = fqdn # to avoid AttributeErrors later
super(Fqdn, self).__init__(loadmaster_info)
def __str__(self):
return 'FQDN {} on LoadMaster {}'.format(
self.fqdn, self.ip_address)
def save(self, update=False):
try:
if self.selectioncriteria != "lb":
# Failover is not available when not using Location Based
del self.failover
except AttributeError:
pass
super(Fqdn, self).save(update)
self.refresh()
def populate_default_attributes(self, dictionary):
super(Fqdn, self).populate_default_attributes(dictionary)
# Failtime is set by minute, but recorded by second
try:
# Try to cast to integer first
self.failtime = int(self.failtime)
            # Check if failtime is a non-zero multiple of 60
if self.failtime > 0 and self.failtime % 60 == 0:
# Convert from seconds to minutes
self.failtime = int(self.failtime / 60)
except (TypeError, AttributeError):
self.failtime = None
@property
def sites(self):
return {site.ipaddress: site for site in self.get_sites()}
def create_site(self, ip):
"""Site factory with pre-configured LoadMaster connection."""
return Site(self.access_info, ip)
def get_site(self, ip):
validate_ip(ip)
service_id = {
"fqdn": self.fqdn,
"ipaddress": ip
}
response = self._get("/showfqdn", service_id)
xml_object = get_data(response)
maps = xml_object["fqdn"].get(Site.API_TAG, {})
if not isinstance(maps, list):
maps = [maps]
map = [m for m in maps if m['IPAddress'] == service_id["ipaddress"]]
# This shouldn't happen, but we should catch it anyway
if len(map) != 1:
raise LoadMasterParameterError(
"Unexpected number of matching sites specified.", map)
return build_object(Site, self.access_info, map[0])
def get_sites(self):
fqdn = {
"fqdn": self.fqdn
}
try:
response = self._get(self._API_LIST, fqdn)
data = get_data(response)
xml_object = data[self.API_TAG].get(Site.API_TAG, [])
except KempTechApiException:
xml_object = []
obj_list = []
# If there is no API_TAG key, build will fail with a
# ValidationError, which is the best we can do for now
# (without changing the upstream code and raising an
# exception earlier, possibly retrying)
xml_object = cast_to_list(xml_object)
for x in xml_object:
obj = self.build_site(x)
obj_list.append(obj)
return obj_list
def build_site(self, site):
"""Create a object instance with standard defaults"""
build_parameters = {}
for parameter, tag in Site.API_INIT_PARAMS.items():
build_parameters[parameter] = site.get(tag)
obj = Site(self.access_info, **build_parameters)
obj.populate_default_attributes(site)
return obj
class Site(BaseKempObject):
_API_ADD = "/addmap"
_API_MOD = "/modmap"
_API_DELETE = "/delmap"
_API_GET = "/showfqdn"
_API_LIST = "/showfqdn"
API_TAG = "Map"
API_INIT_PARAMS = {
"ip": "IPAddress"
}
_API_BASE_PARAMS = {
"fqdn": "fqdn",
"ip": "ip"
}
_API_DEFAULT_ATTRIBUTES = {
"index": "Index",
"status": "Status",
"clustervsaddress": "ClusterVSAddress",
"checker": "Checker",
"checkeraddr": "checkerAddr",
"checkerport": "CheckerPort",
"weight": "Weight",
"enable": "Enable",
"locationlatitude": "LocationLatitude",
"locationlongitude": "LocationLongitude",
"continent": "continent",
"country": "country",
"customlocation": "customLocation",
"cluster": "Cluster",
"mapaddress": "MappedAddress",
"mapport": "MappedPort"
}
_API_IGNORE = (
"log_urls", "ip_address", "endpoint", "index", "status",
"continent", "country", "customlocation", "ipaddress"
)
# Remap ipaddress to ip because the API is inconsistent
@property
def ipaddress(self):
return self.ip
@ipaddress.setter
def ipaddress(self, value):
self.ip = value
@property
def mappedaddress(self):
return self.mapaddress
@mappedaddress.setter
def mappedaddress(self, value):
self.mapaddress = value
@property
def mappedport(self):
return self.mapport
@mappedport.setter
def mappedport(self, value):
self.mapport = value
def __init__(self, loadmaster_fqdn_info, ip):
self.fqdn = loadmaster_fqdn_info["fqdn"]
self.ip = ip
validate_ip(ip)
try:
self.fqdn = loadmaster_fqdn_info["fqdn"]
except KeyError:
raise SiteMissingFQDNInfo("fqdn")
try:
self.endpoint = loadmaster_fqdn_info["endpoint"]
except KeyError:
raise SiteMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_fqdn_info["ip_address"]
except KeyError:
raise SiteMissingLoadmasterInfo("ip_address")
super(Site, self).__init__(loadmaster_fqdn_info)
def __str__(self):
return 'Site {} in FQDN {} on LoadMaster {}'.format(
self.ip, self.fqdn, self.ip_address)
def _get_base_parameters(self):
return {
"fqdn": self.fqdn,
"ip": self.ip
}
def populate_default_attributes(self, dictionary):
super(Site, self).populate_default_attributes(dictionary)
# Fix annoying API inconsistencies
# Normalize location lists so we always get a regular list
if not isinstance(self.continent, list):
if self.continent is None:
self.continent = []
else:
self.continent = [self.continent]
if not isinstance(self.country, list):
if self.country is None:
self.country = []
else:
self.country = [self.country]
if not isinstance(self.customlocation, list):
if self.customlocation is None:
self.customlocation = []
else:
self.customlocation = [self.customlocation]
try:
self.checkerport = int(self.checkerport)
        except (ValueError, TypeError, AttributeError):
self.checkerport = None
finally:
            if (self.checkerport is not None
                    and not 1 < self.checkerport < 65530):
self.checkerport = None
def save(self, update=False):
if not update:
response = self._get(self._API_ADD, self._get_base_parameters())
else:
response = self._get(self._API_MOD, self.to_api_dict())
if not is_successful(response):
raise KempTechApiException(get_error_msg(response))
# Secondary request is needed because the add/mod action
# does not return any data. Therefore, we need to explicitly
# retrieve the info.
response = self._get(self._API_GET, self._get_base_parameters())
if is_successful(response):
response = self._get(self._API_GET, self._get_base_parameters())
data = get_data(response)
maps = data["fqdn"].get(self.API_TAG, {})
if not isinstance(maps, list):
maps = [maps]
map = [m for m in maps if m['IPAddress'] == self.ipaddress]
# This shouldn't happen, but we should catch it anyway
if len(map) > 1:
raise LoadMasterParameterError(
"Multiple matching sites specified.",
map)
if len(map) < 1:
raise LoadMasterParameterError(
"No matching sites specified.",
map)
site = map[0]
self.populate_default_attributes(site)
else:
raise KempTechApiException(get_error_msg(response))
def refresh(self):
response = self._get(
self._API_GET,
self._get_base_parameters())
if is_successful(response):
response = self._get(self._API_GET, self._get_base_parameters())
data = get_data(response)
maps = data["fqdn"].get(self.API_TAG, {})
if not isinstance(maps, list):
maps = [maps]
map = [m for m in maps if m['IPAddress'] == self.ipaddress]
# This shouldn't happen, but we should catch it anyway
if len(map) > 1:
raise LoadMasterParameterError(
"Multiple matching sites specified.",
map)
if len(map) < 1:
raise LoadMasterParameterError(
"No matching sites specified.",
map)
site = map[0]
self.populate_default_attributes(site)
else:
raise KempTechApiException(get_error_msg(response))
@property
def locations(self):
return {
"continent": self.continent,
"country": self.country,
"customlocation": self.customlocation
}
@staticmethod
def __get_map_parameters(location, is_continent=False, is_custom=False):
if is_custom is False:
parameters = {
"countrycode": location.upper()
}
if is_continent is True:
parameters["iscontinent"] = "yes"
else:
parameters["iscontinent"] = "no"
else:
parameters = {
"customlocation": location
}
return parameters
def __mod_location(self, location, is_continent=False, is_custom=False,
remove=False):
parameters = self.__get_map_parameters(location,
is_continent,
is_custom)
parameters.update(self._get_base_parameters())
if not remove:
url = "/addcountry"
else:
url = "/removecountry"
response = self._get(url, parameters)
if is_successful(response):
self.refresh()
else:
raise KempTechApiException(get_error_msg(response))
def set_locations(self, locations):
# Remove all existing locations
for location in self.continent or []:
self.remove_location(location['code'], True, False)
for location in self.country or []:
self.remove_location(location['code'], False, False)
for location in self.customlocation or []:
self.remove_location(location['name'], False, True)
# Add new set of locations
for location in locations.get("continent", []):
self.add_location(location['code'], True, False)
for location in locations.get("country", []):
self.add_location(location['code'], False, False)
for location in locations.get("customlocation", []):
self.add_location(location['name'], False, True)
self.refresh()
def add_location(self, location=None, is_continent=False, is_custom=False):
self.__mod_location(location,
is_continent,
is_custom,
remove=False)
def remove_location(self, location, is_continent=False, is_custom=False):
self.__mod_location(location,
is_continent,
is_custom,
remove=True)
def set_coordinates(self, latitude=None, longitude=None):
latitude = latitude or self.locationlatitude
longitude = longitude or self.locationlongitude
parameters = {
"lat": latitude,
"long": longitude
}
parameters.update(self._get_base_parameters())
url = "/changemaploc"
response = self._get(url, parameters)
if is_successful(response):
self.refresh()
else:
raise KempTechApiException(get_error_msg(response))
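    # Illustrative sketch (hedged; the location codes and coordinates are
    # hypothetical examples, not values read from a LoadMaster):
    #
    #     site.set_locations({"continent": [{"code": "EU"}],
    #                         "country": [{"code": "IE"}],
    #                         "customlocation": []})
    #     site.set_coordinates(latitude=53.35, longitude=-6.26)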
class Cluster(BaseKempObject):
_API_ADD = "/addcluster"
_API_MOD = "/modcluster"
_API_DELETE = "/delcluster"
_API_GET = "/showcluster"
_API_LIST = "/listclusters"
API_TAG = "cluster"
API_INIT_PARAMS = {
"ip": "IPAddress",
"name": "Name"
}
_API_BASE_PARAMS = {
"ip": "IPAddress",
"name": "Name"
}
_API_DEFAULT_ATTRIBUTES = {
"status": "Status",
"id": "Index",
"name": "Name",
"checker": "Checker",
"checkerport": "CheckerPort",
"type": "Type",
"enable": "Enable",
"locationlatitude": "LocationLatitude",
"locationlongitude": "LocationLongitude",
"clustervsaddress": "ClusterVSAddress"
}
def __init__(self, loadmaster_info, ip, name):
self.id = None
self.name = name
self.ip = ip
validate_ip(ip)
try:
self.endpoint = loadmaster_info["endpoint"]
except KeyError:
raise ClusterMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_info["ip_address"]
except KeyError:
raise ClusterMissingLoadmasterInfo("ip_address")
super(Cluster, self).__init__(loadmaster_info)
def __str__(self):
if self.id is None:
return 'Cluster {} at {} on LoadMaster {}'.format(
self.name, self.ip, self.ip_address)
else:
return 'Cluster #{} {} at {} on LoadMaster {}'.format(
self.id, self.name, self.ip, self.ip_address)
def save(self, update=False):
super(Cluster, self).save(update)
self.refresh()
def populate_default_attributes(self, dictionary):
super(Cluster, self).populate_default_attributes(dictionary)
# Clear checkerport if it's not in use
if hasattr(self, "checkerport") and self.checkerport == "0":
if self.checker != "tcp":
self.checkerport = None
else:
# PD-7338
self.checkerport = "80"
class Range(BaseKempObject):
_API_ADD = "/addip"
_API_MOD_LOC = "/modiploc"
_API_DEL_LOC = "/deliploc"
_API_ADD_CC = "/addipcountry"
_API_DEL_CC = "/removeipcountry"
_API_DELETE = "/delip"
_API_GET = "/showip"
_API_LIST = "/listips"
API_TAG = "IPAddress"
API_INIT_PARAMS = {
"ip": "IPAddress",
"mask": "Mask"
}
_API_BASE_PARAMS = [
"ip",
"mask"
]
_API_DEFAULT_ATTRIBUTES = {
"status": "Status",
"index": "Index",
"country": "Country",
"iscustom": "IsCustom",
"long": "Longitude",
"lat": "Latitude"
}
_API_IGNORE = (
"log_urls", "ip_address", "endpoint", "index", "status", "country",
"iscustom", "mask",
)
def __init__(self, loadmaster_info, ip, mask):
self.ip = ip
validate_ip(self.ip)
self.mask = int(mask)
if not 8 <= self.mask <= 32:
raise RangeMaskInvalid(mask)
self.lat = None
self.long = None
self.country = None
self.iscustom = None
try:
self.endpoint = loadmaster_info["endpoint"]
except KeyError:
raise RangeMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_info["ip_address"]
except KeyError:
raise RangeMissingLoadmasterInfo("ip_address")
super(Range, self).__init__(loadmaster_info)
def __str__(self):
return 'Range {}/{} on LoadMaster {}'.format(
self.ip, self.mask, self.ip_address)
@property
def latitude(self):
if self.lat is not None:
return self.lat / 3600
else:
return None
@latitude.setter
def latitude(self, value):
self.lat = value * 3600
@property
def longitude(self):
        if self.long is not None:
return self.long / 3600
else:
return None
@longitude.setter
def longitude(self, value):
self.long = value * 3600
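    # Illustrative arithmetic (hedged): the API stores coordinates in arc-seconds,
    # so the properties above convert to and from decimal degrees.
    #
    #     rng.latitude = 53.5       # stored internally as rng.lat == 192600.0
    #     rng.latitude              # -> 53.5 (later cast to an int arc-second
    #                               #    count by populate_default_attributes)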
def _get_base_parameters(self):
"""Returns the bare minimum FQDN parameters."""
return {
"ip": self.ip
}
def save(self, update=False):
if not update:
base_parameters = {
"ip": self.ip + "/" + str(self.mask)
}
response = self._get(self._API_ADD, base_parameters)
if not is_successful(response):
raise KempTechApiException(get_error_msg(response))
# We need to refresh here, creating the range does not return data
self.refresh()
# Set Coordinates
if self.lat is not None and self.long is not None:
response = self._get(self._API_MOD_LOC,
self.to_api_dict())
else:
response = self._get(self._API_DEL_LOC,
self._get_base_parameters())
if is_successful(response):
pass
else:
raise KempTechApiException(get_error_msg(response))
# Set Country
if self.iscustom is True:
key = "customloc"
else:
key = "countrycode"
parameters = {
key: self.country
}
if self.country is not None:
parameters.update(self._get_base_parameters())
response = self._get(self._API_ADD_CC, parameters)
else:
response = self._get(self._API_DEL_CC, self._get_base_parameters())
if is_successful(response):
range_data = get_data(response)
self.populate_default_attributes(range_data)
else:
raise KempTechApiException(get_error_msg(response))
def populate_default_attributes(self, dictionary):
super(Range, self).populate_default_attributes(dictionary)
if self.country == "-1":
self.country = None
if self.lat is not None:
self.lat = int(self.lat)
if self.long is not None:
self.long = int(self.long)
class CustomLocation(BaseKempObject):
def __init__(self, loadmaster_info, name):
self.name = name
self.old_name = name
try:
self.endpoint = loadmaster_info["endpoint"]
except KeyError:
raise RangeMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_info["ip_address"]
except KeyError:
raise RangeMissingLoadmasterInfo("ip_address")
super(CustomLocation, self).__init__(loadmaster_info)
def __str__(self):
return 'Custom Location {} on LoadMaster {}'.format(
self.name, self.ip_address)
def _get_base_parameters(self):
"""Returns the bare minimum FQDN parameters."""
return {
"clname": self.name,
"location": self.name
}
def save(self, update=False):
if not update:
response = self._get("/addcustomlocation",
self._get_base_parameters())
if not is_successful(response):
raise KempTechApiException(get_error_msg(response))
else:
parameters = {
"cloldname": self.old_name,
"clnewname": self.name
}
response = self._get("/editcustomlocation", parameters)
if is_successful(response):
# range_data = get_data(response)
self.old_name = self.name
# Unfinished. Need to implement populate_attributes
else:
raise KempTechApiException(get_error_msg(response))
def delete(self):
response = self._get("/deletecustomlocation",
self._get_base_parameters())
return send_response(response)
class CipherSet(BaseKempObject):
def __init__(self, loadmaster_info, cipherset_name, ciphers):
self.cipherset_name = cipherset_name
cipher_regex = re.compile("^([A-Z0-9-]*:*)*[^:]$")
if isinstance(ciphers, list):
self.ciphers = ":".join(ciphers)
elif isinstance(ciphers, str) and cipher_regex.match(ciphers):
self.ciphers = ciphers
else:
raise CipherListInvalid(ciphers)
try:
self.endpoint = loadmaster_info["endpoint"]
except KeyError:
raise CertificateMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_info["ip_address"]
except KeyError:
raise CertificateMissingLoadmasterInfo("ip_address")
super(CipherSet, self).__init__(loadmaster_info)
def __str__(self):
return 'Cipher List {}'.format(self.ciphers)
def _get_base_parameters(self):
"""Returns the bare minimum cipherset parameters"""
return {
"name": self.cipherset_name,
"value": self.ciphers
}
def save(self, update=False):
response = self._get('/modifycipherset',
parameters=self._get_base_parameters())
if is_successful(response):
pass
else:
raise KempTechApiException(get_error_msg(response))
def delete(self):
response = self._get("/delcipherset", self._get_base_parameters())
return send_response(response)
class Certificate(BaseKempObject):
def __init__(self, loadmaster_info, certname,
certfile=None, certpass=None):
self.certname = certname
# If certname is a structure, pull out the name and set the modulus
if isinstance(self.certname, dict):
self.modulus = self.certname['modulus']
self.certname = self.certname['name']
if certfile is not None:
self.certfile = certfile
if certpass is not None:
self.certpass = certpass
else:
self.certpass = None
try:
self.endpoint = loadmaster_info["endpoint"]
except KeyError:
raise CertificateMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_info["ip_address"]
except KeyError:
raise CertificateMissingLoadmasterInfo("ip_address")
super(Certificate, self).__init__(loadmaster_info)
def __str__(self):
return 'Certificate {}'.format(self.certname)
def _get_base_parameters(self):
"""Returns the bare minimum VS parameters. IP, port and protocol"""
if self.certpass is None:
return {
"cert": self.certname,
"replace": "0"
}
else:
return {
"cert": self.certname,
"replace": "0",
"password": self.certpass
}
def save(self, update=False):
response = self._post("/addcert", file=self.certfile,
parameters=self._get_base_parameters())
if is_successful(response):
pass
else:
raise KempTechApiException(get_error_msg(response))
def delete(self):
response = self._get("/delcert",
self._get_base_parameters())
return send_response(response)
def populate_default_attributes(self, dictionary):
super(Certificate, self).populate_default_attributes(dictionary)
# If certname is a structure, pull out the name and set the modulus
if isinstance(self.certname, dict):
self.modulus = self.certname['modulus']
self.certname = self.certname['name']
class IntermediateCertificate(BaseKempObject):
def __init__(self, loadmaster_info, certname, certfile=None):
self.certname = certname
# If certname is a structure, pull out the name and set the modulus
if isinstance(self.certname, dict):
self.modulus = self.certname['modulus']
self.certname = self.certname['name']
if certfile is not None:
self.certfile = certfile
try:
self.endpoint = loadmaster_info["endpoint"]
except KeyError:
raise CertificateMissingLoadmasterInfo("endpoint")
try:
self.ip_address = loadmaster_info["ip_address"]
except KeyError:
raise CertificateMissingLoadmasterInfo("ip_address")
super(IntermediateCertificate, self).__init__(loadmaster_info)
def __str__(self):
return 'Intermediate Certificate {}'.format(self.certname)
def _get_base_parameters(self):
"""Returns the bare minimum VS parameters. IP, port and protocol"""
return {
"cert": self.certname,
}
def save(self, update=False):
response = self._post("/addintermediate",
file=self.certfile,
parameters=self._get_base_parameters())
if is_successful(response):
pass
else:
raise KempTechApiException(get_error_msg(response))
def delete(self):
response = self._get("/delintermediate", self._get_base_parameters())
return send_response(response)
class Interface(BaseKempObject):
_API_MOD = "/modiface"
_API_GET = "/showiface"
_API_LIST = "/stats"
_API_ADD_ADDRESS = "/addadditional"
_API_DELETE_ADDRESS = "/deladditional"
API_TAG = "Interface"
_API_LIST_TAG = "Network"
API_INIT_PARAMS = {
"interface": "Id"
}
_API_BASE_PARAMS = {
"interface": "Id"
}
_API_DEFAULT_ATTRIBUTES = {
"interface": "Id",
"addr": "IPAddress",
"shared": "SharedIPAddress",
"partner": "PartnerIPAddress",
"hacheck": "hacheck",
"mtu": "Mtu",
"interfacetype": "InterfaceType",
"geotraffic": "GeoTrafficEnable",
"gwiface": "DefaultInterface",
"additionaladdresses": "AdditionalAddresses",
"adminwuienable": "AdminWuiEnable"
}
_API_IGNORE = (
"log_urls", "ip_address", "endpoint", "name"
"interfacetype", "additionaladdresses"
)
def __init__(self, loadmaster_info, interface, params=None):
""" Interface constructor
:param loadmaster_info: LoadMaster access_info
:param interface: LoadMaster interface ID.
"""
self.interface = interface
# Set addr and cidr to empty strings to allow the properties
# to work correctly when there are no IPs or CIDR set.
self.addr = ""
self.cidr = ""
self.shared = None
self.partner = None
self.populate_default_attributes(params)
super(Interface, self).__init__(loadmaster_info)
def __str__(self):
return 'Interface {} on LoadMaster {}'.format(
self.interface, self.ip_address)
@property
def address(self):
# self.addr can be None as it is not mandatory for an interface to have an address
return self.addr.split("/")[0] if self.addr is not None else None
@address.setter
def address(self, value):
self.addr = "{}/{}".format(value, self.cidr) if value is not None else None
@property
def cidr(self):
return self.addr.split("/")[1] if self.addr is not None else None
@cidr.setter
def cidr(self, value):
self.addr = "{}/{}".format(self.address, value) if value is not None else None
def save(self, update=True):
# pylint: disable=duplicate-code
# Duplicate code required due to the lacking nature of interfaces API
for key, value in self.to_api_dict().items():
parameters = {
"interface": self.interface,
key: value
}
try:
response = self._get(self._API_MOD, parameters)
except KempTechApiException as e:
if str(e) == "Nothing Modified":
pass
else:
raise
else:
self._is_successful_or_raise(response)
def stats(self):
try:
response = self._get( # pylint: disable=protected-access
self._API_LIST)
data = get_data(response)
xml_object = data.get(self._API_LIST_TAG, [])
except KempTechApiException:
xml_object = []
# If there is no API_TAG key, build will fail with a
# ValidationError, which is the best we can do for now
# (without changing the upstream code and raising an
# exception earlier, possibly retrying)
stats = {}
for interface_details in xml_object.values():
if interface_details['ifaceID'] == self.interface:
for k, v in interface_details.items():
stats[k.lower()] = v
return stats
def populate_default_attributes(self, params):
params = {} if params is None else params
super(Interface, self).populate_default_attributes(params)
# Strip additional addresses into a list
if not hasattr(self, "additionaladdresses"):
self.additionaladdresses = []
elif self.additionaladdresses is None:
self.additionaladdresses = []
elif isinstance(self.additionaladdresses, OrderedDict):
self.additionaladdresses = self.additionaladdresses['IPaddress']
self.additionaladdresses = cast_to_list(self.additionaladdresses)
if not hasattr(self, "geotraffic"):
self.geotraffic = None
elif self.geotraffic == "no":
self.geotraffic = "0"
elif self.geotraffic == "yes":
self.geotraffic = "1"
self._additionaladdresses = []
for address in self.additionaladdresses:
self._additionaladdresses.append(address)
def set_additionaladdresses(self):
new = self.additionaladdresses
old = self._additionaladdresses
for address in list(set(old) - set(new)):
self._delete_additionaladdress(address)
for address in list(set(new) - set(old)):
self._add_additionaladdress(address)
self.refresh()
def _add_additionaladdress(self, address):
parameters = {
"interface": self.interface,
"addr": address
}
response = self._get( # pylint: disable=protected-access
self._API_ADD_ADDRESS,
parameters)
if not is_successful(response):
raise KempTechApiException(get_error_msg(response))
def _delete_additionaladdress(self, address):
parameters = {
"interface": self.interface,
"addr": address
}
response = self._get( # pylint: disable=protected-access
self._API_DELETE_ADDRESS,
parameters)
if not is_successful(response):
raise KempTechApiException(get_error_msg(response))
class License(BaseKempObject):
_API_DEFAULT_ATTRIBUTES = {
"uuid": "uuid",
"activationdate": "activationdate",
"licenseduntil": "licenseduntil",
"supportuntil": "supportuntil",
"supportlevel": "supportlevel",
"licensetype": "licensetype",
"licensestatus": "licensestatus",
"appliancemodel": "appliancemodel",
"freelicense": "freelicense",
}
def __init__(self, loadmaster_info, params=None):
""" License constructor
:param loadmaster_info: LoadMaster access_info
"""
self.subscriptions = []
self.populate_default_attributes(params)
super(License, self).__init__(loadmaster_info)
def populate_default_attributes(self, params):
params = {} if params is None else params
super(License, self).populate_default_attributes(params)
def save(self, update=False):
pass
def __str__(self):
return 'License type: {}'.format(self.licensetype)
class Subscription(BaseKempObject):
_API_DEFAULT_ATTRIBUTES = {
"name": "Name",
"expires": "Expires",
"featurelist": "FeatureList",
}
def __init__(self, loadmaster_info, params=None):
"""Subscription constructor
:param loadmaster_info: LoadMaster access_info
"""
self.populate_default_attributes(params)
super(Subscription, self).__init__(loadmaster_info)
def populate_default_attributes(self, params):
params = {} if params is None else params
super(Subscription, self).populate_default_attributes(params)
def save(self, update=False):
pass
def __str__(self):
return 'Subscription type: {}'.format(self.name)
| apache-2.0 | -1,186,749,965,522,894,800 | 31.720312 | 90 | 0.566694 | false |
sug4rok/Servus | Servus/plugins/arduino_yl83/widget.py | 1 | 2253 | # coding=utf-8
from plugins.utils import get_used_plugins_by, get_latest_sensor_value
from climate.models import RaindropValue
def rain_level(sensor, number):
"""
    Function that determines four precipitation levels.
    It needs tuning for the placement, housing, tilt angle, etc. of the rain
    sensor, so the historical minimum and maximum are recorded for each sensor.
    Precipitation is taken to start at max_value - 10.
    :param sensor: plugins.models.SensorYL83 rain sensor
    :param number: int Reading from the rain sensor, ideally between 0 and 1023,
    where 0 means full immersion in water and 1023 a completely dry surface.
    :returns: int A number from 0 to 3 - four precipitation levels, where 0 means
    no precipitation and 3 a downpour.
"""
min_val = sensor.min_value
max_val = sensor.max_value
middle_val = (max_val - min_val) / 2 + min_val
shower_val = (max_val - min_val) / 5 + min_val
if number <= shower_val:
return 3
elif shower_val < number <= middle_val:
return 2
elif middle_val < number <= max_val - 10:
return 1
else:
return 0
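# Illustrative worked example (hypothetical sensor calibration, not real data):
# with min_value=200 and max_value=1000, shower_val=360 and middle_val=600, so
#     rain_level(sensor, 350)   # -> 3 (downpour)
#     rain_level(sensor, 500)   # -> 2
#     rain_level(sensor, 900)   # -> 1
#     rain_level(sensor, 995)   # -> 0 (above max_value - 10, i.e. dry)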
def get_widget_data(plan_id):
"""
    Function that provides data from YL-83 rain sensors.
    :returns: list A list of tuples with rain data and the placement coordinates
    of the widgets.
"""
sensors = get_used_plugins_by(package='plugins.arduino_yl83')
sensors = [s for s in sensors if s.plan_image_id == plan_id]
values = [get_latest_sensor_value(RaindropValue, sensor) for sensor in sensors]
return [(plan_id, v.content_object.name, v.content_object.horiz_position,
v.content_object.vert_position, v.content_object.level,
rain_level(v.content_object, v.raindrop)) for v in values if v is not None]
| mit | 283,418,469,228,938,800 | 34.82 | 99 | 0.680067 | false |
Jindam/HPCGISLab | setup.py | 2 | 1970 | """
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: Eric Shook ([email protected]); Zhengliang Feng ([email protected], [email protected])
"""
from setuptools import setup
from pip.req import parse_requirements
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements("./requirements.txt")
# reqs is a list of requirement
reqs = [str(ir.req) for ir in install_reqs]
if __name__ == '__main__':
# https://docs.python.org/2/distutils/setupscript.html#additional-meta-data
setup(
name='pcml',
version='0.1',
description='The parallel cartographic modeling language (PCML) provides spatial operations while hiding the implementation complexities of parallelism.',
url='https://github.com/HPCGISLab/pcml',
author='Eric Shook',
author_email='[email protected]',
license='Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.',
long_description=
"""
PCML
====
The parallel cartographic modeling language (PCML) is a multi-institutional
collaborative project aiming to create a computing language for
cyberGIScientists that is designed for (1) usability, (2) programmability, and
(3) scalability. PCML provides multi-core parallel processing for spatial
operations while hiding the implementation complexities of parallelism.
**Author**
Eric Shook <[email protected]>
**Contributors**
* Zhengliang Feng ([email protected], [email protected])
""",
#platform=[''],
install_requires = reqs,
packages=['pcml', 'pcml.core', 'pcml.lib', 'pcml.util'],
test_suite = 'tests',
)
| bsd-3-clause | 3,017,802,232,708,107,300 | 40.041667 | 214 | 0.711168 | false |
ilanschnell/ironpkg | enstaller/utils.py | 1 | 3500 | import sys
import hashlib
import urlparse
import urllib2
from os.path import abspath, expanduser
from egginst.utils import human_bytes, rm_rf
from enstaller import __version__
from enstaller.verlib import NormalizedVersion, IrrationalVersionError
def abs_expanduser(path):
return abspath(expanduser(path))
def canonical(s):
"""
Return the canonical representations of a project name.
"""
return s.lower()
def cname_fn(fn):
return canonical(fn.split('-')[0])
def comparable_version(version):
"""
Given a version string (e.g. '1.3.0.dev234'), return an object which
allows correct comparison. For example:
comparable_version('1.3.10') > comparable_version('1.3.8') # True
whereas:
'1.3.10' > '1.3.8' # False
"""
try:
# This hack makes it possible to use 'rc' in the version, where
# 'rc' must be followed by a single digit.
ver = version.replace('rc', '.dev99999')
return NormalizedVersion(ver)
except IrrationalVersionError:
# If obtaining the RationalVersion object fails (for example for
# the version '2009j'), simply return the string, such that
# a string comparison can be made.
return version
def md5_file(path):
"""
Returns the md5sum of the file (located at `path`) as a hexadecimal
string of length 32.
"""
fi = open(path, 'rb')
h = hashlib.new('md5')
while True:
chunk = fi.read(65536)
if not chunk:
break
h.update(chunk)
fi.close()
return h.hexdigest()
def open_url(url):
"""
Open a urllib2 request, handling HTTP authentication
"""
scheme, netloc, path, params, query, frag = urlparse.urlparse(url)
assert not query
auth, host = urllib2.splituser(netloc)
request = urllib2.Request(url)
request.add_header('User-Agent', 'IronPkg/%s' % __version__)
return urllib2.urlopen(request)
def write_data_from_url(fo, url, md5=None, size=None):
"""
Read data from the url and write to the file handle fo, which must be
open for writing. Optionally check the MD5. When the size in bytes
is provided, a progress bar is displayed using the download/copy.
"""
if size:
sys.stdout.write('%9s [' % human_bytes(size))
sys.stdout.flush()
n = cur = 0
if url.startswith('file://'):
path = url[7:]
fi = open(path, 'rb')
elif url.startswith('http://'):
try:
fi = open_url(url)
except urllib2.URLError, e:
raise urllib2.URLError("\n%s\nCannot open URL:\n %s" % (e, url))
else:
raise Exception("Invalid url: %r" % url)
h = hashlib.new('md5')
if size and size < 131072:
buffsize = 256
else:
buffsize = 16384
while True:
chunk = fi.read(buffsize)
if not chunk:
break
fo.write(chunk)
if md5:
h.update(chunk)
if not size:
continue
n += len(chunk)
if float(n) / size * 64 >= cur:
sys.stdout.write('.')
sys.stdout.flush()
cur += 1
if size:
sys.stdout.write(']\n')
sys.stdout.flush()
fi.close()
if md5 and h.hexdigest() != md5:
sys.stderr.write("FATAL ERROR: Data received from\n\n"
" %s\n\n"
"is corrupted. MD5 sums mismatch.\n" % url)
fo.close()
sys.exit(1)
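# Usage sketch (illustrative only, not part of the original module; the URL,
# file name, size and MD5 digest below are made-up placeholder values):
#
#   fo = open('example-1.0-1.egg', 'wb')
#   try:
#       write_data_from_url(fo, 'http://example.com/eggs/example-1.0-1.egg',
#                           md5='d41d8cd98f00b204e9800998ecf8427e', size=65536)
#   finally:
#       fo.close()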
| bsd-3-clause | -8,809,423,909,564,426,000 | 25.717557 | 79 | 0.584 | false |
OSSESAC/odoopubarquiluz | addons/account/account_cash_statement.py | 1 | 21372 | # encoding: utf-8
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 PC Solutions (<http://pcsol.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_cashbox_line(osv.osv):
""" Cash Box Details """
_name = 'account.cashbox.line'
_description = 'CashBox Line'
_rec_name = 'pieces'
def _sub_total(self, cr, uid, ids, name, arg, context=None):
""" Calculates Sub total
@param name: Names of fields.
@param arg: User defined arguments
@return: Dictionary of values.
"""
res = {}
for obj in self.browse(cr, uid, ids, context=context):
res[obj.id] = {
'subtotal_opening' : obj.pieces * obj.number_opening,
'subtotal_closing' : obj.pieces * obj.number_closing,
}
return res
def on_change_sub_opening(self, cr, uid, ids, pieces, number, *a):
""" Compute the subtotal for the opening """
return {'value' : {'subtotal_opening' : (pieces * number) or 0.0 }}
def on_change_sub_closing(self, cr, uid, ids, pieces, number, *a):
""" Compute the subtotal for the closing """
return {'value' : {'subtotal_closing' : (pieces * number) or 0.0 }}
_columns = {
'pieces': fields.float('Unit of Currency', digits_compute=dp.get_precision('Account')),
'number_opening' : fields.integer('Number of Units', help='Opening Unit Numbers'),
'number_closing' : fields.integer('Number of Units', help='Closing Unit Numbers'),
'subtotal_opening': fields.function(_sub_total, string='Opening Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
'subtotal_closing': fields.function(_sub_total, string='Closing Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
'bank_statement_id' : fields.many2one('account.bank.statement', ondelete='cascade'),
}
account_cashbox_line()
class account_cash_statement(osv.osv):
_inherit = 'account.bank.statement'
def _update_balances(self, cr, uid, ids, context=None):
"""
Set starting and ending balances according to pieces count
"""
res = {}
for statement in self.browse(cr, uid, ids, context=context):
if (statement.journal_id.type not in ('cash',)) or (not statement.journal_id.cash_control):
continue
start = end = 0
for line in statement.details_ids:
start += line.subtotal_opening
end += line.subtotal_closing
data = {
'balance_start': start,
'balance_end_real': end,
}
res[statement.id] = data
super(account_cash_statement, self).write(cr, uid, [statement.id], data, context=context)
return res
def _get_sum_entry_encoding(self, cr, uid, ids, name, arg, context=None):
""" Find encoding total of statements "
@param name: Names of fields.
@param arg: User defined arguments
@return: Dictionary of values.
"""
res = {}
for statement in self.browse(cr, uid, ids, context=context):
res[statement.id] = sum((line.amount for line in statement.line_ids), 0.0)
return res
def _get_company(self, cr, uid, context=None):
user_pool = self.pool.get('res.users')
company_pool = self.pool.get('res.company')
user = user_pool.browse(cr, uid, uid, context=context)
company_id = user.company_id
if not company_id:
company_id = company_pool.search(cr, uid, [])
return company_id and company_id[0] or False
def _get_statement_from_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.bank.statement.line').browse(cr, uid, ids, context=context):
result[line.statement_id.id] = True
return result.keys()
def _compute_difference(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0.0)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = obj.balance_end_real - obj.balance_end
return result
def _compute_last_closing_balance(self, cr, uid, ids, fieldnames, args, context=None):
result = {}
for stmt in self.browse(cr, uid, ids, context=context):
result[stmt.id] = 0.0
if stmt.previus_id:
result[stmt.id] = stmt.previus_id.balance_end_real
return result
def onchange_journal_id(self, cr, uid, ids, journal_id, line_ids=[], balance_end_real=0.0, opening_details_ids=[], context=None):
result = super(account_cash_statement, self).onchange_journal_id(cr, uid, ids, journal_id)
if not journal_id:
return result
result['value']['cash_control'] = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context).cash_control
statement_ids = self.search(cr, uid,
[('journal_id', '=', journal_id),('state', '=', 'confirm'),'|',('next_id','=',False),('next_id','=',journal_id)],
order='date desc, id desc',
limit=1,
context=context)
result['value']['previus_id'] = statement_ids and statement_ids[0] or False
result['value'].update(self.onchange_previus_id(cr, uid, ids, result['value']['previus_id'], journal_id, opening_details_ids, context=context)['value'])
result['value'].update(self.onchange_details(cr, uid, ids, result['value']['opening_details_ids'], line_ids, result['value']['cash_control'], result['value']['last_closing_balance'], balance_end_real, context=context)['value'])
return result
def onchange_previus_id(self, cr, uid, ids, previus_id, journal_id, opening_details_ids, context=None):
result = {'value':{}}
if not journal_id:
return result
result['value']['opening_details_ids'] = []
for detail in opening_details_ids:
if detail[0] in [1,4]:
result['value']['opening_details_ids'].append((2, detail[1], detail[2]))
elif detail[0] == 2:
result['value']['opening_details_ids'].append(detail)
result['value']['last_closing_balance'] = 0.0
if not previus_id:
for value in self.pool.get('account.journal').browse(cr, uid, journal_id, context=context).cashbox_line_ids:
result['value']['opening_details_ids'].append([0, False, {'pieces':value.pieces, 'number_opening':0, 'subtotal_opening':0.0, 'number_closing':0, 'subtotal_closing':0.0}])
else:
st = self.browse(cr, uid, previus_id, context=context)
for value in st.details_ids:
result['value']['opening_details_ids'].append([0, False, {'pieces':value.pieces, 'number_opening':value.number_closing, 'subtotal_opening':value.number_closing*value.pieces, 'number_closing':0, 'subtotal_closing':0.0}])
result['value']['last_closing_balance'] = st.balance_end_real
return result
def onchange_details(self, cr, uid, ids, details_ids, line_ids, cash_control, balance_start, balance_end_real, context=None):
res = {'value':{'total_entry_encoding':0.0,'balance_start':0.0,'balance_end':0.0,'balance_end_real':0.0,}}
cashbox_line_obj = self.pool.get('account.cashbox.line')
stmt_line_obj = self.pool.get('account.bank.statement.line')
for action, line_id, data in line_ids:
amount = 0.0
if action != 0:
stmt_line = stmt_line_obj.browse(cr, uid, line_id, context=context)
amount = stmt_line.amount
amount = data and data.get('amount') or amount
if action in (1, 4, 0):
#1,4:Modified 0:inserted
res['value']['total_entry_encoding'] += amount
# elif action == 2:
# #deleted
res['value']['balance_end'] = res['value']['total_entry_encoding']
if not cash_control:
res['value']['balance_start'] += balance_start
res['value']['balance_end_real'] += balance_end_real
res['value']['balance_end'] += balance_start
else:
for action, line_id, data in details_ids:
pieces = number_opening = number_closing = 0.0
if action != 0:
cashbox_line = cashbox_line_obj.browse(cr, uid, line_id, context=context)
pieces = cashbox_line.pieces
number_opening = cashbox_line.number_opening
number_closing = cashbox_line.number_closing
pieces = data and data.get('pieces') or pieces
number_opening = data and data.get('number_opening') or number_opening
number_closing = data and data.get('number_closing') or number_closing
if action in (1, 4, 0):
#1,4:Modified 0:inserted
res['value']['balance_start'] += pieces * number_opening
res['value']['balance_end_real'] += pieces * number_closing
res['value']['balance_end'] += pieces * number_opening
# elif action == 2:
# #deleted
res['value']['difference'] = res['value']['balance_end_real'] - res['value']['balance_end']
return res
def _next_id(self, cr, uid, ids, name, arg, context=None):
res=dict.fromkeys(ids, False)
for stmt in self.browse(cr, uid, ids, context=context):
for next in stmt.next_ids:
if next.state == 'cancel':
continue
res[stmt.id] = next.id
break
return res
_columns = {
'total_entry_encoding': fields.function(_get_sum_entry_encoding, string="Total Transactions",
store = {
'account.bank.statement': (lambda self, cr, uid, ids, context=None: ids, ['line_ids','move_line_ids'], 10),
'account.bank.statement.line': (_get_statement_from_line, ['amount'], 10),
}),
'closing_date': fields.datetime("Closed On"),
'details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='CashBox Lines'),
'opening_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Opening Cashbox Lines'),
'closing_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Closing Cashbox Lines'),
'user_id': fields.many2one('res.users', 'Responsible', required=False),
'difference' : fields.function(_compute_difference, method=True, string="Difference", type="float"),
'last_closing_balance' : fields.function(_compute_last_closing_balance, method=True, string='Last Closing Balance', type='float'),
'cash_control': fields.related('journal_id','cash_control', string="Cash Control", type="boolean", readonly=True),
'previus_id': fields.many2one('account.bank.statement', string='Previus Statement',
readonly=True, states={'draft':[('readonly',False)]}),
'next_ids': fields.one2many('account.bank.statement','previus_id',string='Next Statements', readonly=True),
'next_id' : fields.function(_next_id,type="many2one",relation='account.bank.statement', string='Next Statement', readonly=True,
store={'account.bank.statement': (lambda s, cr, uid, ids, c={}:[st.previus_id.id for st in s.pool.get('account.bank.statement').browse(cr,uid,ids,context=c)], ['previus_id'], 20),}),
}
_defaults = {
'state': 'draft',
'date': lambda self, cr, uid, context={}: context.get('date', time.strftime("%Y-%m-%d %H:%M:%S")),
'user_id': lambda self, cr, uid, context=None: uid,
}
def create(self, cr, uid, vals, context=None):
journal = False
if vals.get('journal_id'):
journal = self.pool.get('account.journal').browse(cr, uid, vals['journal_id'], context=context)
if journal and (journal.type == 'cash') and not vals.get('details_ids'):
vals['details_ids'] = []
if vals.get('previus_id'):
stmt = self.pool.get('account.bank.statement').browse(cr, uid, vals['previus_id'], context=context)
if stmt.next_id:
raise osv.except_osv(_('User Error!'), (_('You do not select a previus statement (%s) used by other statement (%s)') % (stmt.previus_id.name,stmt.previus_id.next_id.name, )))
vals['balance_start'] = stmt.balance_end_real
res_id = super(account_cash_statement, self).create(cr, uid, vals, context=context)
self._update_balances(cr, uid, [res_id], context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
"""
        Update the record(s) given in {ids} with the new values given in {vals}.
        Return True on success, False otherwise.
        @param cr: cursor to database
        @param uid: id of current user
        @param ids: list of record ids to be updated
@param vals: dict of new values to be set
@param context: context arguments, like lang, time zone
@return: True on success, False otherwise
"""
if vals.get('previus_id'):
stmt = self.pool.get('account.bank.statement').browse(cr, uid, vals['previus_id'], context=context)
if stmt.next_id:
raise osv.except_osv(_('User Error!'), (_('You do not select a previus statement (%s) used by other statement (%s)') % (stmt.previus_id.name,stmt.previus_id.next_id.name, )))
res = super(account_cash_statement, self).write(cr, uid, ids, vals, context=context)
self._update_balances(cr, uid, ids, context)
return res
def _user_allow(self, cr, uid, statement_id, context=None):
return True
def button_previus_id(self, cr, uid, ids, context=None):
for stmt in self.browse(cr, uid, ids, context=context):
if not stmt.previus_id:
continue
self.pool.get('account.cashbox.line').unlink(cr, uid, [d.id for d in stmt.details_ids], context=context)
self.write(cr, uid, [stmt.id], {'balance_start': stmt.previus_id.balance_end_real,
'details_ids': [(0,False,{'pieces':d.pieces, 'number_opening':d.number_closing, 'subtotal_opening':d.number_closing*d.pieces, 'number_closing':0, 'subtotal_closing':0.0}) for d in stmt.previus_id.details_ids]},
context=context)
return True
def button_open(self, cr, uid, ids, context=None):
""" Changes statement state to Running.
@return: True
"""
obj_seq = self.pool.get('ir.sequence')
if context is None:
context = {}
statement_pool = self.pool.get('account.bank.statement')
for statement in statement_pool.browse(cr, uid, ids, context=context):
vals = {}
if not self._user_allow(cr, uid, statement.id, context=context):
raise osv.except_osv(_('Error!'), (_('You do not have rights to open this %s journal!') % (statement.journal_id.name, )))
if statement.previus_id and statement.previus_id.next_id and statement.previus_id.next_id.id != statement.id:
raise osv.except_osv(_('User Error!'), (_('You do not select a previus statement (%s) used by other statement (%s)') % (statement.previus_id.name,statement.previus_id.next_id.name, )))
if statement.name and statement.name == '/':
c = {'fiscalyear_id': statement.period_id.fiscalyear_id.id}
if statement.journal_id.sequence_id:
st_number = obj_seq.next_by_id(cr, uid, statement.journal_id.sequence_id.id, context=c)
else:
st_number = obj_seq.next_by_code(cr, uid, 'account.cash.statement', context=c)
vals.update({
'name': st_number
})
vals.update({
'state': 'open',
})
self.write(cr, uid, [statement.id], vals, context=context)
return True
def button_cancel(self, cr, uid, ids, context=None):
for stmt in self.browse(cr, uid, ids, context=context):
if stmt.next_id and stmt.next_id.state != 'draft':
raise osv.except_osv(_('User Error!'),
_('The next cash statement (%s) must be in draft state') % (stmt.next_id.name,))
return super(account_cash_statement,self).button_cancel(cr, uid, ids, context=context)
def statement_close(self, cr, uid, ids, journal_type='bank', context=None):
if journal_type == 'bank':
return super(account_cash_statement, self).statement_close(cr, uid, ids, journal_type, context)
vals = {
'state':'confirm',
'closing_date': time.strftime("%Y-%m-%d %H:%M:%S")
}
return self.write(cr, uid, ids, vals, context=context)
def check_status_condition(self, cr, uid, state, journal_type='bank'):
if journal_type == 'bank':
return super(account_cash_statement, self).check_status_condition(cr, uid, state, journal_type)
return state=='open'
def button_confirm_cash(self, cr, uid, ids, context=None):
absl_proxy = self.pool.get('account.bank.statement.line')
TABLES = ((_('Profit'), 'profit_account_id'), (_('Loss'), 'loss_account_id'),)
for obj in self.browse(cr, uid, ids, context=context):
if obj.previus_id:
if obj.previus_id.state != 'confirm':
raise osv.except_osv(_('User Error!'),
_('The previus cash statement (%s) must be in confirm state') % (obj.previus_id.name,))
if obj.previus_id.balance_end_real != obj.balance_start:
raise osv.except_osv(_('User Error!'),
_('The start balance (%s) must be equal to balance end real (%s) of previus cash statement (%s)') % (obj.balance_start,obj.previus_id.balance_end_real,obj.previus_id.name))
if obj.difference == 0.0:
continue
for item_label, item_account in TABLES:
if not getattr(obj.journal_id, item_account):
raise osv.except_osv(_('Error!'),
_('There is no %s Account on the journal %s.') % (item_label, obj.journal_id.name,))
is_profit = obj.difference < 0.0
account = getattr(obj.journal_id, TABLES[is_profit][1])
values = {
'statement_id' : obj.id,
'journal_id' : obj.journal_id.id,
'account_id' : account.id,
'amount' : obj.difference,
'name' : _('Exceptional %s') % TABLES[is_profit][0],
}
absl_proxy.create(cr, uid, values, context=context)
super(account_cash_statement, self).button_confirm_bank(cr, uid, ids, context=context)
return self.write(cr, uid, ids, {'closing_date': time.strftime("%Y-%m-%d %H:%M:%S")}, context=context)
account_cash_statement()
class account_journal(osv.osv):
_inherit = 'account.journal'
def _default_cashbox_line_ids(self, cr, uid, context=None):
# Return a list of coins in Euros.
result = [
dict(pieces=value) for value in [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500]
]
return result
_columns = {
'cashbox_line_ids' : fields.one2many('account.journal.cashbox.line', 'journal_id', 'CashBox'),
}
_defaults = {
'cashbox_line_ids' : _default_cashbox_line_ids,
}
account_journal()
class account_journal_cashbox_line(osv.osv):
_name = 'account.journal.cashbox.line'
_rec_name = 'pieces'
_columns = {
'pieces': fields.float('Values', digits_compute=dp.get_precision('Account')),
'journal_id' : fields.many2one('account.journal', 'Journal', required=True, select=1, ondelete="cascade"),
}
_order = 'pieces asc'
account_journal_cashbox_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,085,056,484,625,786,400 | 50.251799 | 254 | 0.586234 | false |
scztt/sc-debug | windows/distrowin.py | 2 | 8830 | # distrowin.py
# script to generate SuperCollider WIX (windows installer xml) source file from template
# Copyright (c) 2008 Dan Stowell. All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
#
# REQUIREMENTS:
# (1) You must have installed the "WiX toolset" and added its "bin" folder to your PATH
# (2) You must have run the Visual Studio compilation process to create Psycollider stuff in the "build" folder
# (3) This script file, and the wix template, must be in the "windows" folder in SuperCollider3 svn tree (sibling to the "build" folder)
# (4) I think you also need to put FFTW and libsndfile DLLs into the "build" folder
import os, glob, uuid, re, sys, shutil, zipfile
########################################
# Check for SwingOSC, because we definitely want it included :)
for detectpath in ('../build/SwingOSC.jar', '../build/SCClassLibrary/SwingOSC', '../build/Help/SwingOSC'):
if not os.path.exists(detectpath):
print("ERROR:\n Path %s not detected.\n It's required for bundling SwingOSC into the distro." % detectpath)
sys.exit(1)
########################################
# Run the "py2exe" procedure to build an exe file
os.system('cd ../Psycollider/Psycollider && python setup.py py2exe')
for detectpath in ('../Psycollider/Psycollider/dist/Psycollider.exe', '../Psycollider/Psycollider/dist/w9xpopen.exe'):
if not os.path.exists(detectpath):
print("ERROR:\n Path %s not detected.\n Generating executable (using py2exe) probably failed." % detectpath)
sys.exit(1)
# Also copy PySCLang.pyd out of its "site-packages" location
shutil.copy(os.getenv('PYTHONPATH', sys.exec_prefix) + '/Lib/site-packages/PySCLang.pyd', '../Psycollider/Psycollider/dist/')
# and a dll we need
shutil.copy(os.getenv('PYTHONPATH', sys.exec_prefix) + '/Lib/site-packages/wx-2.8-msw-unicode/wx/gdiplus.dll', '../Psycollider/Psycollider/dist/')
########################################
# Now we start to build up the XML content for WiX
xmlstr1 = "" # directory tree
xmlstr2 = "" # "feature" contents
regex1 = re.compile('[^a-zA-Z0-9.]')
def pathToId(path):
global regex1
id = regex1.sub('_', path.replace('../build/', ''))
return 's'+id[max(len(id)-64, 0):] # Prepending 's' a bit of a hack to ensure never begins with '3', '_' etc
def pathToGuid(path):
return str(uuid.uuid3(uuid.NAMESPACE_DNS, 'supercollider.sourceforge.net/' + path))
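# For illustration (assumed input, not taken from a real build): pathToId
# strips the '../build/' prefix, replaces every character outside [a-zA-Z0-9.]
# with '_', keeps at most the last 64 characters and prepends 's', so
# pathToId('../build/Help/SwingOSC/SwingOSC.html') gives
# 'sHelp_SwingOSC_SwingOSC.html'.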
# This recursively scans a directory and builds up the requisite XML for installing the relevant files.
def scanDirForWix(path, fileexts, nestlev):
global xmlstr1, xmlstr2
dircontents = os.listdir(path)
for item in dircontents:
fullerpath = path + '/' + item
if os.path.isdir(fullerpath) and item[0] != '.' and item!='osx' and item!='linux': # the '.' is to exclude .svn
# print fullerpath
xmlstr1 = xmlstr1 + ' '*nestlev + '<Directory Id="%s" Name="%s">\n' % (pathToId(fullerpath), item)
# Recurse:
scanDirForWix(fullerpath, fileexts, nestlev+1)
xmlstr1 = xmlstr1 + ' '*nestlev + '</Directory>\n'
elif os.path.isfile(fullerpath) and not os.path.islink(fullerpath):
for fileext in fileexts:
if item.lower().endswith(fileexts): #and item matches a certain range of file extensions:
# print fullerpath + " --- FILE"
compId = pathToId(fullerpath)
xmlstr1 = xmlstr1 + ' '*nestlev + '<Component Id="%s" Guid="%s">\n' % (compId, pathToGuid(fullerpath))
xmlstr1 = xmlstr1 + ' '*nestlev + ' <File Id="%s" Name="%s" Source="%s" DiskId="1"/>\n' % \
(compId + '.file', item, fullerpath)
xmlstr1 = xmlstr1 + ' '*nestlev + '</Component>\n'
xmlstr2 = xmlstr2 + ' <ComponentRef Id="%s" />\n' % compId
break
#else:
# print 'Ignored %s\n' % fullerpath
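# For a single matching file the loop above emits roughly the following
# fragments (the Id and Source values here are an assumed example, not real
# output): into xmlstr1:
#   <Component Id="sHelp_Foo.html" Guid="...">
#     <File Id="sHelp_Foo.html.file" Name="Foo.html" Source="../build/Help/Foo.html" DiskId="1"/>
#   </Component>
# and into xmlstr2:
#   <ComponentRef Id="sHelp_Foo.html" />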
# Now we do all the different scans we want
xmlstr1 = xmlstr1 + '<DirectoryRef Id="SCpluginsFolder">\n'
xmlstr2 = xmlstr2 + '<Feature Id="CorePluginsFeature" Title="Server plugins" Description="Core set of SC3 plugins" Level="1">\n'
scanDirForWix('../build/plugins', ('.scx'), 0)
xmlstr2 = xmlstr2 + '</Feature>\n'
xmlstr1 = xmlstr1 + '</DirectoryRef>\n\n'
xmlstr1 = xmlstr1 + '<DirectoryRef Id="SCHelpFolder">\n'
xmlstr2 = xmlstr2 + '<Feature Id="HelpFilesFeature" Title="Help files" Description="SC3 help documentation" Level="1">\n'
scanDirForWix('../build/Help', ('.html', '.htm', '.rtf', '.rtfd', '.jpg', '.png', '.gif', '.scd'), 0)
xmlstr2 = xmlstr2 + '</Feature>\n'
xmlstr1 = xmlstr1 + '</DirectoryRef>\n\n'
includeExtras = False # whether or not to expect build/sc3-plugins and to bundle it into the extensions folder
xmlstr1 = xmlstr1 + '<DirectoryRef Id="SCextensionsFolder">\n'
if includeExtras:
xmlstr1 = xmlstr1 + ' <Component Id="SCextensions" Guid="35AF303A-C836-11DD-84E5-084C56D89593">\n'
xmlstr1 = xmlstr1 + ' </Component>\n'
xmlstr1 = xmlstr1 + '<Directory Id="sc3plugins" Name="sc3-plugins">\n'
xmlstr2 = xmlstr2 + '<Feature Id="Sc3PluginsFeature" Title="Community sc3-plugins pack" Description="Third-party plugins pack sc3-plugins" Level="1">\n'
scanDirForWix('../build/sc3-plugins', ('.html', '.htm', '.rtf', '.rtfd', '.jpg', '.png', '.gif', '.scd', '.scx', '.sc'), 0)
xmlstr2 = xmlstr2 + '</Feature>\n'
xmlstr1 = xmlstr1 + '</Directory>\n\n'
else:
xmlstr1 = xmlstr1 + ' <Component Id="SCextensions" Guid="35AF303A-C836-11DD-84E5-084C56D89593">\n'
xmlstr1 = xmlstr1 + ' <CreateFolder/>\n' # This is how to create an empty folder in wix
xmlstr1 = xmlstr1 + ' </Component>\n'
xmlstr1 = xmlstr1 + '</DirectoryRef>\n\n'
xmlstr1 = xmlstr1 + '<DirectoryRef Id="SCsoundsFolder">\n'
xmlstr2 = xmlstr2 + '<Feature Id="SoundFilesFeature" Title="Sound files" Description="Some audio files" Level="1">\n'
scanDirForWix("../build/sounds", (".aiff", ".wav", ".aif"), 0)
xmlstr2 = xmlstr2 + '</Feature>\n'
xmlstr1 = xmlstr1 + '</DirectoryRef>\n\n'
xmlstr1 = xmlstr1 + '<DirectoryRef Id="SCClassLibrary">\n'
xmlstr2 = xmlstr2 + '<Feature Id="SCClassLibraryFeature" Title="SC3 class files" Description="The classes which define the SuperCollider language" Level="1">\n'
scanDirForWix("../build/SCClassLibrary", (".sc"), 0)
xmlstr2 = xmlstr2 + '</Feature>\n'
xmlstr1 = xmlstr1 + '</DirectoryRef>\n\n'
# WORKAROUND FOR M$ BUG:
# Windows installer is supposed to be able to handle massive numbers of files, but actually it fucks up if a <Feature> contains more than around 1000 files.
# See http://www.add-in-express.com/creating-addins-blog/2007/11/12/windows-installer-error-2908/
# Because of this, we need to artificially split the helpfiles feature in two.
xmlstr2b = xmlstr2.split('<ComponentRef Id="Help_Style_Guide', 1)
if not len(xmlstr2b) == 2:
print "Warning, unable to break up the XML string as expected."
else:
xmlstr2 = xmlstr2b[0] + '</Feature>\n<Feature Id="HelpFilesFeaturePT2" Title="Help files, part 2" Description="SC3 help documentation" Level="1">\n<ComponentRef Id="Help_Style_Guide' + xmlstr2b[1]
# OK, now we have the XML fragments, we want to substitute them into the XML template file
template = ''
templatef = open('sc3-win-installer-template.wxs', 'r')
for line in templatef:
template = template + line
templatef.close()
template = template.split('<!-- SUBST:SPLITHERE -->');
f = open('supercollider-installer.wxs', 'w')
f.write(template[0])
f.write(xmlstr1)
f.write(template[1])
f.write(xmlstr2)
f.write(template[2])
f.close()
print "\ndistrowin.py: done generating WiX file\n"
print "Calling WiX compile/link steps...\n"
os.system('candle supercollider-installer.wxs')
os.system('light -ext WixUIExtension -cultures:en-us supercollider-installer.wixobj')
print "\ndistrowin.py: done building MSI\n"
print "\ndistrowin.py: now to bundle a zip file\n"
z = zipfile.ZipFile('supercollider-installer.zip', 'w', zipfile.ZIP_DEFLATED)
z.write('supercollider-installer.msi')
z.write('INSTALL.txt')
z.write('../COPYING', 'COPYING.txt')
z.write('copyright_info_forwinbundle.txt', 'copyright info.txt')
z.close()
print "\ndistrowin.py: done\n"
| gpl-2.0 | 4,047,091,641,985,246,000 | 47.887006 | 197 | 0.675425 | false |
iniverno/RnR-LLC | simics-3.0-install/simics-3.0.31/amd64-linux/lib/python/sim_commands.py | 1 | 344681 | import sys, os, time, string, types
if os.environ.has_key('SIMICS_PYCHECKER'):
os.environ['PYCHECKER'] = ('--no-shadowbuiltin --no-argsused'
+ ' --no-implicitreturns --no-shadow')
import pychecker.checker
from cli import *
from refmanual import *
from re import *
from string import join
import os.path
import sim, __main__
def cast(cli_int_type, value):
return cli_int_type([("int", value)])[0]
# checks that we either have no CPU's or that we're not running
def assert_not_running():
try:
current_processor()
except:
return
if SIM_simics_is_running():
raise CliError, "This command cannot be used when Simics is running."
# ok, this is duplicated, which it shouldn't be
def conf_object_expander(string):
return get_completions(string, conf.all_object_names)
#
# -------------------- class-specific stuff --------------------
#
class_funcs = {}
def set_class_funcs(cls, funcs):
class_funcs[cls] = funcs
def get_class_funcs(cls):
if class_funcs.has_key(cls):
return class_funcs[cls]
else:
return { }
def get_obj_funcs(obj):
return get_class_funcs(obj.class_data)
def info_cmd(obj):
title = "Information about %s [class %s]" % (obj.name, obj.classname)
print title
print "=" * len(title)
try:
fn = get_obj_funcs(obj)['get_info']
info = fn(obj)
if info:
print_info(info, 30)
else:
print "No information available"
except Exception, msg:
print "Problem getting info for %s: %s" % (obj.name, msg)
def status_cmd(obj):
title = "Status of %s [class %s]" % (obj.name, obj.classname)
print title
print "=" * len(title)
try:
fn = get_obj_funcs(obj)['get_status']
info = fn(obj)
if info:
print_info(info, 30)
else:
print "No status available"
except Exception, msg:
print "Problem getting status for %s: %s" % (obj.name, msg)
# <add-fun id="simics api python">
# <short>define a new info command</short>
# <namespace>sim_commands</namespace>
#
# Define a new <cmd>info</cmd> command for a given device.
# <param>cls</param> is the class for which the <cmd>info</cmd> command
# should be registered. <param>get_info</param> is a function returning
# the information to be printed. <param>get_info()</param> should return
# a data structure of the following kind:
# <tt>
# [(SectionName1, [(DataName1.1, DataValue1.1),
# (DataName1.2, DataValue1.2), ...]),
# (SectionName2, [(DataName2.1, DataValue2.1),
# (DataName2.2, DataValue2.2), ...]),
# ...]
# </tt>
#
# Each section will be printed separately. Each piece of data will be printed
# on one line. If no sections are necessary, just provide <tt>None</tt> as
# the only section's name, followed by the list of data.
#
# </add-fun>
def new_info_command(cls, get_info, ctype = None):
if ctype == None:
ctype = cls+" commands"
class_funcs = get_class_funcs(cls)
if class_funcs.has_key('get_info'):
print "Duplicate definitions of <%s>.info" % cls
return
class_funcs['get_info'] = get_info
set_class_funcs(cls, class_funcs)
new_command("info", info_cmd,
[],
alias = "",
type = ctype,
short = "print information about the device",
namespace = cls,
doc = "Print detailed information about the configuration of the device.", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="111")
# <add-fun id="simics api python">
# <short>define a new status command</short>
# <namespace>sim_commands</namespace>
#
# Define a new <cmd>status</cmd> command for a given device.
# <param>cls</param> is the class for which the <cmd>status</cmd> command
# should be registered. <param>get_status</param> is a function returning
# the information to be printed. <param>get_status()</param> should return
# a data structure of the same kind as in <fun>new_info_command()</fun>.
#
# <di name="SEE ALSO">sim_commands.new_info_command</di>
# </add-fun>
def new_status_command(cls, get_status, ctype = None):
if ctype == None:
ctype = cls+" commands"
class_funcs = get_class_funcs(cls)
if class_funcs.has_key('get_status'):
print "Duplicate definitions of <%s>.status" % cls
return
class_funcs['get_status'] = get_status
set_class_funcs(cls, class_funcs)
new_command("status", status_cmd,
[],
alias = "",
type = ctype,
short = "print status of the device",
namespace = cls,
doc = "Print detailed information about the current status of the device.", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="140")
# This function is kind of unnecessary, really
def new_info_commands(cls, get_info, get_status):
new_info_command(cls, get_info)
new_status_command(cls, get_status)
# after info/status commands
import components
def local_print_disassemble_line(cpu, address, type, print_cpu = 1, mnemonic = None):
f = get_obj_funcs(cpu)['print_disassemble_line']
return f(cpu, address, type, print_cpu, mnemonic)
# Opcode print function for targets with four-byte instructions. Print
# the opcode as a word rather than as a sequence of bytes.
def fourbyte_print_opcode(cpu, paddr, length):
try:
word = "0x%08x" % SIM_read_phys_memory(cpu, paddr, 4)
except:
word = ""
pr("%-*s" % (10, word))
# Return a local_print_disassemble_line function.
# default_instr_len(address) says how many bytes an instruction is
# considered to be if that could not be determined by the disassembly
# function. disasm is the disassembly function to use.
# virtual_address_prefix is the prefix to use when printing virtual
# addresses. print_opcode is the function that prints the opcode
# bytes. address_filter is applied to the address before it is used.
def make_print_disassemble_line_fun(default_instr_len = 4,
disasm = SIM_disassemble,
virtual_address_prefix = "v",
print_opcode = fourbyte_print_opcode,
address_filter = lambda address: address):
# default_instr_len can be either a function or a number. If it is
# a number, make a function that returns that number.
try:
deflen = int(default_instr_len)
default_instr_len = lambda address: deflen
except:
pass
# Translate address to (virtual, physical) pair, setting virtual
    # address to None if type indicates physical address. May raise an
# Exception.
def translate_address(cpu, address, type):
if type == 1: # address is virtual
vaddr = address
try:
paddr = SIM_logical_to_physical(cpu, 0, vaddr)
except SimExc_Memory:
raise Exception, "address not in TLB"
except OverflowError:
raise Exception, "illegal address"
else: # address is physical
vaddr = None # no way to get a well-defined virtual address
paddr = address
return (vaddr, paddr)
# Print address profile views set for this processor.
aprof_column_size = {}
def print_aprof_views(cpu, vaddr, paddr, length):
pr(" ")
for ap, view in cpu.aprof_views:
ifc = ap.iface.address_profiler
if ifc.physical_addresses(ap, view):
start = paddr
else:
start = vaddr
id = (ap.name, view)
if start != None:
count = ifc.sum(ap, view, start, start + length - 1)
aprof_column_size[id] = max(aprof_column_size.get(id, 1),
len("%d" % count))
pr("%*d " % (aprof_column_size[id], count))
else:
# Profiler is indexed by an address we can't compute.
pr("%*s " % (aprof_column_size.get(id, 1), "?"))
# Return the smallest number of hex digits sufficient to represent
# the given number of bits.
def bits_to_hex_digits(bits):
return (bits + 3)/4
# A local_print_disassemble_line function. To be returned.
def lpdl_fun(cpu, address, type, print_cpu, name):
if print_cpu:
pr("[%s] " % cpu.name)
address = address_filter(address)
paddr_bits, vaddr_bits = cpu.address_width
length = default_instr_len(address)
try:
vaddr, paddr = translate_address(cpu, address, type)
except Exception, e:
# Could not get physical address.
paddr_err_string = e
paddr = None
vaddr = address
if vaddr != None:
pr("%s:0x%0*x " % (virtual_address_prefix,
bits_to_hex_digits(vaddr_bits), vaddr))
if paddr is None:
pr("<%s>\n" % paddr_err_string)
return length
if vaddr == None or disassembly_settings["physaddr"]:
pr("p:0x%0*x " % (bits_to_hex_digits(paddr_bits), paddr))
length = -1
try:
length, asm = disasm(cpu, address, type)
if name != None:
asm = name
except SimExc_Memory, e:
asm = "<whole instruction not in memory>"
except SimExc_General, e:
asm = "<%s>" % e
if length > 0:
if len(cpu.aprof_views) > 0:
print_aprof_views(cpu, vaddr, paddr, length)
if disassembly_settings["opcode"]:
pr(" ")
print_opcode(cpu, paddr, length)
pr(" ")
pr(" %s\n" % asm)
return length
return lpdl_fun
disassembly_settings = {
"opcode": 0,
"physaddr": 1,
"partial-opcode": 1,
}
disassembly_setting_desc = {
"opcode": "Print opcode bytes ",
"physaddr": "Print physical translation of virtual address",
"partial-opcode": "Show only part of the opcode (ia64 only) ",
}
def disassemble_settings_cmd(opcode, physaddr, partial_opcode):
if not (opcode or physaddr or partial_opcode):
print "Current disassemble settings:"
for name in disassembly_settings.keys():
print " %s %s" % (disassembly_setting_desc[name],
["No", "Yes"][disassembly_settings[name]])
if opcode:
disassembly_settings["opcode"] = (opcode == "on")
if physaddr:
disassembly_settings["physaddr"] = (physaddr == "on")
if partial_opcode:
disassembly_settings["partial-opcode"] = (partial_opcode == "on")
def on_off_expander(string):
return get_completions(string, ("on", "off"))
new_command("disassemble-settings", disassemble_settings_cmd,
[arg(str_t, "opcode", "?", None, expander = on_off_expander),
arg(str_t, "physaddr", "?", None, expander = on_off_expander),
arg(str_t, "partial-opcode", "?", None,
expander = on_off_expander)],
type = ["Command-Line Interface", "Output", "Execution", "Memory"],
short = "change disassembly output settings",
see_also = ["disassemble"],
doc = """
Change disassemble output settings. Each of these settings can be set
to <tt>on</tt> or <tt>off</tt>.
<i>opcode</i> indicates whether to print the raw bytes of the
instruction in addition to the disassembly. If <i>partial-opcode</i>
is set, and the opcode encodes more than one instruction, the opcode
bytes will be divided among the instructions so that the entire opcode
has been printed exactly once when all the instructions have been
disassembled. If <i>partial-opcode</i> is not set, the entire opcode
will be printed for every instruction. (The only Simics target with
multiple instructions per opcode is ia64.)
<i>physaddr</i> indicates whether to compute and display the physical
address if the virtual address was specified (if the physical address
was specified, the virtual address is never printed).
Without arguments, the current settings will be shown.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="301")
def local_pregs(cpu, all):
f = get_obj_funcs(cpu)['pregs']
return f(cpu, all)
# Common functionality for PCI devices
# cross 32-bit reads not supported
def read_config(obj, offset, size):
reg = offset / 4
off = (offset % 4) * 8
return 0L + (obj.config_registers[reg] >> off) & ((1L << size * 8) - 1)
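# Worked example: reading the 16-bit Device ID at offset 0x2 gives reg = 0 and
# off = 16, i.e. the upper half of config_registers[0] masked with 0xffff.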
def read_config_str(obj, offset, size):
format = "0x%%0%dx" % (2 * size)
return "%10s" % (format % read_config(obj, offset, size))
def get_pci_header_old(obj):
if read_config(obj, 0xe, 1) != 1:
# header type 0
reg_list = [("Base Address 2", read_config_str(obj, 0x18, 4)),
("Base Address 3", read_config_str(obj, 0x1c, 4)),
("Base Address 4", read_config_str(obj, 0x20, 4)),
("Base Address 5", read_config_str(obj, 0x24, 4)),
("Cardbus CIS Ptr", read_config_str(obj, 0x28, 4)),
("Subsystem Vendor ID", read_config_str(obj, 0x2c, 2)),
("Subsystem Device ID", read_config_str(obj, 0x2e, 2)),
("Expansion ROM Base", read_config_str(obj, 0x30, 4)),
("Capabilities Ptr", read_config_str(obj, 0x34, 1)),
("Interrupt Line", read_config_str(obj, 0x3c, 1)),
("Interrupt Pin", read_config_str(obj, 0x3d, 1)),
("Min Gnt", read_config_str(obj, 0x3e, 1)),
("Max Lat", read_config_str(obj, 0x3f, 1))]
else:
# header type 1
reg_list = [("Primary Bus Number", read_config_str(obj, 0x18, 1)),
("Secondary Bus Number", read_config_str(obj, 0x19, 1)),
("Subordinate Bus Number", read_config_str(obj, 0x1a, 1)),
("Secondary Latency Timer", read_config_str(obj, 0x1b, 1)),
("IO Base", read_config_str(obj, 0x1c, 1)),
("IO Limit", read_config_str(obj, 0x1d, 1)),
("Secondary Status", read_config_str(obj, 0x1e, 2)),
("Memory Base", read_config_str(obj, 0x20, 2)),
("Memory Limit", read_config_str(obj, 0x22, 2)),
("Prefetchable Memory Base", read_config_str(obj, 0x24, 2)),
("Prefetchable Memory Limit", read_config_str(obj, 0x26, 2)),
("Prefetchable Base Upper", read_config_str(obj, 0x28, 4)),
("Prefetchable Limit Upper", read_config_str(obj, 0x2c, 4)),
("IO Base Upper", read_config_str(obj, 0x30, 2)),
("IO Limit Upper", read_config_str(obj, 0x32, 2)),
("Capabilities Ptr", read_config_str(obj, 0x34, 1)),
("Expansion ROM Base", read_config_str(obj, 0x38, 4)),
("Interrupt Line", read_config_str(obj, 0x3c, 1)),
("Interrupt Pin", read_config_str(obj, 0x3d, 1)),
("Bridge Control", read_config_str(obj, 0x3e, 2))]
return [ ("Generic Registers",
[ ("Vendor ID", read_config_str(obj, 0x0, 2)),
("Device ID", read_config_str(obj, 0x2, 2)),
("Command", read_config_str(obj, 0x4, 2)),
("Status", read_config_str(obj, 0x6, 2)),
("Revision ID", read_config_str(obj, 0x8, 1)),
("Class Code", read_config_str(obj, 0x9, 3)),
("Cache Line Size", read_config_str(obj, 0xc, 1)),
("Latency Timer", read_config_str(obj, 0xd, 1)),
("Header Type", read_config_str(obj, 0xe, 1)),
("BIST", read_config_str(obj, 0xf, 1)),
("Base Address 0", read_config_str(obj, 0x10, 4)),
("Base Address 1", read_config_str(obj, 0x14, 4))] +
reg_list)]
def get_pci_header(obj):
try:
config_register_info = obj.config_register_info
except:
return get_pci_header_old(obj)
header = []
for x in config_register_info:
header.append((x[1], read_config_str(obj, x[0], x[2])))
return [ ("Configuration Registers", header) ]
def pci_header_cmd(obj):
title = "PCI Header for %s [class %s]" % (obj.name, obj.classname)
print title
print "=" * len(title)
# additional regs are optional
fn = get_obj_funcs(obj)['get_conf']
if fn:
try:
info = fn(obj)
except Exception, msg:
print "Problem getting header for %s: %s" % (obj.name, msg)
info = []
else:
info = []
print_info(get_pci_header(obj) + info, 30)
def new_pci_header_command(cls, get_conf = None):
class_funcs = get_class_funcs(cls)
if class_funcs.has_key('get_conf'):
print "Duplicate definitions of <%s>.pci-header" % cls
return
class_funcs['get_conf'] = get_conf
set_class_funcs(cls, class_funcs)
new_command("pci-header", pci_header_cmd,
[],
alias = "",
type = cls + " commands",
short = "print PCI device header",
namespace = cls,
doc = "Print the PCI header, i.e the configuration registers.", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="431")
def space_info(m, mem_on, io_on):
dis = 0
if m[5] & 4:
name = "Expansion ROM"
if not (m[5] & 1):
dis = 1
elif m[3] == 0: # memory
if m[5] & 2:
name = "64-bit Memory"
else:
name = "Memory"
if not mem_on:
dis = 1
    elif m[3] == 1: # I/O
        name = "IO"
        if not io_on:  # I/O mappings are gated by the I/O enable bit
            dis = 1
else:
name = "Unknown"
if not dis:
desc = "base 0x%x size 0x%x (function %d)" % (m[1], m[2], m[4])
else:
desc = "base 0x%x size 0x%x (disabled)" % (m[1], m[2])
return ("%s BAR 0x%x" % (name, m[0]), desc)
def get_pci_info(obj):
try:
rom = obj.expansion_rom
except:
# C++ implementations lack this attribute
rom = None
if rom:
rom = "%s, function %d (0x%x bytes)" % (
rom[0].name, obj.expansion_rom[2], obj.expansion_rom[1])
else:
rom = "none"
try:
maps = obj.mappings
except:
# C++ implementations lack this attribute
maps = []
io_on = obj.config_registers[1] & 1
mem_on = obj.config_registers[1] & 2
infos = []
for m in maps:
infos.append(space_info(m, mem_on, io_on))
memory_mappings = [(None,
[ ("Memory mappings", iff(mem_on, "enabled", "disabled")),
("IO mappings", iff(io_on, "enabled", "disabled"))])]
if len(infos):
memory_mappings += [("Supported Mappings", infos)]
return [ ("PCI information",
[ ("PCI bus", obj.pci_bus),
("Expansion ROM", rom),
])] + memory_mappings
def get_pci_status(obj):
return []
#
# -------------------- tracker class --------------------
#
# This class provides a command factory for break-* and trace-*
# functions. It is used by break-io, break-cr, and break-hap. It
# shouldn't be used directly, but instead be subclassed with the
# specifics.
#
# The stop parameter to __init__ decides if it breaks or traces.
#
# The callback method is usually used as a hap callback. It can be
# given to SIM_hap_add_callback(). However, it can not be used
# in SIM_hap_callback_exists(), since the function pointer will be
# different each time.
#
# if namespace is None, global commands will be created
# if namespace is processor, a namespace command will be created
#
class tracker:
def __init__(self, stop, cmd, target_name, expander,
short, doc,
namespace = None,
group = "breakpoint",
see_also = [],
expander_cpu = None):
self.cmd = cmd
self.stop = stop
uncmd = "un"+cmd
if type(target_name) == types.StringType:
target_types = (str_t,)
target_names = (target_name,)
expander = (expander,)
expander_cpu = (expander_cpu,)
elif type(target_name) == types.TupleType:
target_types, target_names = target_name
if type(target_types) != types.TupleType:
target_types = (target_types,)
target_names = (target_names,)
else:
raise TypeError
args = [arg(target_types + (flag_t, flag_t),
target_names + ("-all", "-list"),
expander = expander + (0, 0))]
self.namespace = namespace
if self.namespace:
new_command(cmd, self.do_namespace_cmd, args, type=group,
short=short,
namespace = namespace,
see_also = see_also,
doc=doc, filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="548")
new_command(uncmd, self.do_namespace_uncmd, args, type=group,
namespace = namespace,
short=short,
doc_with='<' + namespace + '>.' + cmd, filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="554")
# if it's a processor, register commands on current processor
if self.namespace == "processor":
# use new args with expander_cpu instead
cpu_args = [arg(target_types + (flag_t, flag_t),
target_names + ("-all", "-list"),
expander = expander_cpu + (0, 0))]
new_command(cmd, self.do_cpu_cmd, cpu_args, type=group,
short=short,
doc_with='<' + namespace + '>.' + cmd, filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="566")
new_command(uncmd, self.do_cpu_uncmd, cpu_args, type=group,
short=short,
doc_with='<' + namespace + '>.' + cmd, filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="570")
else:
new_command(cmd, self.do_cmd, args, type=group,
short=short,
see_also = see_also,
doc=doc, filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="575")
new_command(uncmd, self.do_uncmd, args, type=group,
short=short,
doc_with=cmd, filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="580")
def filter(self, *args):
# Override this in your subclass if you want to ignore some
# callbacks.
if self.namespace:
obj = args[0]
return 1
def resolve_target(self, *args):
# This function translates the string given by the command to
# something internally more useful, such as a configuration
# object or something like that. Override in a subclass.
if self.namespace:
obj, target = args
else:
target = args[0]
return target
def show(self, *args):
# This function is called to print that something happened.
# It should obviously be overridden in a subclass.
if self.namespace:
obj = args[0]
print "Something happened"
def list(self, *args):
# This function is called to list the tracked things, in
# response to the -list parameter.
if self.namespace:
obj = args[0]
print "Cannot list"
def is_tracked(self, *args):
        # This function is called to check if something is already
        # tracked. Override it with something more useful. Remember
        # that you can't use SIM_hap_callback_exists(self.callback).
        if self.namespace:
obj = args[0]
return 0
def track_all(self, *args):
# This function is called to set tracking on all possible choices.
# Override it with something more useful.
        if self.namespace:
obj = args[0]
def track_none(self, *args):
# This function is called to remove tracking on all possible choices.
# Override it with something more useful.
        if self.namespace:
obj = args[0]
def track_on(self, *args):
# This function is called to set tracking on a target.
# Override it with something more useful.
if self.namespace:
obj = args[0]
target = args[1]
else:
target = args[0]
def track_off(self, *args):
# This function is called to remove tracking on a target.
# Override it with something more useful.
if self.namespace:
obj = args[0]
target = args[1]
else:
target = args[0]
def callback(self, *args):
# Do not override this without a very good reason.
if not self.filter(*args):
return
self.show(*args)
if self.stop:
raise SimExc_Break, self.cmd
return 0
def do_namespace_cmd(self, obj, target_desc):
type, target_name, param = target_desc
try:
if type == flag_t and param == "-all":
if obj:
self.track_all(obj)
else:
self.track_all()
elif type == flag_t and param == "-list":
if obj:
self.list(obj)
else:
self.list()
else:
if obj:
target = self.resolve_target(obj, target_name)
if not self.is_tracked(obj, target):
self.track_on(obj, target)
else:
target = self.resolve_target(target_name)
if not self.is_tracked(target):
self.track_on(target)
except SimExc_Index, msg:
print msg
SIM_command_has_problem()
return
def do_cmd(self, target_desc):
self.do_namespace_cmd(None, target_desc)
def do_cpu_cmd(self, target_desc):
self.do_namespace_cmd(SIM_current_processor(), target_desc)
def do_namespace_uncmd(self, obj, target_desc):
type, target_name, param = target_desc
try:
if type == flag_t and param == "-all":
if obj:
self.track_none(obj)
else:
self.track_none()
elif type == flag_t and param == "-list":
if obj:
self.list(obj)
else:
self.list()
else:
if obj:
target = self.resolve_target(obj, target_name)
if self.is_tracked(obj, target):
self.track_off(obj, target)
else:
target = self.resolve_target(target_name)
if self.is_tracked(target):
self.track_off(target)
except SimExc_Index, msg:
print msg
SIM_command_has_problem()
return
def do_uncmd(self, target_desc):
self.do_namespace_uncmd(None, target_desc)
def do_cpu_uncmd(self, target_desc):
self.do_namespace_uncmd(SIM_current_processor(), target_desc)
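# Minimal subclass sketch (illustrative only, not a command that exists in
# Simics): the hap name "Example_Hap", the command name and the bookkeeping
# below are assumptions, shown only to indicate which methods a subclass
# normally overrides.
#
#   class example_tracker(tracker):
#       def __init__(self):
#           tracker.__init__(self, 0, "trace-example", "object", 0,
#                            "trace example events", "Trace example events.")
#           self.handles = {}
#       def show(self, arg, obj, *rest):
#           print "[%s] example event" % obj.name
#       def is_tracked(self, target):
#           return self.handles.has_key(target)
#       def track_on(self, target):
#           self.handles[target] = SIM_hap_add_callback("Example_Hap",
#                                                       self.callback, None)
#       def track_off(self, target):
#           SIM_hap_delete_callback_id("Example_Hap", self.handles[target])
#           del self.handles[target]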
#
# -------------------- -> --------------------
#
def attribute_cmd(o, a, rw, v):
try:
o = o.replace('-', '_')
o = SIM_get_object(o)
except SimExc_General:
print 'There is no object called "%s"' % o
SIM_command_has_problem()
return
a = a.replace('-', '_')
if rw[2] == '-w':
try:
SIM_set_attribute(o, a, v[1])
except SimExc_AttrNotFound:
SIM_command_has_problem()
print 'The "%s" object has no attribute "%s"' % (o.name, a)
return
except Exception, msg:
try:
if v[0] == str_t and v[1] in sim.objects:
SIM_set_attribute(o, a, SIM_get_object(v[1]))
elif v[0] in (int_t, float_t) and v[1] == 0:
SIM_set_attribute(o, a, None)
else:
raise
except Exception, msg:
SIM_command_has_problem()
print 'Failed writing attribute %s in %s: %s' % (a, o.name, msg)
return
else:
try:
val = SIM_get_attribute(o, a)
except SimExc_AttrNotFound:
SIM_command_has_problem()
print 'The "%s" object has no attribute "%s"' % (o.name, a)
return
except Exception, msg:
SIM_command_has_problem()
print 'Failed reading attribute %s from %s: %s' % (a, o.name, msg)
return
if (isinstance(val, str)
or isinstance(val, (int, long))):
return val
elif type(val) == type(conf.sim):
return val.name
elif isinstance(val, float):
return val
elif val == None:
return 0
else:
print val
new_command("->", attribute_cmd,
[arg(str_t, 'object'),
arg(str_t, 'attribute'),
arg((flag_t, flag_t), ('-r', '-w')),
arg((int_t, str_t, float_t), ('ival', 'sval', 'fval'),
"?", doc = 'value')],
type = ["Command-Line Interface"],
pri = 700, infix = 1,
group_short = "access object attribute",
short = "access object attribute",
doc = """
Get the value of <arg>attribute</arg> from <arg>object</arg>. Only object,
string, float and integer attributes can be returned by the command. Other
attribute types will be printed, but the command will not return anything.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="785")
#
# -------------------- + --------------------
#
def plus(a, b):
if a[0] == b[0]:
return a[1] + b[1]
return str(a[1]) + str(b[1])
new_command("+", plus, [arg((int_t, str_t), ('isrc1', 'ssrc1'), doc = 'arg1'),
arg((int_t, str_t), ('isrc2', 'ssrc2'), doc = 'arg2')],
type = ["Command-Line Interface"],
pri = 150, infix = 1,
group_short = "arithmetic addition",
short = "arithmetic addition",
doc = """
Arithmetic addition.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="811")
#
# -------------------- - --------------------
#
def minus(a, b):
return a - b
new_command("-", minus, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 150, infix = 1,
short="arithmetic subtraction",
doc="""
Arithmetic subtraction.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="828")
#
# -------------------- * --------------------
#
def muls(a, b):
return a * b
new_command("*", muls, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 200, infix = 1, short="arithmetic multiplication",
doc="""
Arithmetic multiplication.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="843")
#
# -------------------- / --------------------
#
def div(a, b):
if b == 0:
raise CliError, "Division by zero"
return a / b
new_command("/", div, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 200, infix = 1, short="arithmetic division",
doc="""
Arithmetic division.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="859")
#
# -------------------- & --------------------
#
def and_cmd(a, b):
return a & b
new_command("&", and_cmd, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 80, infix = 1, group_short = "various bitwise operators",
short="bitwise AND operation", doc = """
Bitwise AND operation.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="873")
#
# -------------------- | --------------------
#
def or_cmd(a, b):
return a | b
new_command("|", or_cmd, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 60, infix = 1, short="bitwise OR operation",
doc="""
Bitwise OR operation
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="888")
#
# -------------------- ^ --------------------
#
def xor_cmd(a, b):
return a ^ b
new_command("^", xor_cmd, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 70, infix = 1, short="bitwise XOR operation",
doc="""
Bitwise XOR operation.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="902")
#
# -------------------- >> --------------------
#
def shr_cmd(a, b):
if b < 0:
return a << -b
return a >> b
new_command(">>", shr_cmd, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 100, infix = 1, short="bitwise right shift",
doc="""
Bitwise right shift.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="918")
#
# -------------------- << --------------------
#
def shl_cmd(a, b):
if b < 0:
return a >> -b
return a << b
new_command("<<", shl_cmd, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 100, infix = 1, short="bitwise left shift",
doc="""
Bitwise left shift.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="934")
#
# -------------------- ~ --------------------
#
def not_cmd(a):
return ~a
new_command("~", not_cmd, [arg(int_t)],
type = ["Command-Line Interface"],
pri = 250, check_args = 0, short="bitwise not",
doc="""
Bitwise not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="949")
#
# -------------------- pow --------------------
#
def pow_cmd(a, b):
if b >= 0:
return a ** b
else:
raise CliError, "illegal exponent"
new_command("pow", pow_cmd, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 500, infix = 1, short="power of", doc="""
Return the <arg>arg1</arg> to the power of <arg>arg2</arg>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="966")
#
# -------------------- python --------------------
#
def python_cmd(str):
try:
ret = eval(str, __main__.__dict__)
if type(ret) == type(conf.sim):
return ret.name
else:
return ret
except SyntaxError:
try:
exec str in __main__.__dict__
return
except Exception, msg:
pass
except Exception, msg:
pass
raise CliError, "Error in Python expression: %s" % msg
new_command("python", python_cmd, [arg(str_t, "exp")],
type = ["Command-Line Interface", "Python"],
pri = 800, short="evaluate an expression in python",
see_also = ['@', 'run-python-file'],
doc = """
<i>exp</i> will be evaluated in the Python environment and the result
returned to the frontend. This can also be done by enclosing the
Python code within backquotes (`); e.g., <cmd>print -x
`SIM_step_count(SIM_current_processor())`</cmd>.
Both expressions and statements can be run, but for statements the
<cmd>@</cmd> command can be used instead.
<cmd>run-python-file</cmd> uses Simics's Search Path and path markers
(%simics%, %script%) to find the script to run. Refer to Simics User Guide (CLI
chapter) for more information.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="993")
#
# -------------------- command-list --------------------
#
def command_list_cmd(name):
format_commands_as_html(SIM_native_path(name))
new_command("command-list", command_list_cmd,
[arg(filename_t(), "file")],
type = ["Help"],
short="generate html document describing commands",
doc = """
Produces a quick reference list of Simics commands in HTML. Note that
commands can be added at runtime, so the generated list may differ from
printed manuals. <i>file</i> is the file to write to.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1019")
def api_help_cmd(str):
try:
doc = api_help[str]
print "Help on API keyword \"%s\":\n" % str
print_wrap_code(doc, terminal_width() - 1)
return
except KeyError, msg:
pass
l = []
for key in api_help.keys():
if key.find(str) >= 0:
l.append(key)
if not l:
print "No API keyword matching \"%s\" found." % str
return
if len(l) == 1:
return api_help_cmd(l[0])
l.sort()
print "The following API keywords contain the substring \"%s\":\n" % str
print_columns([Just_Left], l, has_title = 0, wrap_space = " ")
def api_help_expander(comp):
l = []
for key in api_help.keys():
if key[:len(comp)] == comp:
l.append(key)
return get_completions(comp, l)
new_command("api-help", api_help_cmd,
[ arg(str_t, "topic", expander = api_help_expander) ],
short = "get API help",
type = ["Help"],
see_also = ["api-apropos", "apropos", "help"],
doc = """
Prints the declaration of API entries matching <i>topic</i>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1063")
def api_apropos_cmd(str):
l = []
for key in api_help.keys():
if key.find(str) >= 0 or api_help[key].find(str) >= 0:
l.append(key)
if not l:
print "The string \"%s\" cannot be found in any API documentation." % str
return
if len(l) == 1:
plural = "y"
else:
plural = "ies"
l.sort()
if str:
print "The string \"%s\" can be found in the following API help entr%s:\n" % (str, plural)
else:
print "The following API help entries exist:\n"
print_columns([Just_Left], l, has_title = 0, wrap_space = " ")
new_command("api-apropos", api_apropos_cmd,
[ arg(str_t, "search-string") ],
short = "search API help",
type = ["Help"],
see_also = ["api-help", "apropos", "help"],
doc = """
Search the API documentation for the string <i>search-string</i>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1094")
#
# -------------------- hex --------------------
#
def int2base(base, value):
chars = "0123456789abcdef"
str = ""
while value > 0:
str = chars[int(value % base)] + str
value = value / base
if not str:
str = "0"
return str
def hex_cmd(value):
return number_str(value, 16)
new_command("hex", hex_cmd,
[arg(int_t, "value")],
alias = "",
type = ["Command-Line Interface", "Output"],
short = "display integer in hexadecimal notation",
see_also = ["print"],
doc = """
Returns the parameter as a string in hexadecimal notation. This is similar to <b>print</b> -x <i>value</i>.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1124")
#
# -------------------- output-radix --------------------
#
def output_radix_cmd(rad, group):
if rad == 0 and group < 0:
group = get_output_group()
print "The current output-radix is %d." % get_output_radix()
if group:
print "Output is grouped in units of %d digits." % group
return
if rad != 0 and rad != 2 and rad != 8 and rad != 10 and rad != 16:
raise CliError, "The radix must be either 2, 8, 10, or 16."
if group < 0:
group = get_output_group(rad)
if rad == 0:
rad = get_output_radix()
try:
set_output_radix(rad, group)
except ValueError, msg:
print msg
new_command("output-radix", output_radix_cmd,
[ arg(int_t, "base", "?", 0),
arg(int_t, "group", "?", -1) ],
type = ["Command-Line Interface", "Output"],
short = "change the default output radix",
see_also = ["digit-grouping", "print"],
doc = """
Changes or displays the default output radix for numbers. It can be
set to 2 for binary, 8 for octal, 10 for decimal, or 16 for
hexadecimal output.
If <var>group</var> is non-zero, numbers will be grouped in groups of
<var>group</var> digits, separated by underscores (<tt>_</tt>).
Currently, this only affects the output of the <cmd>print</cmd> command,
and how return values of commands are displayed.
Without arguments, the current setting will be shown.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1158")
#
# -------------------- digit-grouping --------------------
#
def digit_grouping_cmd(rad, group):
if rad != 2 and rad != 8 and rad != 10 and rad != 16:
raise CliError, "The radix must be either 2, 8, 10, or 16."
if group < 0:
group = get_output_group(rad)
if group == 0:
print "Numbers in radix %d are not grouped." % rad
else:
print ("Numbers in radix %d are grouped in groups of %d digits."
% (rad, group))
return
try:
set_output_radix(rad, group, 0)
except ValueError, msg:
print msg
new_command("digit-grouping", digit_grouping_cmd,
[ arg(int_t, "base"),
arg(int_t, "digits", "?", -1) ],
type = ["Command-Line Interface", "Output"],
short = "set output formatting for numbers",
see_also = ["output-radix", "print"],
doc = """
Changes or displays how numbers are formatted for a given radix.
This will separate groups of <arg>digits</arg> digits by an underscore when
they are formatted for output. Separate grouping is maintained for each radix.
If <arg>digits</arg> is zero, no separators are printed for that radix.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1198")
#
# -------------------- print --------------------
#
def print_cmd(f, value, size):
x = b = o = s = d = 0
if f[2] == "-x": x = f[1]
if f[2] == "-o": o = f[1]
if f[2] == "-b": b = f[1]
if f[2] == "-s": s = f[1]
if f[2] == "-d": d = f[1]
if size == 0:
size = 64
had_size = 0
else:
had_size = 1
if size not in [8, 16, 32, 64, 128]:
print "Size must be 8, 16, 32, 64, or 128"
return
if value < 0:
if value < -(1 << size):
print "value truncated to", size, "bits."
value = ((1 << size) + value) & ((1 << size) - 1)
if not had_size:
s = 1
elif value >= (1 << size):
value = value & ((1 << size) - 1)
print "value truncated to", size, "bits."
if x:
base = 16
elif d:
base = 10
elif o:
base = 8
elif b:
base = 2
else:
base = -1
if s and value >= (1 << (size - 1)):
value -= 1 << size
print number_str(value, base)
new_command("print", print_cmd,
[arg((flag_t,flag_t,flag_t,flag_t,flag_t), ("-x","-o","-b","-s","-d"), "?", (flag_t, 0, "-x")),
arg(int_t, "value"), arg(int_t, "size", "?", 0)],
alias = "p",
repeat = print_cmd,
            pri = -100, # a priority below 0 makes the CLI evaluate all common commands first
type = ["Command-Line Interface", "Output"],
short = "display integer in various bases",
see_also = ["output-radix"],
doc = """
Prints <arg>value</arg> in hexadecimal (<arg>-x</arg>), decimal
(<arg>-d</arg>), octal (<arg>-o</arg>), or binary (<arg>-b</arg>)
notation. Default is to use the notation specified by the
<cmd>output-radix</cmd> command.
Use <arg>-s</arg> to convert the value to signed
integers. <arg>size</arg> is the bit width to use. E.g., <cmd>print -x
257 8</cmd> will print 0x1. Valid sizes are 8, 16, 32, 64, and 128
bits. Default size is 64.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1257")
#
# -------------------- echo --------------------
#
def echo_cmd(poly):
if poly[0] == str_t:
print poly[1]
elif poly[0] == float_t:
print poly[1]
else:
print number_str(poly[1])
new_command("echo", echo_cmd,
[arg((int_t, float_t, str_t), ("integer", "float", "string"), "?", (str_t, ""))],
type = ["Command-Line Interface", "Output"],
short = "echo a value to screen",
doc = """
Prints the string, integer, or float. Useful for annotating test scripts.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1291")
#
# -------------------- date --------------------
#
def date_cmd():
import time
print time.ctime(time.time())
new_command("date", date_cmd,
[],
type = ["Command-Line Interface"],
short = "host time and date",
doc = """
Prints the current date and time, in the form <tt>Fri Nov 2 12:00:36 2001</tt>.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1306")
#
# -------------------- timer-start --------------------
#
timer_start = 0
timer_stop = 0
def timer_start_cmd():
global timer_start
print "Timing of Simics started"
timer_start = time.clock()
new_command("timer-start", timer_start_cmd,
[], # timer-start
type = "internal commands",
short = "start user timing",
see_also = ['timer-stop', 'timer-query'],
doc = """
Start timing of Simics (user time).""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1325")
#
# -------------------- timer-stop --------------------
#
def timer_stop_cmd():
global timer_start, timer_stop
timer_stop = (time.clock() - timer_start)
print "Timing of Simics stopped"
new_command("timer-stop", timer_stop_cmd,
[],
type = "internal commands",
short = "end user timing",
see_also = ['timer-start', 'timer-query'],
doc = """
End timing of Simics (user time).""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1342")
#
# -------------------- timer-query --------------------
#
def timer_query_cmd():
global timer_stop
print "User time (s): %.2f" % timer_stop
new_command("timer-query", timer_query_cmd,
[],
type = "internal commands",
short = "query user timing",
see_also = ['timer-start', 'timer-stop'],
doc = """
Query timing of Simics (user time).""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1359")
#
# -------------------- ls --------------------
#
def ls_cmd():
l = os.listdir(os.getcwd())
l.sort()
print_columns([ Just_Left ], l, has_title = 0, wrap_space = " ")
new_command("ls", ls_cmd,
[],
type = ["Command-Line Interface", "Files and Directories"],
short = "list files",
see_also = ["cd", "pwd"],
doc = """
List files in working directory of Simics.
Works like <tt>ls</tt> in a Unix shell, but does not take any parameters.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1376")
#
# -------------------- cd --------------------
#
def cd_cmd(path):
try:
os.chdir(SIM_native_path(path))
except OSError, msg:
print msg
new_command("cd", cd_cmd,
[arg(filename_t(dirs=1,exist=1), "path")],
type = ["Command-Line Interface", "Files and Directories"],
short = "change working directory",
see_also = ["ls", "pwd"],
doc = """
Change working directory of Simics. Works as if you had done 'cd' at the
shell. Converts <i>path</i> to host native form first (see
<i>native-path</i>).
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1395")
#
# -------------- pushd/popd/dirs --------------
#
_dir_stack = [ ]
def _print_dir_stack():
print os.getcwd(),
for d in _dir_stack:
print d,
print
def pushd_cmd(no_cd, path):
global _dir_stack
if not path and no_cd:
return
if not path:
if len(_dir_stack) < 1:
print "No other directory available on the directory stack."
return
dir = _dir_stack[0]
_dir_stack[0] = os.getcwd()
try:
os.chdir(SIM_native_path(dir))
except OSError, msg:
print msg
_dir_stack = _dir_stack[1:]
return
return
old_dir = os.getcwd()
if not no_cd:
try:
os.chdir(SIM_native_path(path))
except OSError, msg:
print msg
return
_dir_stack = [ old_dir ] + _dir_stack
if not path:
_print_dir_stack()
new_command("pushd", pushd_cmd,
[arg(flag_t, "-n"),
arg(filename_t(dirs = 1, exist = 1), "path", "?", 0)],
type = ["Command-Line Interface", "Files and Directories"],
short = "push directory on directory stack",
see_also = ["dirs", "popd"],
doc = """
Pushes the directory <i>path</i> on top of the directory stack, or
exchanges the topmost two directories on the stack. If <tt>-n</tt> is
given, only change the contents of the stack, but do not change
current working directory.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1452")
def popd_cmd(no_cd):
global _dir_stack
if len(_dir_stack) < 1:
print "The directory stack is empty."
return
dir = _dir_stack[0]
if not no_cd:
try:
os.chdir(SIM_native_path(dir))
except OSError, msg:
print msg
return
_dir_stack = _dir_stack[1:]
if no_cd:
_print_dir_stack()
new_command("popd", popd_cmd,
[arg(flag_t, "-n")],
type = ["Command-Line Interface", "Files and Directories"],
short = "pop directory from directory stack",
see_also = ["dirs", "pushd"],
doc = """
Pops a directory off the directory stack and, unless the <tt>-n</tt>
option is specified, change current working directory to that
directory.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1483")
def dirs_cmd():
_print_dir_stack()
new_command("dirs", dirs_cmd,
[],
type = ["Command-Line Interface", "Files and Directories"],
short = "display directory stack",
see_also = ["pushd", "popd"],
doc = """
Shows the contents of the directory stack.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1497")
#
# -------------------- run-command-file --------------------
#
new_command("run-command-file", SIM_run_command_file,
args = [arg(filename_t(exist = 1, simpath = 1), "file")],
type = ["Command-Line Interface", "Files and Directories"],
short = "execute a simics script",
alias = "include",
see_also = ["run-python-file", "add-directory"],
doc = """
This command starts executing a Simics script. A Simics script is an
ordinary text file that contains Simics commands. One command on each
line. The syntax used is exactly the same as when commands are typed
at the Simics prompt. The # character is used as the start of a
comment and applies to the rest of the line.
Python code can also be executed by prefixing the line with
@. Multi-line Python statements can be used by leaving a blank line at
the end of the statement. Only the first line should have an @ in this
case.
Simics scripts usually end with the suffix ".simics", but this is only a
convention. The suffix is ignored by Simics.
This is an example of a Simics script:
<tt>
# This is a Simics script<br/>
<br/>
break 0xffc000 # set a breakpoint<br/>
run<br/>
echo "breakpoint reached"<br/>
run-command-file another-script.simics<br/>
</tt>
Simics scripts can also be executed directly at startup by using the -x
command line option.
If a command fails or the user presses Control-C the Simics script is
interrupted and control returns to the Simics prompt.
<cmd>run-command-file</cmd> uses Simics's Search Path and path markers
(%simics%, %script%) to find the script to run. Refer to Simics User Guide (CLI
chapter) for more information.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1511")
#
# -------------------- list-haps --------------------
#
def hap_expander(str):
try:
hap_list = [ hap[0] for hap in conf.sim.hap_list ]
return get_completions(str, hap_list)
except:
return []
def hap_list_cmd(name):
haps = conf.sim.hap_list
if not name:
def f(x):
return [ x[0] ]
l = map(f, haps)
print_columns([Just_Left], l, has_title = 0)
return
for hap in haps:
if -1 != string.find(hap[0], name):
pr(bold("\nNAME\n"))
print " %s" % hap[0]
pr(bold("\nCALLBACK TYPE\n"))
argnames = [ "callback_data", "trigger_obj" ]
if hap[2] != None:
argnames = argnames + hap[2]
pr(hap_c_arguments("noc" + hap[1], argnames, terminal_width() - 5, 3))
pr("\n")
pr(bold("\nINDEX\n"))
print " %s" % iff(hap[3] == None, "no index", hap[3])
pr(bold("\nINSTALLED HANDLERS\n"))
print " %s" % iff(hap[5], hap[5], "none")
pr(bold("\nDESCRIPTION\n"))
pr(" ")
format_print(hap[4], 3, terminal_width())
print
new_command("list-haps", hap_list_cmd,
[arg(str_t, "substring", "?", "", expander = hap_expander)],
alias="hl",
type = ["Haps"],
short="print list of haps",
doc_items = [],
doc = """
Prints a description of all haps whose names contain <i>substring</i>. If the
substring is omitted, a list of all haps will be printed.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1594")
def find_function(funcs, addr):
for fn, file, start, end in funcs:
if start <= addr and addr < end:
return fn
return None
def hap_callback_list_cmd(name):
if name:
try:
callbacks = [[name, conf.sim.hap_callbacks[name]]]
except:
print "No hap '%s'" % name
return
else:
callbacks = []
for hap in conf.sim.hap_list:
cb = conf.sim.hap_callbacks[hap[0]]
if len(cb):
callbacks += [[hap[0], cb]]
try:
st = SIM_get_object("internal-symbols")
funcs = st.functions
except:
funcs = None
# todo: handle internals
for hap in callbacks:
for cb in hap[1]:
if cb[4]:
obj = cb[4].name
else:
obj = ""
f = None
if cb[5][0:2] == "0x" and funcs:
f = find_function(funcs, string.atol(cb[5], 0))
if f:
f = "(C) '%s()'" % f
else:
m = re.match("<function (.*) at 0x(.*)>", cb[5])
if m:
f = "(Python) '%s()'" % m.group(1)
minus_one = (1 << 64) - 1
if cb[1] == minus_one and cb[2] == minus_one:
range = ' '
elif cb[1] != cb[2]:
range = 'range %2d-%2d' % (cb[1], cb[2])
else:
range = ' index %3d ' % cb[1]
print "%-28s id %d %s Sim: %s Object: %s" % (
hap[0], cb[0], range, iff(cb[3] & 1, "yes", "no"), obj)
fstr = "Function : %-38s" % iff(f, f, cb[5][:68])
dstr = "User data: %s" % cb[6][:68]
print fstr,
if len(fstr) + len(dstr) >= 80:
print
print dstr
print
new_command("list-hap-callbacks", hap_callback_list_cmd,
[arg(str_t, "hap", "?", "", expander = hap_expander)],
type = ["Haps"],
short="print list of hap callbacks",
doc_items = [],
doc = """
Prints a list of all callbacks installed for <arg>hap</arg>, or
for all haps if the argument is omitted.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1663")
#
# -------------------- break-hap, trace-hap --------------------
#
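# hap_tracker specializes the generic 'tracker' helper (defined elsewhere in
# the CLI code): self.map maps each tracked hap name to the callback id
# returned by SIM_hap_add_callback(), so that tracing or breaking can later be
# turned off again per hap, or for all haps at once.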
class hap_tracker(tracker):
def __init__(self, stop, cmd, short, doc, type, see_also = []):
tracker.__init__(self, stop, cmd, "hap", hap_expander, short, doc,
group = type, see_also = see_also)
self.map = {}
self.catchall = 0
def all_haps(self):
return [self.resolve_target(name) for name in SIM_get_all_hap_types()]
def show(self, hapname, obj, *args):
print "got hap %s" % hapname
def list(self):
print "%s enabled for these haps:" % iff(self.stop, "breaking", "tracing")
for hap in self.map.keys():
print " %s" % hap
def resolve_target(self, hapname):
# check that hap exists
SIM_hap_get_number(hapname)
return hapname
def is_tracked(self, hap):
return self.map.has_key(hap)
def install_hap(self, hap):
self.map[hap] = SIM_hap_add_callback(hap, self.callback, hap)
def uninstall_hap(self, hap):
SIM_hap_delete_callback_id(hap, self.map[hap])
del self.map[hap]
def track_all(self):
# TODO: document why these haps aren't tracked when -all is given
for hap in self.all_haps():
if hap in ['Core_Hap_Callback_Installed',
'Core_Hap_Callback_Removed',
'Core_Screen_Resized',
'Core_Back_To_Front',
'Core_Continuation',
                       'Core_Simulation_Stopped',
'Python_Tab_Completion']:
print 'Skipping %s' % hap
continue
if not self.is_tracked(hap):
self.install_hap(hap)
def track_none(self):
for hap in self.map.keys():
self.uninstall_hap(hap)
def track_on(self, hap):
if self.is_tracked(hap):
return
self.install_hap(hap)
def track_off(self, hap):
if self.is_tracked(hap):
self.uninstall_hap(hap)
trace_hap_cmds = hap_tracker(0, "trace-hap",
short = "trace haps",
type = "inspect/change",
see_also = [ "break-hap", "list-haps" ],
doc = """
Enables and disables tracing of haps. When this is enabled, every
time the specified hap is triggered a message is printed.
The <i>hap</i> parameter specifies the hap.
Instead of a hap, the <tt>-all</tt> flag may be given. This will
enable or disable tracing of all haps.
""")
break_hap_cmds = hap_tracker(1, "break-hap",
short = "break on haps",
type = "breakpoint",
see_also = [ "trace-hap", "list-haps" ],
doc = """
Enables and disables breaking simulation on haps. When this is
enabled, every time the specified hap is triggered a message is
printed and simulation is stopped.
The <i>hap</i> parameter specifies the hap.
Instead of a hap, the <tt>-all</tt> flag may be given. This will
enable or disable breaking on all haps.
""")
#
# -------------------- break-on-log-message --------------------
#
def log_hap_callback(udata, trigger, type, message):
hid, filt_obj, substring, filt_type = udata
if substring in message and (filt_type is None or filt_type == type) \
and (not filt_obj or filt_obj == trigger):
cpu = SIM_current_processor()
step = SIM_step_count(cpu)
SIM_hap_delete_callback_id("Core_Log_Message", hid)
SIM_break_simulation("Log message matched at step %s:%d."%
(cpu.name, step))
log_types = {
'info': Sim_Log_Info,
'error': Sim_Log_Error,
'spec-viol': Sim_Log_Spec_Violation,
'tgt-error': Sim_Log_Target_Error,
'unimpl': Sim_Log_Unimplemented,
'undefined': Sim_Log_Undefined,
}
def break_log_cmd(substring, obj, type):
udata = [ 0, obj, substring, type ]
udata[0] = SIM_hap_add_callback_obj("Core_Log_Message", obj, 0,
log_hap_callback, udata)
new_command("break-log", break_log_cmd,
[arg(str_t, "substring", "?", ""),
arg(obj_t("log object", kind = "log_object"), "object", "?"),
arg(string_set_t(log_types), "type", "?")],
short = "break on log message",
doc = """
Break on log message. With no arguments the simulation will stop when the
next log message is printed. By specifying <arg>object</arg>, <arg>type</arg>,
and/or <arg>substring</arg> it will stop on the next log message matching these
conditions. The break is triggered once only, to break again you need to run
the command again.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1796")
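# Hypothetical example: stop the next time any log message containing the
# given substring is printed (the substring is made up for illustration):
#
#   simics> break-log "interrupt"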
#
# -------------------- pwd --------------------
#
def pwd_cmd():
return os.getcwd()
new_command("pwd", pwd_cmd,
[],
type = ["Command-Line Interface", "Files and Directories"],
short = "print working directory",
see_also = ["cd", "ls"],
doc = """
Print the working directory of Simics. Similar to the shell command 'pwd'
(print working directory).""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1815")
#
# -------------------- set-simics-id --------------------
#
def set_simics_id_cmd(id):
pass
new_command("set-simics-id", set_simics_id_cmd,
[arg(int_t, "id")],
short = "set the global Simics ID number",
deprecated = "the new central system",
doc = """
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1831")
#
# -------------------- quit --------------------
#
def quit_cmd(code):
SIM_quit(code)
new_command("quit", quit_cmd,
[arg(sint32_t, "status", "?", 0)],
alias = ["q", "exit"],
type = ["Command-Line Interface"],
short = "quit from Simics",
doc = """
Stop Simics gracefully. The optional argument is the exit status of Simics.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1845")
#
# -------------------- expect --------------------
#
def expect_cmd(i1, i2, v):
if v:
print "Value is " + `i1[1]` +" expecting " + `i2[1]`
if i1[1] != i2[1]:
print "*** Values differ in expect command:",
if isinstance(i1[1], (int, long)):
print number_str(i1[1]),
else:
print i1[1],
if isinstance(i2[1], (int, long)):
print number_str(i2[1])
else:
print i2[1]
SIM_quit(1)
new_command("expect", expect_cmd,
[arg((int_t, str_t), ("i1", "s1")),
arg((int_t, str_t), ("i2", "s2")),
arg(flag_t, "-v")],
type = ["Test"],
short = "fail if not equal",
doc = """
If values <arg>i1</arg> and <arg>i2</arg> are not equal the simulator will
print them and exit with error <fun>exit(1)</fun>. <arg>-v</arg> prints the two
values before comparing them.
This can be useful when writing scripts that want to assert a state in
the simulator. Note that it only works with integer and string arguments.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1872")
#
# -------------------- pid --------------------
#
def pid_cmd():
print os.getpid()
new_command("pid", pid_cmd,
[],
type = ["Command-Line Interface"],
short = "print pid of Simics process",
doc = """
Outputs the process identity of the Simics process itself, useful for
various things (such as attaching a remote debugger).""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1894")
#
# -------------------- readme --------------------
#
def readme_cmd():
print SIM_readme()
new_command("readme", readme_cmd,
[],
type = ["Help"],
short = "print information about Simics",
doc = """
Prints various useful information (README) about Simics.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1909")
#
# -------------------- license --------------------
#
def license_cmd():
SIM_license()
new_command("license", license_cmd,
[],
type = ["Help"],
short = "print simics license",
doc = """
Prints the LICENSE that applies to this copy of Simics.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1923")
#
# -------------------- copyright --------------------
#
def copyright_cmd():
SIM_copyright()
new_command("copyright", copyright_cmd,
[],
type = ["Help"],
short = "print full Simics copyright information",
doc = """
Prints the complete copyright information that applies to this copy of Simics.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1937")
#
# -------------------- version --------------------
#
def version_cmd(verbose):
if verbose:
print "%s (%s)\n" % (SIM_version(), SIM_snapshot_date())
print "build-id: %d" % conf.sim.version
print SIM_version_sub()
else:
print SIM_version()
print
new_command("version", version_cmd,
[arg(flag_t, "-v")],
type = ["Help"],
short = "display Simics version",
doc = """
Prints the Simics version. With the <tt>-v</tt> flag, compiler version
and compile dates are printed as well.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1957");
#
# -------------------- quiet --------------------
#
quiet_mode = 0
def quiet_cmd(mode):
global quiet_mode
if mode not in [-1,0,1]:
print "Illegal mode"
return
if mode == -1:
quiet_mode = 1 - quiet_mode
else:
quiet_mode = mode
SIM_set_quiet(quiet_mode)
if quiet_mode:
print "[simics] Switching on quiet mode."
else:
print "[simics] Switching off quiet mode."
new_command("quiet", quiet_cmd,
[arg(int_t, "mode", "?", -1)],
type = "internal commands",
short = "toggle quiet mode",
doc = """
Switches quiet mode to given value.
Sets Simics to 'quiet' mode if the value is 1, or turns off
quiet mode if the value is 0, or toggles if the value is -1.
See also the 'verbose' command. Default is to toggle.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1990")
#
# -------------------- verbose --------------------
#
verbose_mode = 0
def verbose_cmd(mode):
global verbose_mode
if mode not in [-1,0,1]:
print "Illegal mode"
return
if mode == -1:
verbose_mode = 1 - verbose_mode
else:
verbose_mode = mode
SIM_set_verbose(verbose_mode)
if verbose_mode:
print "[simics] Switching on verbose mode."
else:
print "[simics] Switching off verbose mode."
new_command("verbose", verbose_cmd,
[arg(int_t, "mode", "?", -1)],
type = "internal commands",
short = "toggle verbose mode",
doc = """
Switches verbose mode to given value.
Sets Simics to 'verbose' mode if the value is 1, or turns off
verbose mode if the value is 0, or toggles if the value is -1.
See also the 'quiet' command. Default is to toggle.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2024")
#
# -------------------- = --------------------
#
def assignment_command(name, value):
if name[0] == '%':
if not value[0] == int_t:
print "Value is not an integer."
SIM_command_has_problem()
return
(cpu, _) = get_cpu()
return obj_write_reg_cmd(cpu, name[1:], cast(uint64_t, value[1]))
elif name.startswith('$$'):
name = name[2:]
local = 1
elif name[0] == '$':
name = name[1:]
local = 0
else:
local = 0
print ('Deprecation warning: variable assignment without variable '
'prefix $.')
if conf.prefs.fail_on_warnings:
SIM_quit(1)
get_current_locals().set_variable_value(name, value[1], local)
# do not return anything (avoid execution of string assignments)
new_command("=", assignment_command,
[arg(str_t, doc = "name"),
arg((int_t, str_t, float_t),
("ival", "sval", "fval"), doc = "value")],
type = ["Command-Line Interface"],
short = "set an environment variable",
pri = -100,
infix = 1,
doc = """
Set a Simics environment variable to an integer or string value.
Or assigns a value to a processor register.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2061")
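# Hypothetical examples of the assignment command above (variable and register
# names are for illustration only):
#
#   simics> $foo = 42        # set a CLI variable to an integer
#   simics> $msg = "hello"   # string values are accepted as well
#   simics> %pc = 0x1000     # a leading '%' writes a processor register instead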
#
# -------------------- [ --------------------
#
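# array_command implements indexed access to CLI variables: with -r it returns
# element idx of the variable, with -w it stores value at that index. Only
# names with a '$' (or '$$' for local) prefix are accepted.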
def array_command(name, idx, rw, value):
if name[0] != '$':
print "Indexing only supported for variables"
SIM_command_has_problem()
return
if name.startswith('$$'):
name = name[2:]
local = 1
else:
name = name[1:]
local = 0
space = get_current_locals()
if rw[2] == '-r':
try:
return getattr(space, name)[idx]
except Exception, msg:
return
else:
try:
space.set_variable_value_idx(name, value[1], local, idx)
except:
print "Failed setting variable."
SIM_command_has_problem()
return
new_command("[", array_command,
[arg(str_t, "variable"),
arg(int_t, "idx"),
arg((flag_t, flag_t), ('-r', '-w')),
arg((int_t, str_t), ('ival', 'sval'), doc = 'value')],
type = ["Command-Line Interface"],
short = "",
pri = 750,
infix = 1,
doc = """
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2103")
#
# -------------------- unset --------------------
#
def unset_command(all, names):
if all:
rlist = [x for x in get_current_locals().get_all_variables().keys()
if x not in names]
else:
rlist = names
for n in rlist:
try:
get_current_locals().remove_variable(n)
except:
print 'Unset failed for $%s.' % n
new_command("unset", unset_command,
[arg(flag_t, "-a"),
arg(str_t, "variables", "*")],
type = ["Command-Line Interface"],
short = "remove a environment variable",
doc = """
Removes (unsets) a Simics environment variable. The <arg>-a</arg> flag causes
all variables to be removed, <em>except</em> the ones specified as
<arg>variables</arg>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2132")
#
# -------------------- += --------------------
#
def inc_environment_variable(name, value):
if name[0] == '%':
if not value[0] == int_t:
print "Value is not an integer."
SIM_command_has_problem()
return
(cpu, _) = get_cpu()
value = cast(uint64_t, obj_read_reg_cmd(cpu, name[1:]) + value[1])
return obj_write_reg_cmd(cpu, name[1:], value)
elif name[0] == '$':
name = name[1:]
else:
print 'Deprecation warning: variable assignment without variable prefix $.'
if conf.prefs.fail_on_warnings:
SIM_quit(1)
space = get_current_locals()
if space.get_all_variables().has_key(name):
old = getattr(space, name)
if isinstance(old, str):
old = (str_t, old)
else:
old = (int_t, old)
else:
if value[0] == int_t:
old = (value[0], 0)
else:
old = (str_t, '')
setattr(space, name, plus(old, value))
return getattr(space, name)
new_command("+=", inc_environment_variable,
[arg(str_t, doc = "name"),
arg((int_t, str_t), ("ival", "sval"), doc = "value")],
type = ["Command-Line Interface"],
short = "set an environment variable",
pri = -100,
infix = 1,
doc = """
Add a string or integer to a Simics environment variable, or an integer
value to a register.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2178")
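# Hypothetical examples for += (variable names are made up):
#
#   simics> $count = 0
#   simics> $count += 10       # integer addition
#   simics> $name = "sim"
#   simics> $name += "ics"     # strings are concatenated via plus() above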
#
# -------------------- -= --------------------
#
def dec_environment_variable(name, value):
if name[0] == '%':
(cpu, _) = get_cpu()
value = cast(uint64_t, obj_read_reg_cmd(cpu, name[1:]) - value)
return obj_write_reg_cmd(cpu, name[1:], value)
elif name[0] == '$':
name = name[1:]
else:
print 'Deprecation warning: variable assignment without variable prefix $.'
if conf.prefs.fail_on_warnings:
SIM_quit(1)
space = get_current_locals()
if space.get_all_variables().has_key(name):
old = getattr(space, name)
if not isinstance(old, (int, long)):
print "Variable is not an integer."
SIM_command_has_problem()
return
else:
old = 0
setattr(space, name, minus(old, value))
return getattr(space, name)
new_command("-=", dec_environment_variable,
[arg(str_t, doc = "name"),
arg(int_t, "value")],
type = ["Command-Line Interface"],
short = "set an environment variable",
pri = -100,
infix = 1,
doc = """
Subtract an integer from a Simics environment variable, or from a
register.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2218")
#
# -------------------- $ --------------------
#
def environment_var_expander(comp):
return get_completions(comp,
get_current_locals().get_all_variables().keys())
def get_environment_variable(name):
return getattr(get_current_locals(), name)
new_command("$", get_environment_variable,
[arg(str_t, "name", expander = environment_var_expander)],
type = ["Command-Line Interface"],
short = "get the value of an environment variable",
pri = 2000,
check_args = 0,
doc = """
Gets the value of a Simics environment variable, like in <tt>print $var</tt>.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2240")
#
# -------------------- list-vars --------------------
#
def list_vars_cmd():
l = []
m = 0
d = get_current_locals().get_all_variables()
for k in d.keys():
l.append((k,d[k]))
m = max(m, len(k))
l.sort()
for v in l:
print "%-*s =" % (m, v[0]), v[1]
new_command("list-vars", list_vars_cmd,
[],
type = ["Command-Line Interface"],
short = "list environment variables",
doc = """
Lists all Simics environment variables and their current
values. Environment variables can be used to store temporary
values. To set a variable, write <tt>variable = value</tt> at the
Simics prompt. The value can be of type integer, string, or float. To
access a variable, prefix the name with a <tt>$</tt>, e.g.,
<tt>$variable</tt>. A variable can be put wherever an expression can be
used. For example:
<tt>simics> tmp = %pc + 4</tt><br/>
<tt>simics> count = 10</tt><br/>
<tt>simics> disassemble $tmp $count</tt><br/>
They can also be accessed from Python by using the name space simenv:
<tt>simics> $foo = 1 + 4 * 4</tt><br/>
<tt>simics> @print simenv.foo</tt><br/>
<tt>17</tt><br/>
<tt>simics> @simenv.bar = "hello"</tt><br/>
<tt>simics> echo $bar</tt><br/>
<tt>hello</tt>
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2265")
#
# -------------------- list-namespaces --------------------
#
def list_namespaces_cmd(name_sort):
l = []
i = []
m = 0
    # Find all interfaces used as command namespaces
iface_namespaces = []
for cmd in simics_commands():
iface = cmd["namespace"]
if iface:
try:
if SIM_get_class(iface) == iface:
iface = None
except SimExc_General:
pass
if iface and not iface in iface_namespaces:
iface_namespaces.append(iface)
for o in SIM_get_all_objects():
namespace = o.name
classname = o.classname
l = l + [ [ "<" + classname + ">", namespace ] ]
for iface in iface_namespaces:
if instance_of(o, iface):
i = i + [ [ "<" + iface + ">", namespace ] ]
if name_sort:
l.sort(lambda a, b: cmp(a[1], b[1]))
i.sort(lambda a, b: cmp(a[1], b[1]))
else:
l.sort()
i.sort()
print_columns([ Just_Left, Just_Left ],
[ [ "Class", "Namespace (Object)" ] ] + l)
print ""
print_columns([ Just_Left, Just_Left ],
[ [ "Interface", "Namespace (Object)" ] ] + i)
new_command("list-namespaces", list_namespaces_cmd,
[arg(flag_t, "-n")],
type = ["Command-Line Interface", "Help"],
short = "list all namespaces",
see_also = ['list-objects'],
doc = """
Lists all namespaces (objects) and which classes or interfaces they
belong to. A namespace is the same as a configuration object. Many
objects implement commands local to them. These commands are invoked
by giving the object name followed by a period and then the local
command name; e.g., <cmd>rec0.playback-start</cmd>.
If the <arg>-n</arg> flag is given, the output will be sorted on the
object name instead of the class name, which is the default.
Some objects also implement command interfaces. A command interface is
a collection of commands that can be used by an object implementing
this interface. For example, breakpoint is an interface that is
implemented by objects of the <class>memory-space</class> class. This
allows one to write <cmd>phys_mem0.break 0xffc00</cmd> to set a
breakpoint in the memory interface.
Objects implementing command interfaces are listed in the second half
of output from this command.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2341")
#
# -------------------- list-objects --------------------
#
def class_or_iface_expander(string):
return get_completions(string, sim.classes.keys() + sim.interfaces.keys());
def print_object_list(type, comp, objs, name_sort):
l = [ ["<" + o.classname + ">", o.name]
for o in objs
if not type or instance_of(o, type) ]
if name_sort:
l.sort(lambda a, b: cmp(a[1], b[1]))
else:
l.sort()
print_columns([ Just_Left, Just_Left ],
[ [ iff(comp, "Component Class", "Class"),
"Object" ] ] + l)
print ""
def list_objects_cmd(type, name_sort, all):
if type and type not in sim.classes and type not in sim.interfaces:
print "No such class or interface: '%s'" % type
return
if all:
print_object_list(type, False, sim.objects.values() , name_sort)
else:
comp_objs = sim.interfaces['component'].objects.values()
print_object_list(type, True, comp_objs, name_sort)
print_object_list(type, False,
(o for o in sim.objects.values()
if o not in comp_objs),
name_sort)
new_command("list-objects", list_objects_cmd,
[arg(str_t, "type", "?", "", expander = class_or_iface_expander),
arg(flag_t, "-n"),
arg(flag_t, "-a")],
type = ["Configuration"],
short = "list all objects",
see_also = ["list-namespaces"],
doc = """
Lists all configuration objects and the class they belong to.
You can specify a class or interface name as <arg>type</arg>. Only objects of
that class or implementing that interface will then be listed.
The objects are sorted by class name by default. Use the <arg>-n</arg> flag to
sort them by object name instead.
Component objects are printed, first, and then all other objects. To mix all
objects in the same list, use the <arg>-a</arg> flag.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2404")
#
# -------------------- load-module --------------------
#
def module_expander(string):
nonloaded = []
def callback(name, file, usr_ver, attrs, nonloaded=nonloaded):
if "LOADED" not in attrs:
nonloaded.append(name)
SIM_for_all_modules(callback)
return get_completions(string, nonloaded)
def import_module_commands(module, glob):
if not VT_module_has_commands(module, glob):
return
scriptmod = sub("\+", "__", sub("-", "_", module))
modname = "mod_%s_%scommands" % (scriptmod, iff(glob, "g", ""))
try:
moduleobj = __import__(modname)
except Exception, msg:
print "Problem importing %scommands for %s: %s" % (
iff(glob, "global ", ""), module, msg)
SIM_command_has_problem()
return
if not glob and hasattr(moduleobj, 'class_funcs'):
class_funcs.update(moduleobj.class_funcs)
def module_loaded_callback(dummy, obj, module):
import_module_commands(module, 0)
def check_for_gcommands():
for name in [x[0] for x in SIM_get_all_modules()]:
VT_push_current_loading_module(name);
import_module_commands(name, 1)
VT_pop_current_loading_module()
# put it in a try to get documentation working
try:
SIM_hap_add_callback("Core_Module_Loaded", module_loaded_callback, 0)
except NameError:
pass
def load_module_cmd(module):
assert_not_running()
try:
SIM_load_module(module)
except Exception, msg:
print "Error loading module '%s': %s" % (module, msg)
SIM_command_has_problem();
new_command("load-module", load_module_cmd,
[arg(str_t, "module", expander = module_expander)],
type = ["Configuration", "Modules"],
see_also = ["list-modules", "list-failed-modules",
"module-list-refresh", "unload-module",
"add-module-directory"],
short = "load module into Simics",
doc = """
Load a module (Simics extension). Simics supports dynamically loadable
modules. Read the <cite>Simics Users Guide</cite> for more info on how
to write modules.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2475")
#
# -------------------- unload-module --------------------
#
def unload_module_cmd(module):
assert_not_running()
try:
SIM_unload_module(module)
print "Module", module, "unloaded"
except Exception, msg:
print "Error unloading module %s: %s" % (module, msg)
new_command("unload-module", unload_module_cmd,
[arg(str_t, "module")],
alias = "",
type = ["Configuration", "Modules"],
short = "unload module",
see_also = ["load-module"],
doc = """
Unload a module (Simics extension). Note that not all modules can be unloaded.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2501")
def module_list_cmd(substr, loaded, verbose):
mods = [x for x in SIM_get_all_modules() if x[0].find(substr) >= 0]
if len(mods) == 0:
print "No modules %sfound." % (
iff(len(substr), 'matching pattern "%s" ' % substr, ''))
return
info = []
for mod in mods:
if loaded and not mod[2]:
continue
line = [mod[0],
iff(mod[2], 'Loaded', ''),
iff(mod[3] != conf.sim.version,
mod[3], ''),
mod[4]]
if verbose:
line.append(mod[1])
info.append(line)
info.sort()
title = ['Name', 'Status', 'Version', 'User Version']
if verbose:
title.append('Path')
print_columns((Just_Left,) * len(title), [title] + info)
new_command("list-modules", module_list_cmd,
[arg(str_t,"substr","?",""),
arg(flag_t,"-l"),
arg(flag_t,"-v")],
alias = "module-list",
type = ["Configuration", "Modules"],
see_also = ['list-failed-modules', 'module-list-refresh',
'load-module', 'add-module-directory'],
short = "list loadable modules",
doc = """
Lists all modules that can be loaded into Simics. If the optional
<arg>substr</arg> argument is specified, only modules with a matching name will
be printed. Use <arg>-v</arg> to get more information on the modules, and
<arg>-l</arg> to only list loaded modules. The ABI version of modules is only
printed if it differs from the current Simics ABI.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2535")
#
# -------------------- list-failed-modules --------------------
#
def module_list_failed_cmd(substr, verbose):
mods = [x for x in SIM_get_all_failed_modules() if x[0].find(substr) >= 0]
if len(mods) == 0:
print "No failed modules %sfound." % (
iff(len(substr), 'matching pattern "%s" ' % substr, ''))
return
print ""
print "Current ABI version: %d " % conf.sim.version,
print "Lowest supported: %d" % conf.sim.version_compat
print ""
errors = []
for mod in mods:
if mod[3]:
msg = mod[6] or 'unknown linker error'
elif mod[4] and mod[4] < conf.sim.version_compat:
msg = 'Unsupported Simics ABI version: %d' % mod[4]
elif mod[2]:
msg = 'Duplicated module'
elif mod[6]:
msg = mod[6]
else:
            msg = 'Unknown failure'
if verbose:
errors.append([mod[0], msg, mod[1]])
else:
errors.append([mod[0], msg])
errors.sort()
title = ['Name', 'Error']
if verbose:
title.append('Path')
print_columns((Just_Left,) * len(title), [title] + errors)
new_command("list-failed-modules", module_list_failed_cmd,
[arg(str_t,"substr","?",""), arg(flag_t, "-v") ],
alias = "module-list-failed",
type = ["Configuration", "Modules"],
see_also = ["list-modules", "module-list-refresh", "load-module",
"add-module-directory"],
short = "list the modules that are not loadable",
# Whenever you change the columns, make sure to update the
# example in the programming guide.
doc = """
Lists the modules (Simics extensions) that are not loadable,
optionally only those matching <arg>substr</arg>.
Similar to <cmd>list-modules</cmd> but shows modules that will not load into
Simics, and the reason why Simics refuses to load them (e.g., missing symbol,
wrong version, ...).
If the -v flag is specified, show verbose information, with the full path to
the module file and any library loader error message.
The <tt>MODULE</tt> column contains the name of the module or the filename of the
shared library file if the module name could not be established.
If the module has the same name as another module, an <tt>X</tt> will
be printed in the <tt>DUP</tt> column.
If the module could not be loaded since it was compiled or written for
a different version of Simics, the version it was built for will be
printed in the <tt>VERSION</tt> column.
The <tt>USR_VERS</tt> will contain the user version string, if provided.
The <tt>LINK</tt> column contains any linker error (cf. the <tt>dlerror(3)</tt>
manpage).
When the -v flag is provided, the <tt>PATH</tt> column will contain
linker information for the module.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2591")
#
# -------------------- module-list-refresh --------------------
#
def module_list_refresh_cmd():
SIM_module_list_refresh()
check_for_gcommands()
new_command("module-list-refresh", module_list_refresh_cmd,
[], # module-list-refresh
alias = "",
type = ["Configuration", "Modules"],
short = "create a new list of loadable modules",
see_also = ["list-modules", "list-failed-modules", "load-module"],
doc = """
Refresh (reload) the list of all Simics modules.<br/>
This command causes Simics to re-query all modules currently
not loaded. This can be used after changing or
adding a module that Simics, when started, considered
non-loadable.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2638")
#
# -------------------- read-configuration --------------------
#
from update_config import update_configuration
def any_object_exists(set, prefix):
for s in [x for x in set if x not in ['sim', 'python', 'prefs']]:
try:
SIM_get_object(prefix + s)
return True
except:
pass
return False
def py_SIM_read_configuration(file):
set = VT_get_configuration(file)
prefix = ''
idx = 0
# TODO: remove this check when bug #2389 (cannot create cpu once started)
# has been fixed.
if any_object_exists(set, prefix):
print "Cannot read configuration, object names already used."
SIM_command_has_problem()
return
while any_object_exists(set, prefix):
prefix = 'machine_%d_' % idx
idx += 1
if prefix:
print "Using object prefix '%s'" % prefix
for s in set:
set[s].name = prefix + set[s].name
update_configuration(set)
SIM_add_configuration(set, file)
def read_configuration_cmd(file):
assert_not_running()
try:
py_SIM_read_configuration(file)
except Exception, msg:
SIM_command_has_problem()
print msg
print "Failed reading configuration"
new_command("read-configuration", read_configuration_cmd,
[arg(filename_t(simpath = 1), "file")],
type = ["Configuration"],
short = "restore configuration",
see_also = ["write-configuration"],
doc = """
Read configuration from file. The configuration can be either a checkpoint or
an initial configuration. For information about how to create or modify
configurations, see the <cite>Simics User Guide</cite>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2701")
#
# -------------------- write-configuration --------------------
#
def write_configuration_cmd(file, z, u):
assert_not_running()
old_compressed_diff = SIM_get_class_attribute("image", "compressed_diff")
compr = conf.prefs.compress_images
if u:
compr = 0
if z:
compr = 1
SIM_set_class_attribute("image", "compressed_diff", compr)
try:
file = SIM_native_path(file)
if os.path.exists(file):
pr(("%s already exists. Overwriting checkpoints is not"
+ " supported.\n") % file)
SIM_command_has_problem()
return
try:
if VT_remote_control():
print "Writing configuration to:", file
if SIM_write_configuration_to_file(file):
print "Failed to write configuration"
SIM_command_has_problem()
# If running from a client, the prompt does not always
# function as "command is complete" indicator.
if VT_remote_control():
print "Finished writing configuration."
except Exception, msg:
print msg
print "Failed writing configuration"
SIM_command_has_problem()
finally:
SIM_set_class_attribute("image", "compressed_diff",
old_compressed_diff)
new_command("write-configuration", write_configuration_cmd,
[arg(filename_t(),"file"), arg(flag_t, "-z"), arg(flag_t, "-u")],
type = ["Configuration"],
short = "save configuration",
see_also = ["read-configuration", "save-persistent-state"],
doc = """
Write configuration to disk. In addition to the main text
configuration file, objects may create additional files with names
starting with <i>file</i>. Note that some classes save their state
incrementally meaning that files from earlier checkpoints (or from the
starting configuration) may be referenced in the new checkpoint.
Use the <tt>-z</tt> flag for compressed images, or <tt>-u</tt>
for uncompressed. The default is taken from the preference object.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2755")
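# Hypothetical example: write a compressed checkpoint and read it back later
# (the file name is made up for illustration):
#
#   simics> write-configuration my-checkpoint -z
#   simics> read-configuration my-checkpoint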
#
# -------------------- save-persistent-state --------------------
#
def save_persistent_cmd(file, z, u):
assert_not_running()
old_compressed_diff = SIM_get_class_attribute("image", "compressed_diff")
compr = conf.prefs.compress_images
if u:
compr = 0
if z:
compr = 1
if os.path.exists(file):
pr(("%s already exists. Overwriting persistent state files is not"
+ " supported.\n") % file)
SIM_command_has_problem()
return
SIM_set_class_attribute("image", "compressed_diff", compr)
try:
if VT_write_configuration_persistent(file):
print "Failed saving persistent state"
SIM_command_has_problem()
except Exception, msg:
print "Failed saving persistent state: %s" % msg
SIM_command_has_problem()
SIM_set_class_attribute("image", "compressed_diff", old_compressed_diff)
new_command("save-persistent-state", save_persistent_cmd,
[arg(filename_t(),"file"), arg(flag_t, "-z"), arg(flag_t, "-u")],
type = ["Configuration", "Disk"],
short = "save persistent simulator state",
see_also = ["load-persistent-state", "write-configuration"],
doc = """
Save the persistent simulator state to a file. Persistent data typically
includes disk images, NVRAM and flash memory contents and clock settings,
i.e. data that survive reboots. The persistent state is saved as a
standard Simics configuration file.
Use the <tt>-z</tt> flag for compressed images, or <tt>-u</tt>
for uncompressed. The default is taken from the preference object.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2798")
def is_config_file(filename):
try:
f = open(filename, 'r')
l = f.readline()
while len(l):
l = l.strip()
if len(l) > 0 and not l.startswith('#'):
if 'OBJECT' in l and 'TYPE' in l and '{' in l:
return 1
else:
return 0
l = f.readline()
except:
pass
return 0
# called from main.c
def run_command_or_checkpt(filename):
try:
if is_config_file(filename):
read_configuration_cmd(filename)
else:
SIM_run_command_file(filename)
except Exception, msg:
print msg
#
# -------------------- load-persistent-state --------------------
#
from configuration import convert_to_pre_objects
from configuration import OBJECT # for Python-format persistent-state files
def load_persistent_cmd(file, prefix):
assert_not_running()
(directory, fname) = os.path.split(file)
# do an excursion to where it was saved, since file names in the config
    # are relative to that directory
here = os.getcwd()
try:
if directory:
os.chdir(directory)
try:
f = open(fname, 'r')
if is_config_file(fname):
python_format = False
config = VT_get_configuration(fname)
else:
python_format = True
config = eval(open(fname, 'r').read())
except Exception, msg:
SIM_command_has_problem()
print "Failed opening persistent state file: %s" % msg
return
try:
if python_format:
config = convert_to_pre_objects(config)
except Exception, msg:
SIM_command_has_problem()
print "Illegal format in persistent state file: %s" % msg
return
for phase in (3, 0, 1): # 3 = phase -1
for objname, obj in config.items():
if objname not in ('sim', 'python'):
objname = prefix + objname
try:
o = SIM_get_object(objname)
except:
print ("Failed applying persistent state to object %s"
% objname)
print ("Perhaps '%s' refers to some other configuration?"
% file)
print
SIM_command_has_problem()
# skip this object
continue
for a, v in [x for x in obj.__dict__.items()
if x[0][:2] != '__']:
if ((SIM_get_attribute_attributes(o.classname, a)
>> Sim_Init_Phase_Shift)
& Sim_Init_Phase_Mask) != phase:
continue
try:
SIM_set_attribute(o, a, v)
except Exception, msg:
print ("Failed setting attribute '%s' in '%s': %s"
% (a, objname, msg))
SIM_command_has_problem()
finally:
os.chdir(here)
new_command("load-persistent-state", load_persistent_cmd,
[arg(filename_t(simpath = 1), "file"),
arg(str_t, "prefix", "?", "")],
type = ["Configuration", "Disk"],
short = "load persistent state",
see_also = ["save-persistent-state", "read-configuration"],
doc = """
Load persistent simulator state from a file. Persistent data typically
includes disk images, NVRAM and flash memory contents and clock settings,
i.e. data that survive reboots. The <param>prefix</param> argument can be
used to add a name prefix to all objects in the persistent state file.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2905")
#
# -------------------- run-python-file --------------------
#
def run_python_file_cmd(filename):
try:
SIM_source_python(filename)
except Exception, msg:
print "Failed executing Python file: ", msg
SIM_command_has_problem()
new_command("run-python-file",
run_python_file_cmd,
args = [arg(filename_t(exist = 1, simpath = 1), "filename")],
alias = "source",
type = ["Python", "Command-Line Interface",
"Files and Directories"],
short = "execute Python file",
see_also = ["python", "@", "run-command-file", "add-directory"],
doc = """
Read Python code from <i>filename</i>. Any definitions are entered into the top
level name-space in the Python environment. Uses the Simics search path to
locate <i>filename</i>. This command can be used to start Python scripts inside
Simics. """, filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2929")
# def my_command_cmd(arg):
# ret = SIM_my_api_function(arg)
# return ret
# new_command("my-command", my_command_cmd,
# args = [arg(int_t, "arg")],
# alias = "mc",
# type = "inspect/change commands",
# short = "my command does it",
# doc_items = [("NOTE", "This command is best")],
# see_also = ["my_other_command"],
# doc = """
# <b>my-command</b> is best.
# This is its documentation. <i>arg</i>
# is the argument.""")
#
#
# -------------------- list-attributes --------------------
#
attr_list = {
Sim_Attr_Required: "Required",
Sim_Attr_Optional: "Optional",
Sim_Attr_Session: "Session ",
Sim_Attr_Pseudo: "Pseudo "
}
# TODO: Move this command inside every namespace
def list_attributes_cmd(obj, attrname):
try:
attrs = obj.attributes
def f(attr):
s = []
if attr[1] & Sim_Attr_Integer_Indexed:
s += [ "Integer_Indexed" ]
if attr[1] & Sim_Attr_String_Indexed:
s += [ "String_Indexed" ]
if attr[1] & Sim_Attr_List_Indexed:
s += [ "List_Indexed" ]
return [ attr[0], attr_list[int(attr[1] & 0xff)],
join(s, " ") ]
if attrname == None:
attrs = map(f, attrs)
attrs.sort()
print_columns([Just_Left, Just_Left, Just_Left],
[ [ "Attribute", "Type", "Flags" ] ] + attrs)
else:
found = 0
for a in attrs:
if a[0] == attrname:
found = 1
help_cmd("attribute:%s.%s" % (obj.name, attrname))
if not found:
print "No such attribute found"
except Exception, msg:
print msg
print "Failed reading attributes"
new_command("list-attributes", list_attributes_cmd,
args = [arg(obj_t('object'), "object",
expander = conf_object_expander),
arg(str_t, "attribute-name", "?", None)],
type = ["Configuration", "Help"],
short = "list all attributes",
doc = """
Print a list of all attributes that are registered in an object. For every
attribute, the type as well as any additional flags are listed. See the
documentation for <fun>SIM_register_typed_attribute()</fun> for valid
attribute types and flags. If an attribute name is given, the description for
that particular attribute will be displayed.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3008")
def attr_expander(string, obj):
return get_completions(string, sim.classes[obj.classname].attributes)
new_command("list-attributes", list_attributes_cmd,
args = [arg(str_t, "attribute-name", "?",
expander = attr_expander)],
type = ["Configuration", "Help"],
short = "list all attributes",
namespace = 'conf_object_t',
doc = """
Print a list of all attributes that are registered in an object. For every
attribute, the type as well as any additional flags are listed. See the
documentation for <fun>SIM_register_typed_attribute()</fun> for valid
attribute types and flags. If an attribute name is given, the description for
that particular attribute will be displayed.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3025")
#
#
# -------------------- list-classes --------------------
#
def list_classes_cmd(loaded):
all_classes = sorted(VT_get_all_known_classes())
classes = sorted(SIM_get_all_classes())
if loaded:
if not classes:
print "There are no classes defined."
return
print "The following classes have been registered:\n"
print_columns([ Just_Left ], classes, has_title = 0)
else:
if not all_classes:
print "There are no classes defined."
return
print "The following classes are available:\n"
print_columns([ Just_Left ], all_classes, has_title = 0)
new_command("list-classes", list_classes_cmd,
args = [arg(flag_t,"-l")],
type = ["Configuration"],
short = "list all configuration classes",
doc = """
Print a list of all configuration classes. The <arg>-l</arg> flag
will reduce the list to classes that have been registered by loaded
modules.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3061")
#
# -------------------- dummy commands (@ and !) --------------------
#
def dummy_command(cmd_name):
# this does not catch ! and @
print "Arguments missing, or illegal use of '%s' command" % cmd_name
SIM_command_has_problem()
new_command("!", lambda : dummy_command("!"),
type = ["Command-Line Interface"],
short = "execute a shell command",
doc = """
Executes the rest of the command line in the system command-line
interpreter. For Unix, this is the default shell. For Windows, this is
<file>cmd.exe</file>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3080")
new_command("@", lambda : dummy_command("@"),
type = ["Command-Line Interface", "Python"],
short = "evaluate a Python statement",
see_also = ["python", "run-python-file"],
doc = """
Evaluates the rest of the command line as a Python statement and print its
result.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3089")
new_command("if", lambda : dummy_command("if"),
type = ["Command-Line Interface", "Python"],
see_also = ["while"],
doc = """
Runs a block of commands conditionally. Some examples:
<pre>
if <condition> { commands }
if <condition> { commands } else { commands }
if <condition> { commands } else if <condition> { commands }
</pre>
The <cmd>if</cmd> command returns the value of the last executed command in
the block.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3098")
new_command("else", lambda : dummy_command("else"),
type = ["Command-Line Interface", "Python"],
doc_with = "if", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3112")
new_command("while", lambda : dummy_command("while"),
type = ["Command-Line Interface", "Python"],
see_also = ["if"],
doc = """
Runs a block of commands while condition is true.
<pre>
while <condition> { commands }
</pre>
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3116")
new_command("script-branch", lambda : dummy_command("script-branch"),
type = ["Command-Line Interface", "Python"],
see_also = ["list-script-branches",
"interrupt-script-branch",
"wait-for-variable",
"wait-for-hap",
"<text-console>.wait-for-string",
"<processor>.wait-for-cycle",
"<processor>.wait-for-step"],
doc = """
Starts a block of commands as a separate branch. The <cmd>wait-for-</cmd>
commands can be used to postpone the execution of a script branch until
a selected action occurs.
<pre>
script-branch { commands }
</pre>
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3126")
next_expr_id = 1
expr_list = {}
last_cpu = 0
last_step = -1
display_cb_active = False
def display_expr(id):
global expr_list
(expr, typ, tag) = expr_list[id]
if tag:
print ">%d<" % id
else:
pr(expr + " :\t");
try:
if typ == "py":
exec "print " + expr
else:
run(expr, 0)
except:
print "display expression %d raised exception" % (id)
if tag:
print ">.<"
def display_cb(always):
global last_step, last_cpu
now_cpu = SIM_current_processor()
now_step = SIM_step_count(SIM_current_processor())
if now_cpu == last_cpu and now_step == last_step and always == 0:
return
last_cpu = now_cpu
last_step = now_step
for id in expr_list.keys():
display_expr(id)
def check_display_output():
if display_cb_active:
display_cb(0)
def list_displays():
if expr_list:
for id in expr_list.keys():
(expr, typ, tag) = expr_list[id]
print "%d: %s [%s]" % (id, expr, typ)
else:
print "Empty display list"
def display_cmd(expr, l, p, t):
global next_expr_id, expr_list, display_cb_active
global last_cpu, last_step
if l:
list_displays()
return
if expr == "<list>":
display_cb(1)
return
    if not display_cb_active:
        display_cb_active = True
        # avoid output first time
        last_cpu = SIM_current_processor()
        last_step = SIM_step_count(SIM_current_processor())
    if p:
        type = "py"
    else:
        type = "cli"
expr_list[next_expr_id] = (expr, type, t)
print "display %d: %s" % (next_expr_id, expr)
next_expr_id += 1
def undisplay_cmd(expr_id):
global expr_list, display_cb_active
try:
del expr_list[expr_id]
except:
print "No such display"
return
if not expr_list and display_cb_active:
display_cb_active = False
new_command("display", display_cmd,
[arg(str_t, "expression", "?", "<list>"),
arg(flag_t, "-l"),
arg(flag_t, "-p"),
arg(flag_t, "-t")],
alias = "",
type = ["Command-Line Interface", "Output"],
short = "print expression at prompt",
see_also = ["undisplay"],
doc = """
Install a Python expression, or a frontend statement that will be printed
when Simics returns to the prompt. The <tt>-p</tt> flag is used to indicate
that the string argument is in Python. To list all installed display
expressions, the <tt>-l</tt> argument should be used. The expressions are
only evaluated and printed if the simulation has run any instructions since
last time, but a re-evaluation can be forced by calling display with no
arguments. The <tt>-t</tt> argument makes the output be tagged in a way
that makes it possible to capture the output by external means.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3232")
new_command("undisplay", undisplay_cmd,
[arg(int_t, "expression-id")],
alias = "",
type = ["Command-Line Interface", "Output"],
short = "remove expression installed by display",
see_also = ["display"],
doc = """
Remove a Python expression, or a frontend statement that was previously
installed with the display command. The argument is the id number of the
expression, as listed by <tt>display -l</tt>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3252")
def pipe_cmd(cmd, prog):
if sys.platform.startswith("win32"):
print "The 'pipe' command is not supported on this host."
return
# syntax check of the command
run(cmd, no_execute=True)
import subprocess
p = subprocess.Popen(prog, shell=True, stdin=subprocess.PIPE,
close_fds=True)
# temporary replace fd 1 with the pipe to the command
ostdout = os.dup(1)
os.dup2(p.stdin.fileno(), 1)
p.stdin.close()
disable_columns()
try:
run(cmd)
except Exception, exc:
print "Exception!", exc
enable_columns()
os.dup2(ostdout, 1)
os.close(ostdout)
p.wait()
new_command("pipe", pipe_cmd,
[arg(str_t, "command"), arg(str_t, "pipe")],
alias = "",
type = ["Command-Line Interface", "Output"],
short = "run commands through a pipe",
doc = """
This command runs <tt>command</tt> at the Simics prompt and pipes the
output (stdout) through the external program <tt>pipe</tt>'s stdin.
Available on UNIX hosts only.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3287")
log_time_stamp = 0
log_file_only = 0
log_file_fd = None
log_file_name = None
# will only be called when group/type/level matches
def log_callback(arg, obj, type, str):
if log_time_stamp:
cpu = SIM_current_processor()
ts = " {%s 0x%x %d}" % (cpu.name,
SIM_get_program_counter(cpu),
SIM_step_count(cpu))
else:
ts = ""
if obj == None:
log_string = "[%s]%s %s" % (conf.sim.log_types[type], ts, str)
else:
log_string = "[%s %s]%s %s" % (obj.name, conf.sim.log_types[type], ts, str)
if not log_file_only:
print log_string
if log_file_fd:
log_file_fd.write(log_string + "\n")
SIM_hap_add_callback("Core_Log_Message", log_callback, None)
#
# -------------------- log-setup --------------------
#
def log_setup_cmd(ts, no_ts, fo, no_fo, no_lf, logfile):
global log_time_stamp, log_file_only, log_file_fd, log_file_name
if ts and no_ts:
print "Both -time-stamp and -no-time-stamp specified in log-setup."
SIM_command_has_problem()
return
if fo and no_fo:
print "Both -file-only and -no-file-only specified in log-setup."
SIM_command_has_problem()
return
if logfile and no_lf:
print "Both a log file and -no-log-file specified in log-setup."
SIM_command_has_problem()
return
if (not ts
and not no_ts
and not fo
and not no_fo
and not no_lf
and not logfile):
print "Time stamp : %sabled" % iff(log_time_stamp, "en", "dis")
print "Log file only: %sabled" % iff(log_file_only, "en", "dis")
print "Log file : %s" % iff(log_file_fd, log_file_name, "Not used")
return
if ts:
log_time_stamp = 1
elif no_ts:
log_time_stamp = 0
if fo:
log_file_only = 1
elif no_fo:
log_file_only = 0
if logfile:
try:
open(logfile, "r")
SIM_command_has_problem()
print "File %s already exists." % logfile
return
except:
pass
try:
log_file_fd = open(logfile, "w")
log_file_name = logfile
except Exception, msg:
SIM_command_has_problem()
print "Failed opening log file %s : %s" % (logfile, msg)
return
elif no_lf:
if log_file_fd:
log_file_fd.close()
log_file_fd = None
log_file_name = None
new_command("log-setup", log_setup_cmd,
args = [arg(flag_t, "-time-stamp"),
arg(flag_t, "-no-time-stamp"),
arg(flag_t, "-file-only"),
arg(flag_t, "-no-file-only"),
arg(flag_t, "-no-log-file"),
arg(filename_t(), "logfile", "?", None)],
type = ["Output", "Logging"],
short = "configure log behavior",
see_also = ["log", "<log_object>.log-group", "log-size",
"log-type"],
doc = """
The <arg>-time-stamp</arg> flag will cause further log output to
include a time stamp, i.e. the name of the current processor
together with the program counter and step count for this CPU. Time
stamp output is disabled with <arg>-no-time-stamp</arg>. A file
that receives all log output can be specified with the
<arg>logfile</arg> argument. <arg>-no-log-file</arg> disables an
existing log file. Even if a log file is specified, all output is
printed on the console, unless <arg>-file-only</arg> is used. Use
<arg>-no-file-only</arg> to re-enable output. Called without
arguments, the command will print the current setup.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3385");
#
# -------------------- log-level --------------------
#
def global_log_level_cmd(level):
if level == -1:
print "Current log levels:"
for obj in SIM_get_all_objects():
if LOG_OBJECT_INTERFACE in dir(obj.iface):
print "[%s] %d" % (obj.name, obj.log_level)
print
return
if level < 0 or level > 4:
print "Illegal log level: %d. Allowed are 0 - 4." % level
SIM_command_has_problem()
return
for obj in SIM_get_all_objects():
if LOG_OBJECT_INTERFACE in dir(obj.iface):
obj.log_level = level
#also change the default log-level
conf.sim.default_log_level = level
print "New global log level: %d" % level
new_command("log-level", global_log_level_cmd,
args = [arg(int_t, "level", "?", -1)],
type = ["Output", "Logging"],
short = "set or get the global log level",
see_also = ["log", "<log_object>.log-group", "log-size",
"log-type"],
doc = """
Objects in Simics can generate log messages on different <i>log levels</i>.
These messages will be shown in the Simics command line window if the log level
for the object has been set high enough.
The default level is 1, and this is the lowest level that objects can report
messages on. Setting it to 0 will inhibit output of all messages.
Messages are also added to an access log that can be viewed by the <cmd>log</cmd>
command in Simics.
There are four log levels defined:
1 - important messages printed by default.
2 - "high-level" informative messages.
3 - standard debug messages.
4 - detailed information, such as register accesses.
Not all classes have been converted to use this log level scheme.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3433")
#
# -------------------- <obj>.log-level --------------------
#
def log_level_cmd(obj, level):
old = obj.log_level
if level == -1:
print "[%s] Current log level: %d" % (obj.name, old)
return
if level < 0 or level > 4:
print "Illegal log level: %d. Allowed are 0 - 4." % level
SIM_command_has_problem()
return
if level != old:
obj.log_level = level
print "[%s] Changing log level: %d -> %d" % (obj.name, old, level)
else:
print "[%s] Log level unchanged, level: %d" % (obj.name, old)
new_command("log-level", log_level_cmd,
args = [arg(int_t, "level", "?", -1)],
type = ["Output", "Logging"],
namespace = "log_object",
short = "set or get the log level",
doc_with = "log-level", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3479")
#
# -------------------- log-type --------------------
#
def log_type_expander(string):
return get_completions(string, conf.sim.log_types)
def global_log_type_cmd(add_flag, sub_flag, log_type):
types = conf.sim.log_types
if log_type == "":
print "Current log types:"
for obj in SIM_get_all_objects():
if LOG_OBJECT_INTERFACE in dir(obj.iface):
print "[%s] %s" % (obj.name, log_names(types, obj.log_type_mask))
print
return
if log_type == "all":
new_mask = 0xffffffffL
change_mask = 0
if sub_flag:
print "Cannot remove all log types."
SIM_command_has_problem()
return
else:
try:
change_mask = 1 << types.index(log_type)
except:
print "Unknown log type: %s " % log_type
SIM_command_has_problem()
return
for obj in SIM_get_all_objects():
if not LOG_OBJECT_INTERFACE in dir(obj.iface):
continue
        if log_type == "all":
            pass        # new_mask already holds all log types (set above)
        elif add_flag:
            new_mask = obj.log_type_mask | change_mask
        elif sub_flag:
            new_mask = obj.log_type_mask & ~change_mask
        else:
            new_mask = change_mask
obj.log_type_mask = new_mask
if add_flag:
print "Adding global log type: %s" % log_names(types, change_mask)
elif sub_flag:
print "Removing global log type: %s" % log_names(types, change_mask)
else:
print "Setting global log type: %s" % log_names(types, change_mask)
new_command("log-type", global_log_type_cmd,
args = [arg(flag_t, "-add"),
arg(flag_t, "-sub"),
arg(str_t, "log-type", "?", "", expander = log_type_expander)],
type = ["Output", "Logging"],
short = "set or get the current log types",
see_also = ["log", "<log_object>.log-group", "log-level",
"log-size"],
doc = """
Log messages are categorised into one of several log types. By default,
messages of all types are handled in the same way. This command can be used
to select one or several types. Only messages of the selected types will be
logged and displayed, as defined by the <cmd>log-level</cmd> command. The
flags <b>-add</b> and <b>-sub</b> can be used to add and remove a single log
type. The log types are documented with the <tt>log_type_t</tt> data type,
and are Info, Error, Undefined, Spec_Violation, Target_Error, Unimplemented.
All types can be enabled by setting <b>log-type</b> to <tt>all</tt>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3538")
#
# -------------------- <obj>.log-type --------------------
#
def log_names(all, mask):
names = ""
not_all = 0
for i in range(len(all)):
if mask & (1 << i):
names += all[i] + " "
else:
not_all = 1
if (names == "" and len(all) == 0) or not_all == 0:
names = "all"
return names
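# Worked example of log_names() (illustrative, not from the original source):
#   log_names(["Info", "Error", "Undefined"], 0x5)  ->  "Info Undefined "
# Each set bit in the mask selects the name at that position; if every bit is
# set (or the name list is empty), the string "all" is returned instead.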
def log_type_cmd(obj, add_flag, sub_flag, log_type):
types = conf.sim.log_types
old_mask = obj.log_type_mask
if log_type == "":
print "[%s] Current log types: %s" % (obj.name, log_names(types, old_mask))
return
if log_type == "all":
new_mask = 0xffffffffL
if sub_flag:
print "Cannot remove all log types."
SIM_command_has_problem()
return
else:
try:
type_id = types.index(log_type)
except:
print "Unknown log type: %s " % log_type
SIM_command_has_problem()
return
if add_flag:
new_mask = obj.log_type_mask | (1 << type_id)
elif sub_flag:
new_mask = obj.log_type_mask & ~(1 << type_id)
else:
new_mask = 1 << type_id
obj.log_type_mask = new_mask
old_names = log_names(types, old_mask)
new_names = log_names(types, new_mask)
# compare the names and not the masks (for 'all' to work)
if old_names != new_names:
print "[%s] Changing log types: %s -> %s" % (obj.name, old_names, new_names)
else:
print "[%s] Unchanged log types: %s" % (obj.name, old_names)
new_command("log-type", log_type_cmd,
args = [arg(flag_t, "-add"),
arg(flag_t, "-sub"),
arg(str_t, "log-type", "?", "", expander = log_type_expander)],
type = ["Output", "Logging"],
namespace = "log_object",
short = "set or get the current log types",
doc_with = "log-type", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3612")
#
# -------------------- <obj>.log-group --------------------
#
def log_group_expander(string, obj):
return get_completions(string, ['all'] + obj.log_groups)
def log_group_cmd(obj, add_flag, sub_flag, log_group):
groups = obj.log_groups
old_mask = obj.log_group_mask
if log_group == "":
print "[%s] Current log groups: %s" % (obj.name, log_names(groups, old_mask))
return
if log_group == "all":
if sub_flag:
new_mask = 0
else:
new_mask = 0xffffffffL
else:
try:
group_id = groups.index(log_group)
except:
print "Unknown log group: %s " % log_group
SIM_command_has_problem()
return
if add_flag:
new_mask = obj.log_group_mask | (1 << group_id)
elif sub_flag:
new_mask = obj.log_group_mask & ~(1 << group_id)
else:
new_mask = 1 << group_id
obj.log_group_mask = new_mask
old_names = log_names(groups, old_mask)
new_names = log_names(groups, new_mask)
# compare the names and not the masks (for 'all' to work)
if old_names != new_names:
print "[%s] Changing log groups: %s -> %s" % (obj.name, old_names, new_names)
else:
print "[%s] Unchanged log groups: %s" % (obj.name, old_names)
new_command("log-group", log_group_cmd,
args = [arg(flag_t, "-add"),
arg(flag_t, "-sub"),
arg(str_t, "log-group", "?", "", expander = log_group_expander)],
type = ["Output", "Logging"],
namespace = "log_object",
short = "set or get the current log groups",
see_also = ["log", "log-level", "log-size", "log-type"],
doc = """
A log object in Simics can specify a number of groups, and each log message
is associated with one group. Groups are typically used to separate log
messages belonging to different aspects of an object such as a device. For
example, a network device can have different groups for the receive and transmit
engines, one group for the host protocol and another for PCI accesses. Having
multiple groups simplifies debugging when only messages of the selected groups
are logged and displayed. By default all groups are active, but a single group
can be set with this command, or groups can be added and removed using the
flags <b>-add</b> and <b>-sub</b> for the command. All groups can be enabled by
setting <b>log-group</b> to <tt>all</tt>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3664")
#
# -------------------- log-size --------------------
#
def global_log_size_cmd(newsize):
if newsize == -1:
print "Current log buffer sizes: (number of entries)"
for obj in SIM_get_all_objects():
if LOG_OBJECT_INTERFACE in dir(obj.iface):
print "[%s] %d" % (obj.name, obj.log_buffer_size)
return
for obj in SIM_get_all_objects():
if not LOG_OBJECT_INTERFACE in dir(obj.iface):
continue
try:
obj.log_buffer_size = newsize
except Exception, msg:
print "Error changing log buffer size: %s" % msg
SIM_command_has_problem()
return
print "Setting new size of all log buffers: %d" % newsize
new_command("log-size", global_log_size_cmd,
[arg(int_t, "size", "?", -1)],
alias = "",
type = ["Output", "Logging"],
short = "set log buffer size",
see_also = ["log", "<log_object>.log-group", "log-level",
"log-type"],
doc = """
The namespace version of this command changes the buffer size (number of
entries) for log messages and I/O trace entries for an object. The global
command applies to all log objects. When called with no arguments, the sizes
of the log buffers are listed.<br/>
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3709")
#
# -------------------- <obj>.log-size --------------------
#
def log_size_cmd(obj, newsize):
oldsize = obj.log_buffer_size
if newsize == -1:
print "Current log buffer size: %d entries" % oldsize
return
try:
obj.log_buffer_size = newsize
except Exception, msg:
print "Error changing log buffer size: %s" % msg
SIM_command_has_problem()
return
print "[%s] Changing size of the log buffer: %d -> %d" % (obj.name, oldsize, newsize)
new_command("log-size", log_size_cmd,
[arg(int_t, "size", "?", -1)],
alias = "",
type = ["Output", "Logging"],
short = "set log buffer size",
namespace = "log_object",
doc_with = "log-size", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3741")
#
# -------------------- log --------------------
#
def trace_sort_func(a, b):
if a[0][1] > b[0][1]:
return -1
if b[0][1] > a[0][1]:
return 1
if a[0][0] > b[0][0]:
return 1
if b[0][0] > a[0][0]:
return -1
return 0
def global_log_cmd(num):
trace_list = []
for obj in SIM_get_all_objects():
if not LOG_OBJECT_INTERFACE in dir(obj.iface):
continue
try:
entry = obj.log_buffer_last
size = obj.log_buffer_size
i = 0
while i < num:
trace = obj.log_buffer[entry]
entry = (entry + size - 1) % size
i = i + 1
trace[0].insert(0, obj)
trace_list.append(trace)
except:
pass
trace_list.sort(trace_sort_func)
count = min(num, len(trace_list))
for i in range(count):
display_log_entry(trace_list[i])
if count == 0:
print "Empty log buffers"
elif count < num:
print "Only %d entries listed (no more in log buffers)" % count
new_command("log", global_log_cmd,
args = [arg(int_t, "count", "?", 10)],
alias = "",
type = ["Output", "Logging"],
short = "print log entries for all objects",
see_also = ["<log_object>.log-group", "log-level", "log-size",
"log-type"],
doc = """
Display entries in log buffers. The namespace version displays the entries
for a specific object, while the global command lists the entries of all
objects' log buffers, sorted by time. The optional argument is the number
of entries to list. Only the last 10 entries are listed by default.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3792")
#
# -------------------- <object>.log --------------------
#
read_write_str = ("Read", "Write")
def display_log_entry(trace):
if trace[0][2]:
cpu_name = trace[0][2].name
else:
cpu_name = "None"
str = " Timestamp: obj = %s cycle = %d cpu = %-5s pc = 0x%x" % (
trace[0][0].name, trace[0][1], cpu_name, trace[0][3])
if trace[1] == []:
print " * %s" % str
else:
print "%6d %s" % (trace[1][5], str)
if trace[1][0]:
acc_name = trace[1][0].name
else:
acc_name = "Unknown"
str = " %-5s access from %s. PA = 0x%x" % (read_write_str[trace[1][3]],
acc_name,
trace[1][1])
str = str + " size = %d data = 0x%x" % (trace[1][2], trace[1][4])
print str
for msg in trace[2]:
grps = msg[1]
names = ""
for i in range(32): # 32 groups should be enough for anyone...
if not grps:
break
if grps & 1:
try:
if len(names):
names += ", "
names += trace[0][0].log_groups[i]
except:
pass # illegal group specified
grps >>= 1
print " [%s - %s] %s" % (conf.sim.log_types[msg[2]], names, msg[0])
print
def log_cmd(obj, num):
some_lost = 0
size = obj.log_buffer_size
if num > size:
num = size
some_lost = 1
try:
entry = obj.log_buffer_last
except Exception, msg:
print "Empty log buffer"
return
try:
i = 0
while i < num:
trace = obj.log_buffer[entry]
trace[0].insert(0, obj)
display_log_entry(trace)
entry = (entry + size - 1) % size
i = i + 1
except Exception, msg:
some_lost = 1
if some_lost:
print "Only %d entries listed (no more in buffer)" % i
new_command("log", log_cmd,
args = [arg(int_t, "count", "?", 10)],
alias = "",
type = ["Output", "Logging"],
short = "print log entries for all objects",
namespace = "log_object",
doc_with = "log", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3874")
#
# -------------------- cpu-switch-time --------------------
#
def cpu_switch_time_cmd(poly):
try:
cpu0_freq = SIM_proc_no_2_ptr(0).freq_mhz * 1e6
except Exception, msg:
cpu0_freq = None
if poly == -1:
print "Current CPU switch time: %d cycles" % (conf.sim.cpu_switch_time),
if cpu0_freq:
print "(%f seconds)" % (conf.sim.cpu_switch_time / cpu0_freq)
else:
print
else:
if poly[0] == int_t:
cycles = poly[1]
elif poly[0] == float_t:
if cpu0_freq:
seconds = poly[1]
cycles = int(cpu0_freq * seconds)
else:
print "Cannot set cpu-switch-time with seconds, no processor defined"
SIM_command_has_problem()
return
else:
print "invalid argument type passed"
SIM_command_has_problem()
return
try:
conf.sim.cpu_switch_time = cycles
except Exception, msg:
print "Failed changing CPU switch time: %s" % msg
SIM_command_has_problem()
return
try:
print "The switch time will change to %d cycles (for CPU-0) once" % conf.sim.new_switch_time,
print "all processors have synchronized."
except:
pass
new_command("cpu-switch-time", cpu_switch_time_cmd,
args = [arg((int_t, float_t),
("cycles", "seconds"),
"?", -1)],
alias = "",
type = ["Execution", "Speed"],
short = "get/set CPU switch time",
doc = "Change the time, in cycles or seconds, between CPU switches. Simics will "
"simulate each processor for a specified number of cycles before "
"switching to the next one. Specifying cycles (which is default) refers to "
"the number of cycles on the first CPU in Simics. The following CPUs cycle switch "
"times are calulated from their CPU frequencies. "
"When issued with no argument, the current switch time is reported.", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3928")
def api_trace_cmd(poly):
if poly[0] == flag_t:
if poly[1]:
if poly[2] == '-stderr':
conf.sim.api_trace_file = 0
else:
conf.sim.api_trace_file = None
return
trace = conf.sim.api_trace_file
if type(trace) == type(0):
print "API calls traced to stderr"
elif trace == None:
print "API trace currently off"
else:
print "API calls traced to '%s'" % trace
return
try:
conf.sim.api_trace_file = poly[1]
except SimExc_IOError, msg:
print "Failed tracing to file '%s': %s" % (poly[1], msg)
try:
conf.sim.api_trace_file
new_command('api-trace', api_trace_cmd,
args = [arg((flag_t, flag_t, str_t),
('-off', '-stderr', 'file'),
'?', (flag_t, 0))],
type = 'general commands',
short = 'get/set API trace status',
doc = '''
With no arguments, shows current API trace status.
If called with the <tt>-off</tt> flag, turns off API tracing.
If called with the <tt>-stderr</tt> flag, start tracing to stderr.
If called with a <arg>file</arg> argument, starts tracing to that
file. Its contents are overwritten.
This command is only available when Simics is started with the
<tt>SIMICS_API_TRACE</tt> environment variable set. Its value controls
if tracing is turned on or off when Simics starts. If set to
<tt>on</tt>, API tracing is turned on with output to stderr. If set to
<tt>on:<arg>file</arg></tt>, output is sent to <arg>file</arg>. For
other values, API tracing is turned off.''', filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="3968")
except:
pass
#
# -------------------- < --------------------
#
def less_than(a, b):
return a < b
new_command("<", less_than, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 50, infix = 1,
short = "less than",
doc = """
Returns 1 if <var>arg1</var> is less than <var>arg2</var>, and 0 if not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4002")
#
# -------------------- <= --------------------
#
def less_or_equal(a, b):
return a <= b
new_command("<=", less_or_equal, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 50, infix = 1,
short = "less or equal",
doc = """
Returns 1 if <var>arg1</var> is less than or equal to <var>arg2</var>,
and 0 if not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4017")
#
# -------------------- > --------------------
#
def greater_than(a, b):
return a > b
new_command(">", greater_than, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 50, infix = 1,
short = "greater than",
doc = """
Returns 1 if <var>arg1</var> is greater than <var>arg2</var>, and 0 if not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4033")
#
# -------------------- >= --------------------
#
def greater_or_equal(a, b):
return a >= b
new_command(">=", greater_or_equal, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
pri = 50, infix = 1,
short = "greater or equal",
doc = """
Returns 1 if <var>arg1</var> is greater than or equal to <var>arg2</var>,
and 0 if not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4048")
#
# -------------------- != --------------------
#
def not_equal(a, b):
return a[1] != b[1]
new_command("!=", not_equal,
[arg((int_t, str_t), ('i1', 's1'), doc = 'arg1'),
arg((int_t, str_t), ('i2', 's2'), doc = 'arg2')],
type = ["Command-Line Interface"],
pri = 50, infix = 1,
short = "not equal",
doc = """
Returns 1 if <var>arg1</var> and <var>arg2</var> are not equal, and 0 if equal.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4064")
#
# -------------------- == --------------------
#
def equal(a, b):
return a[1] == b[1]
new_command("==", equal,
[arg((int_t, str_t), ('i1', 's1'), doc = 'arg1'),
arg((int_t, str_t), ('i2', 's2'), doc = 'arg2')],
type = ["Command-Line Interface"],
pri = 50, infix = 1,
short = "equal",
doc = """
Returns 1 if <var>arg1</var> and <var>arg2</var> are equal, and 0 if not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4081")
#
# -------------------- min --------------------
#
def min_cmd(a, b):
return min(a, b)
new_command("min", min_cmd, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
short = "min",
doc = """
Returns the smaller value of <var>arg1</var> and <var>arg2</var>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4098")
#
# -------------------- max --------------------
#
def max_cmd(a, b):
return max(a, b)
new_command("max", max_cmd, [arg(int_t), arg(int_t)],
type = ["Command-Line Interface"],
short = "max",
doc = """
Returns the larger value of <var>arg1</var> and <var>arg2</var>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4112")
def is_set(a):
return a or a == ''
#
# ------------------- and --------------------
#
def and_command(a, b):
return iff(is_set(a[1]) and is_set(b[1]), 1, 0)
new_command("and", and_command,
[arg((int_t, str_t), ('iarg1', 'sarg1'), doc = 'arg1'),
arg((int_t, str_t), ('iarg2', 'sarg2'), doc = 'arg2')],
type = ["Command-Line Interface"],
pri = 20, infix = 1,
short = "logical and",
doc = """
Returns 1 if both <var>arg1</var> and <var>arg2</var> are non-zero,
and 0 if not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4129")
#
# ------------------- or --------------------
#
def or_command(a, b):
return iff(is_set(a[1]) or is_set(b[1]), 1, 0)
new_command("or", or_command,
[arg((int_t, str_t), ('iarg1', 'sarg1'), doc = 'arg1'),
arg((int_t, str_t), ('iarg2', 'sarg2'), doc = 'arg2')],
type = ["Command-Line Interface"],
pri = 10, infix = 1,
short = "logical or",
doc = """
Returns 1 if <var>arg1</var> or <var>arg2</var> is non-zero,
and 0 if not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4147")
#
# ------------------- not --------------------
#
def not_command(a):
return iff(is_set(a[1]), 0, 1)
new_command("not", not_command,
[arg((int_t, str_t), ('iarg', 'sarg'), doc = 'arg')],
type = ["Command-Line Interface"],
pri = 30,
short = "logical not",
doc = """
Returns 1 if <var>arg</var> is zero, and 0 if not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4165")
#
# ------------------- defined --------------------
#
def defined_command(v):
return v in get_current_locals().get_all_variables()
new_command("defined", defined_command,
[arg(str_t, 'variable')],
type = ["Command-Line Interface"],
short = "variable defined",
pri = 40, # higher than 'not' command
doc = """
Returns 1 if <var>variable</var> is a defined CLI variable, and 0 if not.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4181")
#
# ------------ wait-for-variable ------------
#
def wait_for_variable_command(name):
if conf.python.iface.python.in_main_branch():
SIM_command_has_problem()
print ("The wait-for-variable command is only allowed "
"in script branches.")
else:
ret = [None, None]
while ret[1] != name:
try:
ret = wait_for_hap("CLI_Variable_Write")
except SimExc_Break:
print "Command 'wait-for-variable' interrupted."
SIM_command_has_problem()
new_command("wait-for-variable", wait_for_variable_command,
[arg(str_t, 'variable')],
short = "wait for a variable to change",
see_also = ["script-branch",
"wait-for-hap",
"<text-console>.wait-for-string",
"<processor>.wait-for-cycle",
"<processor>.wait-for-step"],
doc = """
Postpones execution of a script branch until a CLI variable is written by some
other script branch or the main thread.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4208")
#
# ------------ wait-for-cycle ------------
#
def wait_for_cycle_command(obj, cycle):
if conf.python.iface.python.in_main_branch():
SIM_command_has_problem()
print ("The wait-for-cycle command is only allowed "
"in script branches.")
else:
try:
wait_for_obj_hap("Core_Cycle_Count", obj, cycle)
except SimExc_Break:
print "Command '%s.wait-for-cycle' interrupted." % obj.name
SIM_command_has_problem()
new_command("wait-for-cycle", wait_for_cycle_command,
[arg(int_t, 'cycle')],
namespace = "processor",
short = "wait until reaching cycle",
see_also = ["script-branch",
"wait-for-variable",
"wait-for-hap",
"<text-console>.wait-for-string",
"<processor>.wait-for-step"],
doc = """
Postpones execution of a script branch until the processor reaches the
specified cycle in the simulation.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4237")
#
# ------------ wait-for-step ------------
#
def wait_for_step_command(obj, step):
if conf.python.iface.python.in_main_branch():
SIM_command_has_problem()
print ("The wait-for-step command is only allowed "
"in script branches.")
else:
try:
wait_for_obj_hap("Core_Step_Count", obj, step)
except SimExc_Break:
print "Command '%s.wait-for-step' interrupted." % obj.name
SIM_command_has_problem()
new_command("wait-for-step", wait_for_step_command,
[arg(int_t, 'step')],
namespace = "processor",
short = "wait until reaching step",
see_also = ["script-branch",
"wait-for-variable",
"wait-for-hap",
"<text-console>.wait-for-string",
"<processor>.wait-for-cycle"],
doc = """
Postpones execution of a script branch until the processor reaches the
specified step in the simulation.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4267")
#
# ------------ wait-for-hap ------------
#
def wait_for_hap_command(hap, obj, idx0, idx1, ret):
if conf.python.iface.python.in_main_branch():
SIM_command_has_problem()
print "The wait-for-hap command is only allowed in script branches."
else:
try:
r = wait_for_obj_hap(hap, obj, idx0, idx1)
except SimExc_Break:
print "Command 'wait-for-hap' interrupted."
SIM_command_has_problem()
if ret:
for i in range(len(r)):
if type(r[i]) == type(conf.sim):
r[i] = r[i].name
simenv.set_variable_value_idx(ret, r[i], 1, i)
new_command("wait-for-hap", wait_for_hap_command,
[arg(str_t, 'hap', '', expander = hap_expander),
arg(obj_t('object'), "object", '?', None),
arg(int_t, 'idx0', '?', -1),
arg(int_t, 'idx1', '?', -1),
arg(str_t, 'ret', '?')],
short = "wait until hap occurs",
see_also = ["script-branch",
"wait-for-variable",
"<text-console>.wait-for-string",
"<processor>.wait-for-cycle",
"<processor>.wait-for-step"],
doc = """
Postpones execution of a script branch until <arg>hap</arg> occurs. The
optional argument <arg>object</arg> limits the haps to a specific object, and
<arg>idx0</arg>, <arg>idx1</arg> can be used for indexed and range haps. The
data associated with the hap can be saved into the named variable specified by
<arg>ret</arg>. This variable will be indexed, with local scope.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4301")
def list_script_branch_command():
print "ID Object Hap Hap-ID Function"
for sb in conf.python.script_branches:
if sb[3]:
name = sb[3].name
else:
name = "None"
print "%-3d %-15s %-20s %3d %s" % (sb[0], name, sb[1], sb[2], sb[4])
new_command("list-script-branches", list_script_branch_command,
see_also = ["script-branch", "interrupt-script-branch"],
doc = """
List all currently active script branches.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4330")
def interrupt_script_branch_command(id):
try:
conf.python.iface.python.interrupt_branch(id)
print "Script branch %d interrupted." % id
except Exception, msg:
print "Failed interrupting script branch: %s" % msg
SIM_command_has_problem()
new_command("interrupt-script-branch", interrupt_script_branch_command,
[arg(int_t, 'id')],
see_also = ["script-branch", "list-script-branches"],
doc = """
Send an interrupt exception to a script branch. The argument is the script
branch ID, that is returned by the <cmd>script-branch</cmd> command, and that
is also listed by the <cmd>list-script-branches</cmd> command. The branch will
wake up and exit when it receives the exception.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4344")
all_tee_objs = []
def tee_handler(file, buf, count):
file.write(buf)
file.flush()
def tee(filename):
global all_tee_objs
try:
file_obj=open(filename, "w")
except:
print "Failed to open '%s' for writing" % filename
SIM_command_has_problem()
return
all_tee_objs.append((filename, file_obj))
SIM_add_output_handler(tee_handler, file_obj)
def remove_tee(filename):
    # iterate over a copy, since matching entries are removed from the list
    for (name, obj) in all_tee_objs[:]:
        if filename == None or name == filename:
            SIM_remove_output_handler(tee_handler, obj)
            obj.close()
            all_tee_objs.remove((name, obj))
            if filename:
                return
    if filename:
        print "Output not enabled to file '%s'" % filename
        SIM_command_has_problem()
new_command("output-file-start", tee,
[ arg(filename_t(), "filename") ],
type = ["Files and Directories"],
short = "send output to file",
see_also = ['output-file-stop'],
doc = """
Send output to <i>filename</i>. Any output displayed in the Simics console that goes through the output handler API will be written to the file.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4384")
new_command("output-file-stop", remove_tee,
[ arg(filename_t(), "filename", "?", None) ],
type = ["Files and Directories"],
short = "stop sending output to file",
see_also = ['output-file-start'],
doc = """
Stop sending output to file. If no filename is given, then the command
will disable all file output.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4392")
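# Illustrative output capture session (the file name is a hypothetical example,
# not from the original source):
#   simics> output-file-start transcript.txt
#   simics> list-classes
#   simics> output-file-stop transcript.txt
# Calling output-file-stop with no argument closes every file opened with
# output-file-start.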
def add_module_path_cmd(path):
VT_add_module_dir(path)
SIM_module_list_refresh()
new_command("add-module-directory", add_module_path_cmd,
[arg(filename_t(dirs = 1), "path")],
type = ["Simics Search Path", "Configuration",
"Files and Directories", "Modules"],
short = "add a directory to the module search path",
doc = """
Adds a directory to the Simics module search path. This path is used to look
for additional modules that can be used to extend the functionality of Simics.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4405")
def signed_cmd(size, value):
return (value & ((1 << size) - 1)) - ((value << 1) & (1 << size))
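# Worked example of the sign-extension arithmetic above (illustrative):
#   signed_cmd(16, 0xffff)
#     = (0xffff & 0xffff) - (0x1fffe & 0x10000)
#     = 0xffff - 0x10000
#     = -1
# i.e. a value with the top bit of the given width set comes back negative.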
new_command("signed", lambda x : signed_cmd(64, x),
[arg(int_t, "int")],
type = ["Command-Line Interface", "Output"],
short = "interpret unsigned integer as signed",
doc = """
Interpret an integer, <param>int</param>, as a signed value of a specific bit
width. For example <cmd>signed16 0xffff</cmd> will return -1. The
<cmd>signed</cmd> command assumes a 64 bit width.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4418")
def _signed_cmd(s):
return lambda x : signed_cmd(s, x)
for i in (8, 16, 32, 64):
new_command("signed%d" % i, _signed_cmd(i),
[arg(int_t, "int")],
type = ["Command-Line Interface", "Output"],
short = "interpret unsigned integer as signed",
doc_with = "signed", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4432")
import sim_commands
# asserts that Simics is stopped
def assert_stopped():
assert_cpu()
if SIM_simics_is_running():
raise CliError, "Simics is already running."
# internal for commands
def list_processors():
limit = SIM_number_processors()
print "Current status:"
for i in range(limit):
cpu = SIM_get_processor(i)
print "Processor", cpu.name, iff(SIM_processor_enabled(cpu), "enabled.", "disabled.")
def conf_object_expander(string):
return get_completions(string, conf.all_object_names);
#
# -------------- simics-path commands ----------------
#
def add_directory_cmd(path, prepend):
SIM_add_directory(path, prepend)
new_command("add-directory", add_directory_cmd,
[arg(filename_t(dirs = 1, keep_simics_ref = 1), "path"),
arg(flag_t, "-prepend")],
type = ["Simics Search Path", "Configuration",
"Files and Directories"],
short = "add a directory to the Simics search path",
doc = """
Adds a directory to the Simics search path. The Simics search path is a list of
directories where Simics searches for additional files when loading a
configuration or executing a command like <cmd>load-file</cmd>.
The value of <arg>path</arg> is normally appended at the end of the list. If
the <i>-prepend</i> flag is given, the path will be added first in the list.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="27")
def print_directories_cmd():
dirs = SIM_get_directories()
print "The current Simics search path is:"
for dir in dirs:
print dir
new_command("list-directories", print_directories_cmd,
[],
type = ["Simics Search Path", "Files and Directories"],
short = "list directories in Simics search path",
doc = """
Print a list of all directories in the Simics search path.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="48")
new_command("print-directories", print_directories_cmd,
[],
short = "print the directory Simics search path",
type = "deprecated commands",
deprecated = "list-directories", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="56")
def clear_directories_cmd():
SIM_clear_directories()
print "Simics search path is now empty."
new_command("clear-directories", clear_directories_cmd,
[],
type = ["Simics Search Path", "Files and Directories"],
short = "clear the Simics search path",
doc = """
Empty the Simics search path.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="66")
def resolve_file_cmd(filename):
file = SIM_lookup_file(filename)
if not file:
print "Not in search path:", filename
else:
return file
new_command("resolve-file", resolve_file_cmd,
[arg(str_t, "filename")],
type = ["Simics Search Path", "Files and Directories"],
short = "resolve a filename",
alias = "lookup-file",
doc = """\
Looks for the file <arg>filename</arg> in the Simics search path. If it
is found, its complete path is returned.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="82")
def native_path_cmd(filename):
return SIM_native_path(filename)
new_command("native-path", native_path_cmd,
[arg(str_t, "filename")],
type = ["Files and Directories"],
short = "convert a filename to host native form",
doc = """\
Converts a path to its host native form. On Unix, this command returns
<arg>filename</arg> unchanged. On Windows, it translates Cygwin-style paths
to native Windows paths. Refer to the documentation for
<b>SIM_native_path()</b> for a detailed description of the conversions
made.
This command can be used for portability when opening files residing on the
host filesystem.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="95")
# Print a table (list of rows, each a list of strings).
# The alignments list specifies how each column should be aligned,
# each entry being "r" or "l". The string 'spacing' is put between columns.
def print_table(table, alignments, spacing):
widths = [max(len(table[row][col])
for row in xrange(len(table)))
for col in xrange(len(table[0]))]
print "\n".join(spacing.join({'l': "%-*s", 'r': "%*s"}[a] % (w, s)
for (s, w, a) in zip(trow, widths,
alignments))
for trow in table)
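# Illustrative print_table() call (made-up data, not from the original source):
#   print_table([["processor", "steps"],
#                ["cpu0",      "12345"]],
#               ["l", "r"], "  ")
# left-aligns the first column, right-aligns the second, and pads each column
# to the width of its longest entry.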
def print_time_cmd(cpu, steps = False, cycles = False, all = False):
if all:
if steps or cycles:
print "The -s and -c flags cannot be used with -all."
SIM_command_has_problem()
return
cpus = []
next = SIM_next_queue(None)
while next:
cpus.append(next)
next = SIM_next_queue(next)
elif cpu:
cpus = [cpu]
else:
cpus = [current_processor()]
if steps and cycles:
print "The -s and -c flags cannot be used at the same time."
SIM_command_has_problem()
return
elif steps:
return SIM_step_count(cpus[0])
elif cycles:
return SIM_cycle_count(cpus[0])
print_table([["processor", "steps", "cycles", "time [s]"]]
+ [[cpu.name,
number_str(SIM_step_count(cpu), 10),
number_str(SIM_cycle_count(cpu), 10),
"%.3f" % SIM_time(cpu)]
for cpu in cpus],
["l", "r", "r", "r"], " ")
for ns in ("", "processor"):
new_command("print-time", print_time_cmd,
{"": [arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(flag_t, "-s"),
arg(flag_t, "-c"),
arg(flag_t, "-all")],
"processor": [ arg(flag_t, "-s"),
arg(flag_t, "-c")]}[ns],
namespace = ns,
alias = "ptime",
type = ["Execution", "Profiling"],
short = "print number of steps and cycles executed",
repeat = print_time_cmd,
doc = """
Prints the number of steps and cycles that a processor has executed.
The cycle count is also displayed as simulated time in seconds.
If called from a processor namespace (e.g., <i>cpu0</i><tt>.print-time</tt>),
the time for that processor is printed. Otherwise, the time for the
current processor is printed, or, if the <arg>-all</arg> flag is
given, the time for all processors.
If the <arg>-c</arg> flag is used, the cycle count for the processor is returned
and nothing is printed. The <arg>-s</arg> flag is similar and returns the
step count.
A step is a completed instruction or an exception. An instruction that
fails to complete because of an exception will count as a single step,
including the exception.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="156")
#
# -------------------- penable --------------------
#
def penable_cmd(poly): # flag_t or obj_t
if not poly:
penable_cmd((obj_t, current_processor()))
elif poly[0] == flag_t: # -all
limit = SIM_number_processors()
for i in range(limit):
penable_cmd((obj_t, SIM_get_processor(i)))
else:
cpu = poly[1]
try:
SIM_enable_processor(cpu)
print "Enabling processor", cpu.name
except Exception, msg:
print "Failed enabling processor.", msg
new_command("penable", penable_cmd,
[arg((obj_t('processor', 'processor'), flag_t),
("cpu-name", "-all"), "?")],
type = ["Execution", "Changing Simulated State"],
short = "switch processor on",
doc = """
Enables a processor. If no processor is specified, the current processor will
be enabled. If the flag <arg>-all</arg> is passed, all processors will be
enabled.
<b>pdisable</b> takes the same arguments; if no processor is
given, the current processor will be disabled. The namespace variant can also be used
to disable a processor. A disabled processor is simply stalled for an infinite
amount of time. Make sure that you always have at least one enabled processor.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="208")
def obj_enable_cmd(obj):
try:
SIM_enable_processor(obj)
print "Enabling processor", obj.name
except Exception, msg:
print "Failed enabling processor.", msg
new_command("enable", obj_enable_cmd,
[],
namespace = "processor",
short = "switch processor on",
doc_with = "penable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="232")
#
# -------------------- pdisable --------------------
#
def pdisable_cmd(poly):
if not poly:
pdisable_cmd((obj_t, current_processor()))
elif poly[0] == flag_t: # -all
limit = SIM_number_processors()
for i in range(limit):
pdisable_cmd((obj_t, SIM_get_processor(i)))
else:
cpu = poly[1]
try:
SIM_disable_processor(cpu)
print "Disabling processor", cpu.name
except Exception, msg:
print "Failed disabling processor.", msg
new_command("pdisable", pdisable_cmd,
[arg((obj_t('processor', 'processor'), flag_t),
("cpu-name", "-all"), "?")],
alias = "",
short = "switch processor off",
doc_with = "penable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="257")
def obj_disable_cmd(obj):
try:
SIM_disable_processor(obj)
print "Disabling processor", obj.name
except Exception, msg:
print "Failed disabling processor.", msg
new_command("disable", obj_disable_cmd,
[],
namespace = "processor",
short = "switch processor off",
doc_with = "penable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="271")
#
# -------------------- pstatus --------------------
#
def pstatus_cmd():
list_processors()
new_command("pstatus", pstatus_cmd,
[],
alias = "",
type = ["Execution", "Inspecting Simulated State"],
short = "show processors' status",
doc = """
Show the enabled/disabled status of all processors in the Simics session.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="284")
#
# -------------------------- memory profiling --------------------------
#
def supports_mem_profiling(cpu):
try:
cpu.memory_profilers
except:
print "%s does not support memory read/write profiling." % cpu.name
return 0
return 1
cpu_mem_prof_type = {"read" : Sim_RW_Read,
"write" : Sim_RW_Write}
def cpu_mem_prof_type_expander(string):
return get_completions(string, cpu_mem_prof_type.keys())
def print_valid_mem_prof_types():
print "Valid types are: %s" % ", ".join(cpu_mem_prof_type.keys())
def add_memory_profiler_cmd(cpu, type, obj):
if not supports_mem_profiling(cpu):
return
try:
i = cpu_mem_prof_type[type]
except:
print "'%s' is not a valid profiler type." % type
print_valid_mem_prof_types()
return
if cpu.memory_profilers[i]:
print ("There is an active profiler for memory %s already: %s"
% (type, cpu.memory_profilers[i].name))
return
if obj:
cpu.memory_profilers[i] = obj
else:
# create a new profiler
name = "%s_%s_mem_prof" % (cpu.name, type)
try:
prof = SIM_get_object(name)
cpu.memory_profilers[i] = prof
print ("[%s] Existing profiler added for memory %s: %s"
% (cpu.name, type, name))
except:
try:
gran = cpu.memory_profiling_granularity_log2
desc = "data profiler"
prof = SIM_create_object('data-profiler', name,
[['description', desc],
['granularity', gran],
['physical_addresses', 1]])
cpu.memory_profilers[i] = prof
print ("[%s] New profiler added for memory %s: %s"
% (cpu.name, type, name))
except:
print "Could not add memory profiler."
def remove_memory_profiler_cmd(cpu, type):
if not supports_mem_profiling(cpu):
return
try:
cpu.memory_profilers[cpu_mem_prof_type[type]] = None
except:
print "'%s' is not a valid profiler type." % type
print_valid_mem_prof_types()
def list_memory_profilers_cmd(cpu):
if not supports_mem_profiling(cpu):
return
for t in cpu_mem_prof_type.keys():
try:
name = cpu.memory_profilers[cpu_mem_prof_type[t]].name
except:
name = ""
print "%20s: %s" % (t, name)
new_command("add-memory-profiler", add_memory_profiler_cmd,
[arg(str_t, "type", expander = cpu_mem_prof_type_expander),
arg(obj_t("data-profiler", "data-profiler"), "profiler", "?")],
namespace = "processor",
type = ["Memory", "Profiling"],
short="add a memory profiler to the processor",
doc = """
Add a data profiler to the specified processor that will record either
reads or writes to memory (indexed on physical address) depending on
whether the <tt>type</tt> argument is 'read' or 'write'. An existing
data profiler may be specified with the <tt>profiler</tt> argument;
otherwise, a new data profiler will be created.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="373")
new_command("remove-memory-profiler", remove_memory_profiler_cmd,
[arg(str_t, "type", expander = cpu_mem_prof_type_expander)],
namespace = "processor",
type = ["Memory", "Profiling"],
short="remove a memory profiler from the processor",
doc = """
Remove any memory profiler of the specified <tt>type</tt> ('read' or
'write') currently attached to the processor.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="386")
new_command("list-memory-profilers", list_memory_profilers_cmd,
[],
namespace = "processor",
type = ["Memory", "Profiling"],
short="list memory profilers connected to the processor",
doc = """
List all memory profilers connected to the processor, and what kind of
data they are collecting.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="395")
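# Illustrative memory-profiling session (the object name cpu0 is an assumed
# example, not from the original source):
#   simics> cpu0.add-memory-profiler read    # creates cpu0_read_mem_prof
#   simics> cpu0.list-memory-profilers
#   simics> cpu0.remove-memory-profiler read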
#
# -------------------- pstats --------------------
#
def all_processors():
return filter(SIM_object_is_processor, SIM_get_all_objects())
def print_stat_per_cpu(cpu):
pr("\nStatistics for cpu %s\n" % cpu.name);
pr(" User Supervisor Total Description\n");
elided = 0
for (name, uval, sval) in cpu.mode_counters:
if uval or sval:
pr("%11d %11d %11d %s\n" % (uval, sval, uval + sval, name))
else:
elided = 1
if elided:
pr("\n(counters whose values are all zero were not displayed)\n")
def pstats_cmd(args):
if args[0] == flag_t:
for c in all_processors():
print_stat_per_cpu(c)
else:
cpu = args[1]
if not cpu:
cpu, _ = get_cpu()
print_stat_per_cpu(cpu)
def obj_pstats_cmd(obj):
pstats_cmd((obj_t, obj))
new_command("print-statistics", pstats_cmd,
[arg((obj_t('processor', 'processor'), flag_t), ("cpu-name","-all"), "?",
(obj_t,None), expander = (cpu_expander,0))],
type = ["Profiling"],
alias = "pstats",
short = "print various statistics",
doc = """
Prints various statistics from the simulation. The <b>print-statistics</b>
command prints statistics for the currently selected CPU if no argument
is given and for all CPUs if the -all flag is given.
Any statistics that have been compiled into the
simulator are printed, as well as user-defined per-mode counters.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="438")
new_command("print-statistics", obj_pstats_cmd,
[],
short = "print various statistics",
namespace = "processor",
doc_with = "print-statistics", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="451")
#
# -------------------- step-break-absolute, step-break --------------------
#
def sim_break_absolute_cmd(cpu, cycles):
if not cpu:
(cpu, _) = get_cpu()
sim_break_cmd(cpu, cycles - SIM_step_count(cpu))
def sim_break_cmd(cpu, cycles):
if (cycles < 0):
print "Cannot break on negative time"
return
if not cpu:
(cpu, _) = get_cpu()
SIM_break_step(cpu, cycles)
new_command("step-break-absolute", sim_break_absolute_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(int_t, "instructions")],
alias = ["sba", "sim-break-absolute"],
type = ["Execution", "Breakpoints", "Debugging"],
short = "set absolute time breakpoint",
group_short = "set step breakpoints",
namespace_copy = ("processor", sim_break_absolute_cmd),
see_also = ["step-break-absolute", "cycle-break", "cycle-break-absolute", "list-breakpoints"],
doc = """
Set a breakpoint so that the selected CPU will stop after its step counter has
reached the <i>instructions</i> value. If the CPU is not specified the selected
frontend processor will be used (see <b>pselect</b>).
To list all breakpoints set use the command <b>list-breakpoints</b>.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="474")
new_command("step-break", sim_break_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(int_t, "instructions")],
alias = ["sb", "sim-break"],
short = "set time breakpoint",
namespace_copy = ("processor", sim_break_cmd),
see_also = ["step-break-absolute", "cycle-break", "cycle-break-absolute", "list-breakpoints"],
doc = """
Sets a breakpoint so that the CPU will stop after executing <i>instructions</i>
number of steps from the time the command was issued. If the CPU is not
specified the selected frontend processor will be used (see <b>pselect</b>).
To list all breakpoints set use the command <b>list-breakpoints</b>.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="491")
def cycle_break_absolute_cmd(cpu, cycles):
if not cpu:
(cpu, _) = get_cpu()
if (cycles < SIM_cycle_count(cpu)):
print "Cannot break on negative time"
return
SIM_break_cycle(cpu, cycles - SIM_cycle_count(cpu))
def cycle_break_cmd(cpu, cycles):
if not cpu:
(cpu, _) = get_cpu()
if (cycles < 0):
print "Cannot break on negative time"
return
SIM_break_cycle(cpu, cycles)
new_command("cycle-break-absolute", cycle_break_absolute_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(int_t, "cycles")],
alias = "cba",
type = ["Execution", "Breakpoints", "Debugging"],
short = "set absolute cycle breakpoint",
namespace_copy = ("processor", cycle_break_absolute_cmd),
see_also = ["cycle-break", "step-break", "step-break-absolute", "list-breakpoints"],
doc = """
Set a breakpoint so that the selected CPU will stop after its cycle counter has
reached the <i>cycles</i> value. If the CPU is not specified the selected
frontend processor will be used (see <b>pselect</b>).
To list all breakpoints set use the command <b>list-breakpoints</b>.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="522")
new_command("cycle-break", cycle_break_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(int_t, "cycles")],
alias = "cb",
short = "set cycle breakpoint",
namespace_copy = ("processor", cycle_break_cmd),
see_also = ["cycle-break-absolute", "step-break", "step-break-absolute", "list-breakpoints"],
doc = """
Sets a breakpoint so that the CPU will stop after running <i>cycles</i>
number of cycles from the time the command was issued. If the CPU is not
specified the selected frontend processor will be used (see <b>pselect</b>).
To list all breakpoints set use the command <b>list-breakpoints</b>.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="538")
#
# -------------------- run --------------------
#
_do_disassemble = 0
_do_register_trace = 0
hit_user_breakpoint = 0
user_run_continuation = None
def user_continue2(dummy):
global _do_disassemble, _do_register_trace
_do_disassemble = 1
_do_register_trace = 0
_started_sim()
SIM_continue(0)
def user_continue():
global hit_user_breakpoint, _do_disassemble, _do_register_trace
hit_user_breakpoint = 0
if conf.python.iface.python.in_main_branch() and SIM_postponing_continue():
SIM_post_command(user_continue2, None)
else:
_do_disassemble = 1
_do_register_trace = 0
try:
_started_sim()
SIM_continue(0)
except:
pass
_sim_stop_handler_id = None
_sim_btof_message = None
def _started_sim():
global _sim_stop_handler_id
if _sim_stop_handler_id == None:
_sim_stop_handler_id = SIM_hap_add_callback("Core_Simulation_Stopped",
simulation_stopped_handler,
None)
def simulation_stopped_handler(arg, obj, exc, str):
global hit_user_breakpoint, user_run_continuation, _do_disassemble
global _sim_stop_handler_id
SIM_hap_delete_callback_id("Core_Simulation_Stopped", _sim_stop_handler_id)
_sim_stop_handler_id = None
SIM_step_clean(current_processor(), user_time_stop, None)
SIM_time_clean(current_processor(), Sim_Sync_Processor,
user_time_stop, None)
if hit_user_breakpoint and user_run_continuation != None:
user_run_continuation()
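# Summary of the run/continue flow above (a description of the code as
# written, not authoritative documentation): user_continue() either posts
# user_continue2() via SIM_post_command() when continuing must be postponed,
# or calls SIM_continue() directly. _started_sim() installs a one-shot
# "Core_Simulation_Stopped" hap callback; simulation_stopped_handler() removes
# that callback, cleans up any pending user_time_stop events and, if a user
# breakpoint was hit, invokes the stored user_run_continuation.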
def back_to_front_dis_handler(arg, obj):
global _do_disassemble, _sim_btof_message
if _sim_btof_message and _sim_btof_message != "Received control-c":
print _sim_btof_message
_sim_btof_message = None
if _do_disassemble:
disassemble_at_prompt()
_do_disassemble = 0
check_display_output()
SIM_hap_add_callback("Core_Back_To_Front", back_to_front_dis_handler, None)
old_regs = {}
def disassemble_at_prompt(*dummy):
global old_regs, _do_register_trace
new_proc = SIM_current_processor()
if _do_register_trace:
try:
old_regs_cpu = old_regs[new_proc]
except KeyError, msg:
old_regs[new_proc] = {}
else:
try:
diff_regs = get_obj_funcs(new_proc)['diff_regs']
except KeyError, msg:
pass
else:
if callable(diff_regs):
diff_regs = diff_regs(new_proc)
for r in SIM_get_all_registers(new_proc):
if SIM_get_register_name(new_proc, r) in diff_regs:
new_val = SIM_read_register(new_proc, r)
try:
old_val = old_regs_cpu[r]
except KeyError, msg:
old_regs[new_proc][r] = new_val
else:
if new_val != old_val:
old_regs[new_proc][r] = new_val
print "\t%s <- %s" % (SIM_get_register_name(new_proc, r), number_str(new_val))
try:
pending_exception_func = get_obj_funcs(new_proc)['get_pending_exception_string']
pending_exception = pending_exception_func(new_proc)
except KeyError:
pending_exception = None
if pending_exception != None:
local_print_disassemble_line(new_proc, SIM_get_program_counter(new_proc), 1, 1, pending_exception)
else:
local_print_disassemble_line(new_proc, SIM_get_program_counter(new_proc), 1)
if new_proc != current_processor():
set_current_processor(new_proc)
print "Setting new inspection cpu: %s" % new_proc.name
def user_time_stop(cpu, dummy):
global hit_user_breakpoint
hit_user_breakpoint = 1
raise SimExc_Break, "hit time breakpoint"
def run_cmd(count):
global user_run_continuation
assert_stopped()
if count > 0:
SIM_step_post(current_processor(), count, user_time_stop, None)
user_run_continuation = None
user_continue()
new_command("run", run_cmd,
[arg(uint64_t, "count", "?", 0)],
alias = ["continue", "c", "r"],
type = ["Execution"],
repeat = run_cmd,
short = "start execution",
see_also = ["step-instruction", "run-cycles"],
doc = """
Tells Simics to start or continue executing instructions. If a <arg>count</arg>
argument is provided, Simics will execute <arg>count</arg> number of
instructions and stop.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="684")
def run_cycles_cmd(count):
global user_run_continuation
assert_stopped()
if count > 0:
SIM_time_post_cycle(current_processor(), count, Sim_Sync_Processor, user_time_stop, None)
user_run_continuation = None
user_continue()
new_command("run-cycles", run_cycles_cmd,
[arg(uint64_t, "count", "?", 0)],
alias = ["continue-cycles", "cc", "rc"],
type = ["Execution"],
see_also = ["step-cycle", "run"],
repeat = run_cycles_cmd,
short = "start execution",
doc = """
Tells Simics to start or continue executing instructions. If a <arg>count</arg>
argument is provided, Simics will execute <arg>count</arg> number of cycles and
stop. Note that running <arg>count</arg> cycles may or may not be equivalent to
running <arg>count</arg> instructions depending on the way Simics is
configured. Refer to the chapter Understanding Simics Timing in the Simics User
Guide for more information.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="707")
def stop_cmd(all, msg):
VT_user_interrupt(msg, all)
if not conf.python.iface.python.in_main_branch():
wait_for_hap("Core_Simulation_Stopped")
new_command("stop", stop_cmd,
[arg(flag_t, "-a"),
arg(str_t, "message", "?", None)],
type = ["Execution"],
repeat = stop_cmd,
short = "interrupt simulation",
see_also = ["run"],
doc = """
Stop simulation as soon as possible. If the <param>-a</param> argument is
given, any running command script will also be interrupted. A
<param>message</param> to be printed on the console when the simulation stops
can also be specified.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="728")
#
# -------------------- stepi --------------------
#
stepi_count = 0
def stepi_continuation():
global stepi_count, _do_disassemble, _do_register_trace
stepi_count = stepi_count - 1
if stepi_count <= 0:
return
if _do_disassemble:
disassemble_at_prompt()
_do_disassemble = 0
run_stepi(0)
def run_stepi(register_trace):
global stepi_count, user_run_continuation, _do_disassemble, _do_register_trace
global old_regs
cpu = current_processor()
SIM_step_post(cpu, 1, user_time_stop, None)
if SIM_postponing_continue():
user_run_continuation = stepi_continuation
user_continue()
return
while 1:
_do_disassemble = 1
_do_register_trace = register_trace
if register_trace:
old_regs[cpu] = {}
for r in SIM_get_all_registers(current_processor()):
old_regs[cpu][r] = SIM_read_register(cpu, r)
try:
_started_sim()
SIM_continue(0)
except:
return
stepi_count = stepi_count - 1
if stepi_count <= 0:
return
disassemble_at_prompt()
SIM_step_post(cpu, 1, user_time_stop, None)
def stepi_cmd(count, r):
global stepi_count
assert_stopped()
stepi_count = count
run_stepi(r)
new_command("step-instruction", stepi_cmd,
[arg(uint64_t, "count", "?", 1), arg(flag_t, "-r")],
alias = [ "si", "stepi" ],
type = ["Execution"],
short = "step one or more instructions",
repeat = stepi_cmd,
see_also = ["run", "step-cycle", "step-cycle-single"],
doc = """
Executes <arg>count</arg> instructions, printing the next instruction to be
executed at each step. <arg>count</arg> defaults to one. With the <arg>-r</arg>
flag, register changes will also be printed.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="797")
#
# -------------------- step-cycle --------------------
#
stepc_count = 0
def stepc_continuation():
global stepc_count, _do_disassemble, _do_register_trace
stepc_count = stepc_count - 1
if stepc_count <= 0:
return
_do_register_trace = 0
if _do_disassemble:
disassemble_at_prompt()
_do_disassemble = 0
run_stepc()
def run_stepc(*dummy):
global stepc_count, user_run_continuation, _do_disassemble, _do_register_trace
SIM_time_post_cycle(current_processor(), 1, Sim_Sync_Processor, user_time_stop, None)
if SIM_postponing_continue():
user_run_continuation = stepc_continuation
user_continue()
return
while 1:
_do_disassemble = 1
_do_register_trace = 0
try:
_started_sim()
SIM_continue(0)
except:
return
stepc_count = stepc_count - 1
if stepc_count <= 0:
return
disassemble_at_prompt()
SIM_time_post_cycle(current_processor(), 1, Sim_Sync_Processor, user_time_stop, None)
def step_cycle_cmd(count):
global stepc_count
assert_stopped()
stepc_count = count
run_stepc()
new_command("step-cycle", step_cycle_cmd,
[arg(int_t, "count", "?", 1)],
alias = "sc",
type = ["Execution"],
repeat = step_cycle_cmd,
see_also = ["step-cycle-single", "run", "step-instruction"],
short = "step one or more cycles",
doc = """
Executes <arg>count</arg> cycles, printing the next instruction to be executed
at each cycle. <arg>count</arg> defaults to one.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="861")
def step_cycle_single_handler(cpu, dummy):
raise SimExc_Break, "hit time breakpoint"
def step_cycle_single_cmd(disass):
global number_of_processors
number_of_processors = 0
assert_stopped()
proc = SIM_current_processor()
if proc.ooo_mode != 'micro-architecture':
print "This command is only available in Micro Architecture Mode"
return
SIM_time_post_cycle(proc, 0, Sim_Sync_Processor, step_cycle_single_handler, None)
SIM_continue(0)
new = SIM_current_processor()
if proc != new:
set_current_processor(new)
if not disass:
local_print_disassemble_line(new, SIM_get_program_counter(new), 1)
new_command("step-cycle-single", step_cycle_single_cmd,
[arg(flag_t, "-n")],
alias = "scs",
type = ["Execution"],
repeat = step_cycle_single_cmd,
see_also = ["step-cycle", "step-instruction", "run"],
short = "step one cycle and switch to the next cpu",
doc = """
Used with the Micro Architectural Interface only. <cmd>step-cycle-single</cmd>
executes one cycle on the current processor, switches to the next processor and
prints the next instruction to be committed. Repeated use will thus advance
each processor in a round robin fashion. <arg>-n</arg> will prevent
<cmd>step-cycle-single</cmd> from printing the next instruction after
executing.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="900")
#
# -------------------- pselect --------------------
#
def pselect_cmd(cpu):
if not cpu:
try:
cpu, name = get_cpu()
print "Currently selected processor is", name
except:
print "There are no processors available"
else:
set_current_processor(cpu)
new_command("pselect", pselect_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?")],
alias = "psel",
type = ["Command-Line Interface"],
short = "select a processor",
doc = """
Use this command to select a default processor for the frontend. Many
commands that take a processor argument operate on the default
processor when the argument is left out. Note that selecting a
processor does not affect the order in which processors are executed
when multiprocessing is simulated.
Without any argument, this command will print the currently selected
processor.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="931")
#
# -------------------- print-event-queue --------------------
#
def print_event_queue_cmd(cpu, queue, internal):
if not cpu:
(cpu, _) = get_cpu()
def print_queue(q):
i = 0
for (obj, desc, time) in cpu.event_desc[q]:
if not internal and desc.startswith("Internal:"):
continue
if obj:
desc = "[" + obj.name + "] " + desc
print "%3d :%15s : %s" % (i, number_str(time, 10), desc)
i += 1
if queue >= 0:
if queue > 1:
print "No queue", queue, "defined."
return
print_queue(queue)
else:
print "Event Steps Description"
print_queue(Sim_Queue_Step)
print
print "Event Time Description"
print_queue(Sim_Queue_Time)
new_command("print-event-queue", print_event_queue_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(int_t, "queue", "?", -1),
arg(flag_t, "-i")],
alias = "peq",
type = ["Inspecting Simulated State"],
short = "print event queue for processor",
doc = """
The simulator keeps an event queue for each processor. Interrupts,
exceptions, and other events are posted on this event queue. For each event,
the time to its execution and a brief description of it are printed.
The time unit depends on the timing model of the queue; the default is
the number of instructions before the event is triggered.
If no CPU is specified, the selected frontend CPU is used.
A <i>queue</i> argument of 0 means that only the step queue is printed,
and for 1, only the time queue. Default is to print both queues.
The flag -i enables printing of Simics internal events.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="977")
#
# -------------------- break, tbreak --------------------
#
def do_break(object, t, address, length, r, w, x, temp):
if length < 1:
print "The breakpoint length must be >= 1 bytes."
return
access = 0
mode = ""
if r: access = access | Sim_Access_Read; mode = mode + "r"
if w: access = access | Sim_Access_Write; mode = mode + "w"
if x or access == 0:
access = access | Sim_Access_Execute
mode = mode + "x"
id = SIM_breakpoint(object, t, access, address, length, temp)
if temp:
pr("Temporary breakpoint ")
else:
pr("Breakpoint ")
pr(`id` + " set on address " + number_str(address, 16))
if length > 1:
pr(", length " + number_str(length, 10))
pr(" with access mode '" + mode + "'\n")
bp_list = conf.sim.breakpoints[:]
for i in range(len(bp_list)):
obj = bp_list[i][11]
if obj == object:
try:
r = SIM_get_attribute_idx(obj, "breakpoints", i+1)
if i+1 != id and address <= r[2] and address+length-1 >= r[1]:
print "Note: overlaps with breakpoint", r[0]
except:
pass
return (id,)
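# Illustrative note (derived from the overlap check above, not an official
# spec): conf.sim.breakpoints entries are indexed by breakpoint id, and
# r[1]/r[2] hold the first and last address covered by an entry. For example,
# a hypothetical existing breakpoint covering 0x1000-0x1003 is reported as
# overlapping when a new breakpoint is set at 0x1002 with length 4, since
# 0x1002 <= r[2] and 0x1002 + 4 - 1 >= r[1].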
def break_cmd(object, address, len, r, w, x):
break_type = Sim_Break_Physical
if object.classname == "context":
break_type = Sim_Break_Virtual
return do_break(object, break_type, address, len, r, w, x, 0)
new_command("break", break_cmd,
[arg(uint64_t, "address"),
arg(uint64_t, "length", "?", 1),
arg(flag_t, "-r"), arg(flag_t, "-w"), arg(flag_t, "-x")],
namespace = "breakpoint",
type = ["Breakpoints", "Debugging"],
short="set breakpoint",
see_also = ["unbreak", "delete", "enable", "ignore", "set-prefix",
"set-substr", "set-pattern", "list-breakpoints"],
doc = """
Add breakpoint (read, write, or execute) on an object implementing the
breakpoint interface. This is typically a memory space object such as
physical memory; e.g., <cmd>phys_mem0.break 0xff3800</cmd>. Accesses
intersecting the given range will trigger the breakpoint. By default
the breakpoint will only trigger for instruction execution, but any
subset of read, write, and execute accesses can be set to trigger
using combinations of <arg>-r</arg>, <arg>-w</arg>, and <arg>-x</arg>.
<arg>length</arg> is the interval length in bytes (default is 1).
Breakpoints inserted with the <cmd>tbreak</cmd> command are
automatically disabled when they have triggered.
The default action at a triggered breakpoint is to return to the
frontend. This can be changed by using haps. When an execution
breakpoint is triggered, Simics will return to the command prompt
before the instruction is executed, while instructions triggering
read or write breakpoints will complete before control is returned to
the command prompt.
To break on a virtual address, use a context object:
<cmd>primary_context.break 0x1ff00</cmd>
Several breakpoints can be set on the same address and Simics will
break on them in turn. If hap handlers (callback functions) are
connected to the breakpoints they will also be executed in turn. Hap
handlers are called before the access is performed, allowing the user
to read a memory value that may be overwritten by the access. See the
Simics Reference Manual for a description of hap handlers.
Each breakpoint is associated with an id (printed when the breakpoint
is set or by the <cmd>list-breakpoints</cmd> command) which is used
for further references to the breakpoint.
For convenience there is also a <cmd>break</cmd> command that sets a
breakpoint on memory connected to the current frontend CPU (see
<cmd>pselect</cmd>). Default is to break on virtual address accesses
(in the current context). By prefixing the address with <arg>p:</arg>
it is possible to break on physical accesses as well (cf.
<cmd>phys_mem0.break</cmd>); e.g., <cmd>break p:0xffc0</cmd>.
Several attributes can be set for a breakpoint for breaking only when
some conditions are true. See the <cmd>disable</cmd>,
<cmd>enable</cmd>, <cmd>ignore</cmd>, <cmd>set-prefix</cmd>,
<cmd>set-substr</cmd> and <cmd>set-pattern</cmd> commands for more
details.
Breakpoints can be removed using <cmd>delete</cmd>.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1043")
def tbreak_cmd(object, address, len, r, w, x):
    break_type = Sim_Break_Physical
    if object.classname == "context":
        break_type = Sim_Break_Virtual
    return do_break(object, break_type, address, len, r, w, x, 1)
new_command("tbreak", tbreak_cmd,
[arg(int_t, "address"), arg(int_t, "length", "?", 1),
arg(flag_t, "-r"), arg(flag_t, "-w"), arg(flag_t, "-x")],
namespace = "breakpoint",
short="set temporary breakpoint on current processor",
doc_with = "<breakpoint>.break", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1112")
def classic_break_cmd(address, len, r, w, x):
if address[0] == "p":
obj = current_processor().physical_memory
kind = Sim_Break_Physical
else:
obj = current_processor().current_context
if address[0] == "l":
kind = Sim_Break_Linear
else:
kind = Sim_Break_Virtual
return do_break(obj, kind, address[1], len, r, w, x, 0)
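# Sketch of how the address prefix is interpreted by classic_break_cmd above
# (the "p:" form is documented in the break command's doc string; the "l:"
# form is inferred from the code; addresses are made-up examples):
#   break p:0xffc0   -> physical breakpoint in current_processor().physical_memory
#   break l:0x1ff00  -> linear breakpoint in current_processor().current_context
#   break 0x1ff00    -> virtual breakpoint in current_processor().current_context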
new_command("break", classic_break_cmd,
[arg(addr_t, "address"), arg(uint64_t, "length", "?", 1),
arg(flag_t, "-r"), arg(flag_t, "-w"), arg(flag_t, "-x")],
alias = "b",
short="set breakpoint on current processor",
doc_with = "<breakpoint>.break", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1131")
#
# -------------------- unbreak --------------------
#
_removed_breakpoint = 0
def _remove_breakpoint(id, address, length, access):
    global _removed_breakpoint
    bp = conf.sim.breakpoints[id]
if not bp or bp[6] & (Sim_Breakpoint_Simulation | Sim_Breakpoint_Private):
return
_removed_breakpoint = 1
try:
SIM_breakpoint_remove(id, access, address, length)
except SimExc_General, msg:
print msg
def unbreak_cmd(poly, address, length, r, w, x):
global _removed_breakpoint
_removed_breakpoint = 0
access = 0
if r: access = access | Sim_Access_Read
if w: access = access | Sim_Access_Write
if x or access == 0:
access = access | Sim_Access_Execute
if poly[0] == int_t:
id = poly[1]
bp = conf.sim.breakpoints[id]
if not bp or bp[6] & (Sim_Breakpoint_Simulation | Sim_Breakpoint_Private):
print "Cannot change simulation internal breakpoints."
return
_remove_breakpoint(id, address, length, access)
else:
for bp in conf.sim.breakpoints[:]:
_remove_breakpoint(bp[0], address, length, access)
new_command("unbreak", unbreak_cmd,
[arg((int_t,flag_t), ("id","-all")), arg(int_t, "address"), arg(int_t, "length"),
arg(flag_t, "-r"),arg(flag_t, "-w"), arg(flag_t, "-x")],
type = ["Breakpoints", "Debugging"],
short = "remove breakpoint range",
see_also = ['<breakpoint>.break', 'delete'],
doc = """
Removes an address range from a breakpoint, splitting the breakpoint if
necessary. <arg>-r</arg> (read), <arg>-w</arg> (write) and <arg>-x</arg>
(execute) specify the type of breakpoint that should be removed in the given
address range. It defaults to <em>execute</em> if no flag is
given. <arg>id</arg> is the id number of the breakpoint to operate on. To
operate on all breakpoints at once, use the <arg>-all</arg>
flag. <cmd>list-breakpoints</cmd> prints the ids of all breakpoints.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1177")
#
# -------------------- delete --------------------
#
# get breakpoint id from integer or flag
def get_id(poly):
if poly[0] == flag_t:
id = 0l
else:
id = poly[1]
return id
def delete_cmd(poly):
id = get_id(poly)
if id:
bp = conf.sim.breakpoints[id]
if bp and bp[6] & (Sim_Breakpoint_Simulation | Sim_Breakpoint_Private):
print "Cannot remove simulation internal breakpoints."
return
try:
SIM_delete_breakpoint(id) # 0 deletes all
except Exception, msg:
print msg
new_command("delete", delete_cmd,
[arg((flag_t,int_t), ("-all", "id"))],
type = ["Breakpoints", "Debugging"],
short = "remove a breakpoint",
see_also = ['<breakpoint>.break', 'enable', 'ignore', 'set-prefix', 'set-substr', 'set-pattern', 'list-breakpoints'],
doc = """
Removes a breakpoint. <i>id</i> is the id of the breakpoint to delete. Use
<cmd>list-breakpoints</cmd> to list the ids of all breakpoints. If the flag
<arg>-all</arg> is given, all breakpoints will be deleted.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1219")
#
# -------------------- enable, disable --------------------
#
def enable_disable_cmd(poly, val):
id = get_id(poly)
try:
if id == 0:
bps = conf.sim.breakpoints[:]
for i in range(len(bps)):
bps[i][5] = val
conf.sim.breakpoints[bps[i][0]] = bps[i]
else:
bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id)
bp[5] = val
SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp)
except Exception, msg:
print msg
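# Note (matching the tuple unpacking in list_breakpoints_cmd below and the
# comment in ignore_cmd): index 5 of a conf.sim.breakpoints entry is the
# 'active' flag, index 3 the hit count and index 4 the activation threshold,
# which is why enable/disable write bp[5] and ignore_cmd sets bp[4] and
# clears bp[5].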
def enable_cmd(poly):
enable_disable_cmd(poly, 1)
def disable_cmd(poly):
enable_disable_cmd(poly, 0)
new_command("enable", enable_cmd,
[arg((flag_t,int_t), ("-all", "id"))],
type = ["Breakpoints", "Debugging"],
short = "enable/disable breakpoint",
see_also = ['<breakpoint>.break', 'delete', 'ignore', 'list-breakpoints'],
doc = """
Enable/disable instruction breakpoint. <i>id</i> is the id number of the
breakpoint to enable/disable. Use <b>list-breakpoints</b> to list
breakpoint ids. If '-all' is given, all breakpoints will be
enabled/disabled. Simics will not stop on a disabled breakpoint;
however, Simics will still count it.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1255")
new_command("disable", disable_cmd,
[arg((flag_t,int_t), ("-all", "id"))],
short = "enable/disable breakpoint",
doc_with = "enable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1267")
#
# -------------------- ignore --------------------
#
def ignore_cmd(id, num):
try:
bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id)
# activate_at = hits + num + 1
bp[4] = bp[3] + num + 1
bp[5] = 0
SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp)
except Exception, msg:
print msg
new_command("ignore", ignore_cmd,
[arg(int_t, "id"), arg(int_t, "num")],
type = ["Breakpoints", "Debugging"],
short = "set ignore count for a breakpoint",
see_also = ['enable', 'list-breakpoints'],
doc = """
Sets the ignore count for a breakpoint. This means that the next <i>num</i>
times the breakpoint is reached it will not trigger (hap handlers will
not be called). To break the next time, set <i>num</i> to 0.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1286")
#
# -------------------- set-prefix --------------------
#
def set_prefix_cmd(id, prefix):
try:
bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id)
if not (bp[2] & 4):
print "This can only be applied to execution breakpoints (access type x)."
return
bp[7] = prefix
SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp)
except Exception, msg:
print msg
new_command("set-prefix", set_prefix_cmd,
[arg(int_t, "id"), arg(str_t, "prefix")],
type = ["Breakpoints", "Debugging"],
short = "set a syntax prefix for a breakpoint",
doc_items = [('Note', 'Only supported for execution breakpoints.')],
see_also = ['set-substr', 'set-pattern'],
doc = """
Set a syntax prefix for a breakpoint. When a prefix is set, Simics will only
break on instructions with that syntax prefix. For example, <b>set-prefix</b> 1
"add" will cause breakpoint 1 to stop only if the instruction begins
with "add". The prefix is compared against the text that the
instruction is disassembled to.
Set the prefix to the empty string ("") to remove this extra condition.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1311")
#
# -------------------- set-substr --------------------
#
def set_substr_cmd(id, substr):
try:
bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id)
if not (bp[2] & 4):
print "This can only be applied to execution breakpoints (access type x)."
return
bp[8] = substr
SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp)
except Exception, msg:
print msg
new_command("set-substr", set_substr_cmd,
[arg(int_t, "id"), arg(str_t, "substr")],
type = ["Breakpoints", "Debugging"],
short = "set a syntax substring for a breakpoint",
doc_items = [('NOTE', 'Only supported for execution breakpoints.')],
see_also = ['set-prefix', 'set-pattern'],
doc = """
When a substring is set, Simics will only break on instructions whose
disassembled text contains that substring. For example, <b>set-substr</b> 1
"r31" will make breakpoint 1 stop only if the instruction contains the
substring "r31".
Set the substring to the empty string ("") to remove this extra condition.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1341")
#
# -------------------- set-pattern --------------------
#
def set_pattern_cmd(id, pattern, mask):
if len(pattern) % 2 == 1 or len(mask) % 2 == 1:
print "Pattern and mask must have a length that corresponds to one or several bytes."
return
try:
bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id)
if not (bp[2] & 4):
print "This can only be applied to execution breakpoints (access type x)."
return
bp[9] = pattern
bp[10] = mask
SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp)
except Exception, msg:
print msg
new_command("set-pattern", set_pattern_cmd,
[arg(int_t, "id"), arg(str_t, "pattern"), arg(str_t, "mask")],
type = ["Breakpoints", "Debugging"],
short = "set an instruction pattern for a breakpoint",
doc_items = [('NOTE', 'Only supported for execution breakpoints.')],
see_also = ['set-prefix', 'set-substr'],
doc = """
When set for breakpoint <i>id</i> Simics will only break on
instructions with a certain bit-pattern. First the <i>mask</i> will be
applied to the instruction and then the result will be compared with
the <i>pattern</i>. For example <b>set-pattern</b> 1 "0x0100" "0x0101"
will specialize breakpoint 1 to break on instructions whose first byte
has its lowest bit set and whose second byte does not.
Since an instruction may be longer than the longest supported
integer in the frontend, both pattern and mask must be supplied
as strings.
Set pattern and mask to the empty string ("") to remove this extra condition.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1375")
#
# -------------------- list-breakpoints --------------------
#
def list_breakpoints_cmd(all):
found = 0
assert_cpu()
bp_list = conf.sim.breakpoints[:]
for (id, type, access, hits, activate_at, active, flags, prefix, substr,
pattern, mask, obj) in bp_list:
if flags & (Sim_Breakpoint_Simulation | Sim_Breakpoint_Private) and not all:
continue
ranges = obj.breakpoints[id]
if len(ranges) == 1:
print
continue
acc = ""
if access & Sim_Access_Read: acc = acc + "r"
if access & Sim_Access_Write: acc = acc + "w"
if access & Sim_Access_Execute: acc = acc + "x"
if not found:
print (" Id Type Enb %-*s %-*s Hits Space"
% (18, "Start", 18, "Stop"))
found = 1
pr("%3d %-8s %-3s 0x%016x 0x%016x %6d %s\n" %
(id,
iff(type == Sim_Break_Physical,
"phys",
iff(type == Sim_Break_Virtual, "virt", "lin")) + "-" + acc,
iff(active,"yes","no"),
ranges[1],
ranges[2],
hits,
obj.name))
for r in range(3, len(ranges), 2):
print "%s0x%016x 0x%016x" % (" "*17, ranges[r], ranges[r + 1])
if activate_at > hits:
print " Ignore count:", number_str(activate_at - hits - 1, 10)
if prefix:
print " Prefix:", prefix
if substr:
print " Substr:", substr
if pattern:
print " Pattern: 0x%s, Mask: 0x%s" % (pattern, mask)
time_bp = 0
for cpu in all_processors():
for q in (Sim_Queue_Step, Sim_Queue_Time):
for (obj, desc, time) in cpu.event_desc[q]:
if not obj and desc.startswith("User breakpoint"):
if q == Sim_Queue_Time:
unit = "cycle"
else:
unit = "step"
if found and not time_bp:
print
time_bp = 1
print ("Breakpoint at %-5s %s (%s)"
% (unit, number_str(time, 10), cpu.name))
if not found and not time_bp:
print "No breakpoints set."
return
new_command("list-breakpoints", list_breakpoints_cmd,
[arg(flag_t, "-all")],
alias = ["ib", "info-breakpoints"],
type = ["Breakpoints", "Debugging"],
short = "print information about breakpoints",
see_also = ['<breakpoint>.break', 'delete', 'enable', 'ignore', 'set-prefix', 'set-substr', 'set-pattern'],
doc = """
Prints information about all breakpoints set. The following
information is printed for memory breakpoints: the id (used by other
commands to refer to the breakpoint), whether the breakpoint is set on
physical or virtual addresses, the access type (r = read, w =
write, or x = execute), whether it is enabled (see the <b>enable</b> command),
the address range of the breakpoint, how many times the breakpoint
has been triggered, and what memory space or context object it is set in.
If prefix, substring and/or pattern conditions are set, they will be
printed as well (see <b>set-prefix</b>, <b>set-substr</b> and
<b>set-pattern</b> command).
Time breakpoints are also listed.
If <arg>-all</arg> is passed as argument, <cmd>list-breakpoints</cmd> will also
list all internal breakpoints set for simulation purposes.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1471")
#
# -------------------- map --------------------
#
def map_cmd(obj):
map = obj.map
map.sort()
print "base object fn offs length"
for line in map:
print "0x%016x %-20s %2d 0x%-16x 0x%-16x" % (line[0], line[1].name,
line[2], line[3],
line[4])
if len(line) > 5:
# map in port-space does not have all fields
if line[5] and line[5] != line[1]:
print " target -> %s" % line[5].name
if line[6] != 0:
print " priority %d" % line[6]
if line[7] != 0:
output = " width %d bytes" % line[7]
if line[8] == Sim_Swap_Bus:
output += ", byte swap on bus width"
elif line[8] == Sim_Swap_Trans:
output += ", byte swap on transaction size"
elif line[8] == Sim_Swap_Bus_Trans:
output += ", byte swap on bus width and transaction size"
print output
try:
deftarg = obj.default_target
if deftarg:
print "%-18s %-20s %2d 0x%-16x %-18s" % ("- default -", deftarg[0].name,
deftarg[1], deftarg[2], '-')
if deftarg[3]:
print " target -> %s" % deftarg[3].name
except AttributeError:
pass
new_command("map", map_cmd,
[],
namespace = "memory-space",
type = ["Memory", "Configuration", "Inspecting Simulated State"],
short = "list memory map",
see_also = ['<memory-space>.add-map', '<memory-space>.del-map'],
doc = """
Prints the memory map of the memory space object, one line per entry
in the map attribute of the memory space. The <em>base</em> column is
the starting address of the map. The <em>object</em> column contains
the object mapped at that address. <em>fn</em> is the function number
and <em>offs</em> is the offset for the object. <em>length</em> is the
number of bytes mapped.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1534")
new_command("map", map_cmd,
[],
namespace = "port-space",
short = "list port map",
type = ["Memory", "Configuration", "Inspecting Simulated State"],
see_also = ['<port-space>.add-map', '<port-space>.del-map'],
doc = """
Prints the port map of the port space object, one line per entry
in the map attribute of the port space. The <em>base</em> column is
the starting address of the map. The <em>object</em> column contains
the object mapped at that address. <em>fn</em> is the function number
and <em>offs</em> is the offset for the object. <em>length</em> is the
number of bytes mapped.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1549")
#
# add-map / del-map in memory-space
#
swap_names = {}
swap_names['none'] = Sim_Swap_None
swap_names['bus'] = Sim_Swap_Bus
swap_names['bus-trans'] = Sim_Swap_Bus_Trans
swap_names['trans'] = Sim_Swap_Trans
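# For reference (inferred from add_map_cmd below, not a formal definition):
# each entry appended to a memory-space 'map' attribute has the layout
#   [base, object, function, offset, length, target, priority, align_size, swap]
# where 'swap' is one of the Sim_Swap_* values named in swap_names above.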
def add_map_cmd(space, object, base, length, fn, offset, target,
pri, align_size, swap):
if swap not in swap_names:
print "Unknown byte swapping requested: '%s'" % swap
SIM_command_has_problem()
return
try:
space.map += [[base, object, fn, offset, length, target, pri,
align_size, swap_names[swap]]]
except Exception, msg:
print "Failed mapping '%s' in '%s': %s" % (object.name,
space.name, msg)
SIM_command_has_problem()
return
else:
print "Mapped '%s' in '%s' at address 0x%x." % (object.name,
space.name, base)
def swap_expander(comp):
return get_completions(comp, swap_names)
new_command("add-map", add_map_cmd,
[arg(obj_t('object'), 'device'),
arg(uint64_t, 'base'),
arg(uint64_t, 'length'),
arg(int_t, 'function', '?', 0),
arg(uint64_t, 'offset', '?', 0),
arg(obj_t('object'), 'target', '?', None),
arg(int_t, 'priority', '?', 0),
arg(uint64_t, 'align-size', '?', 8),
arg(str_t, 'swap', '?', 'none', expander = swap_expander)],
namespace = "memory-space",
type = ["Memory", "Configuration"],
see_also = ['<memory-space>.map', '<memory-space>.del-map'],
short = "map device in a memory-space",
doc = """
Map <param>device</param> into a memory-space at address <param>base</param>
and with length <param>length</param>. Different mappings of the same device
may be identified by a device-specific <param>function</param> number. For
translator and bridge mappings, a <param>target</param> device should be given.
The mapping may specify an offset into the device's memory space, using the
<param>offset</param> argument. If several device mappings overlap, the
<param>priority</param> is used to select what device will receive memory
accesses. The priority is an integer between 0 and 255, where 0 is highest.
For devices that do not support large accesses, the <param>align-size</param>
governs how accesses are split before the device is accessed. A device mapping
may swap the bytes of an access based on the <param>swap</param> argument, that
should be one of <tt>none</tt>, <tt>bus</tt>, <tt>bus-trans</tt> and
<tt>trans</tt>. For a description of these, see the documentation of the
<attr>map</attr> attribute in the <class>memory-space</class> class.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1595")
def del_map_cmd(space, object, fn, base):
map = [x for x in space.map if
not (x[1] == object
and (fn == -1 or fn == x[2])
and (base == -1 or base == x[0]))]
if len(map) == len(space.map):
print "No matching mappings in %s." % (space.name)
return
try:
space.map = map
if fn == -1:
func_str = ""
else:
func_str = "%d " % fn
if base == -1:
addr_str = ""
else:
addr_str = "at 0x%x " % base
if fn == -1 and base == -1:
print "Removing all mappings of '%s' from '%s'." % (object.name,
space.name)
else:
print ("Removing mapping %sof '%s' %sfrom '%s'."
% (func_str, object.name, addr_str, space.name))
except Exception, msg:
print "Failed removing mappings for '%s' from '%s': %s" % (object.name,
space.name,
msg)
def mapped_objs_expander(comp, space):
objs = [x[1].name for x in space.map]
return get_completions(comp, objs)
new_command("del-map", del_map_cmd,
[arg(obj_t('object'), 'device', expander = mapped_objs_expander),
arg(int_t, 'function', '?', -1),
arg(int_t, 'base', '?', -1)],
namespace = "memory-space",
type = ["Memory", "Configuration"],
see_also = ['<memory-space>.map', '<memory-space>.add-map'],
short = "remove device map from a memory-space",
doc = """
Remove the mapping of <param>device</param> from a memory-space. If a function
number is given by the <param>function</param> argument, then only mappings
with a matching number are removed. If a base address is given by the
<param>base</param> argument, only mappings with a matching address are
removed. If both a <param>function</param> and a <param>base</param> address
are specified, then only mappings with a matching function number, at the
specified address, are removed.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1660")
#
# add-map / del-map for port-space
#
def add_map_cmd(space, object, base, length, fn, offset):
try:
space.map += [[base, object, fn, offset, length]]
except Exception, msg:
print "Failed mapping '%s' in '%s': %s" % (object.name,
space.name, msg)
SIM_command_has_problem()
return
else:
print "Mapped '%s' in '%s' at address 0x%x." % (object.name,
space.name, base)
new_command("add-map", add_map_cmd,
[arg(obj_t('object'), 'device'),
arg(uint64_t, 'base'),
arg(uint64_t, 'length'),
arg(int_t, 'function', '?', 0),
arg(uint64_t, 'offset', '?', 0)],
namespace = "port-space",
type = ["Memory", "Configuration"],
see_also = ['<port-space>.map', '<port-space>.del-map'],
short = "map device in a port-space",
doc = """
Map <param>device</param> into a port-space at address <param>base</param>
and with length <param>length</param>. Different mappings of the same device
may be identified by a device-specific <param>function</param> number. The
mapping may specify an offset into the device's memory space, using the
<param>offset</param> argument.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1694")
def del_map_cmd(space, object, fn):
map = [x for x in space.map if
not (x[1] == object and (fn == -1 or fn == x[2]))]
if len(map) == len(space.map):
print "No matching mappings in %s." % (space.name)
return
    try:
        space.map = map
if fn == -1:
print "Removing all mappings of '%s' from '%s'." % (object.name,
space.name)
else:
print "Removing mapping %d of '%s' from '%s'." % (fn, object.name,
space.name)
except Exception, msg:
print "Failed removing mappings for '%s' from '%s': %s" % (object.name,
space.name,
msg)
def mapped_objs_expander(comp, space):
objs = [x[1].name for x in space.map]
return get_completions(comp, objs)
new_command("del-map", del_map_cmd,
[arg(obj_t('object'), 'device', expander = mapped_objs_expander),
arg(int_t, 'function', '?', -1)],
namespace = "port-space",
type = ["Memory", "Configuration"],
see_also = ['<port-space>.map', '<port-space>.add-map'],
short = "remove device map from a port-space",
doc = """
Remove the mapping of <param>device</param> from a port-space. If a function
number is given by the <param>function</param> argument, then only mappings
with a matching number are removed.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1736")
#
# -------------------- set --------------------
#
def obj_set_cmd(obj, address, value, size, little_endian, big_endian):
if size < 1 or size > 8:
print "size must be 1-8 bytes."
return
try:
cpu = current_processor()
except:
# do not really need a processor
cpu = None
if little_endian and big_endian:
print "Cannot use both -l and -b."
return
if not little_endian and not big_endian and size > 1:
if not cpu:
print "When no processor exists, -l or -b has to be specified."
SIM_command_has_problem()
return
if cpu.big_endian:
big_endian = 1
else:
little_endian = 1
val_list = []
for i in range(size):
val_list.append(value & 0xff)
value = value >> 8
if big_endian:
val_list.reverse()
try:
ex = obj.iface.memory_space.write(obj, cpu,
address, tuple(val_list), 1)
if ex != Sim_PE_No_Exception:
print "Failed writing memory (exception %d)" % ex
SIM_command_has_problem()
except Exception, msg:
print "Failed writing memory: %s" % msg
SIM_command_has_problem()
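# Worked example of the byte-order handling above (values are arbitrary):
# writing value 0x11223344 with size 4 first yields
# val_list = [0x44, 0x33, 0x22, 0x11] (least significant byte first); with -b
# (big-endian) the list is reversed to [0x11, 0x22, 0x33, 0x44] before being
# passed to the memory-space write interface.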
def set_cmd(address, value, size, le, be):
obj_set_cmd(current_processor().physical_memory, address, value, size, le, be)
new_command("set", set_cmd,
[arg(int_t,"address"), arg(int_t,"value"), arg(int_t, "size", "?", 4),
arg(flag_t,"-l"), arg(flag_t,"-b")],
type = ["Memory", "Changing Simulated State"],
short = "set physical address to specified value",
see_also = ["get", "x", "pselect"],
doc = """
Set the <i>size</i> bytes of physical memory at location
<i>address</i> to <i>value</i>. The default <i>size</i> is 4 bytes,
but can be anywhere between 1 and 8 (inclusive).
If <i>value</i> is larger than the specified size, behavior is undefined.
The <i>-l</i> and <i>-b</i> flags are used to select little-endian and
big-endian byte order, respectively. If neither is given, the byte
order of the currently selected processor is used.
The non-namespace version of this command operates on the physical memory
associated with the current processor.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1800")
new_command("set", obj_set_cmd,
[arg(int_t,"address"), arg(int_t,"value"), arg(int_t, "size", "?", 4),
arg(flag_t,"-l"), arg(flag_t,"-b")],
short = "set physical address to specified value",
see_also = ["get", "signed"],
namespace = "memory-space",
doc_with = "set", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1821")
#
# -------------------- get --------------------
#
def obj_get_cmd(obj, address, size, little_endian, big_endian):
if size < 1 or size > 8:
print "size must be 1-8 bytes."
return
try:
cpu = current_processor()
except:
# do not really need a processor
cpu = None
if little_endian and big_endian:
print "Cannot use both -l and -b."
return
if not little_endian and not big_endian and size > 1:
if not cpu:
print "When no processor exists, -l or -b has to be specified."
SIM_command_has_problem()
return
if cpu.big_endian:
big_endian = 1
else:
little_endian = 1
try:
bytes = obj.iface.memory_space.read(obj, cpu,
address, size, 1)
except Exception, msg:
print "Failed reading memory: %s" % msg
SIM_command_has_problem()
return 0
# Make sure we have the msb in bytes[0]
if little_endian:
bytes = list(bytes)
bytes.reverse()
ret = 0
for x in bytes:
ret = (ret << 8) | x
return ret
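# Worked example of the read path above (arbitrary values): if the memory-space
# read returns the bytes (0x11, 0x22, 0x33, 0x44) and -l (little-endian) is in
# effect, the tuple is reversed to [0x44, 0x33, 0x22, 0x11] so that the most
# significant byte is folded in first, producing the value 0x44332211.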
def get_cmd(address, size, le, be):
return obj_get_cmd(current_processor().physical_memory, address, size, le, be)
new_command("get", get_cmd,
[arg(int_t,"address"), arg(int_t, "size", "?", 4),
arg(flag_t,"-l"), arg(flag_t,"-b")],
type = ["Memory", "Inspecting Simulated State"],
short = "get value of physical address",
pri = 1000,
see_also = ["x", "set", "signed"],
doc = """
Get value of physical memory location. The size argument specifies how
many bytes should be read. This defaults to 4, but can be any number
of bytes between 1 and 8 (inclusive).
The <i>-l</i> and <i>-b</i> flags are used to select little-endian and
big-endian byte order, respectively. If neither is given, the byte
order of the currently selected processor is used.
This command operates on the physical memory associated with the
current processor.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1879")
new_command("get", obj_get_cmd,
[arg(int_t,"address"), arg(int_t, "size", "?", 4),
arg(flag_t,"-l"), arg(flag_t,"-b")],
short = "get value of physical address",
namespace = "memory-space",
pri = 1000,
doc_with = "get", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1899")
#
# -------------------- read-reg --------------------
#
def obj_read_reg_cmd(cpu, reg_name):
try:
return local_read_int_register(cpu, reg_name)
except:
return SIM_read_register(cpu, SIM_get_register_number(cpu, reg_name))
def read_reg_cmd(cpu, reg_name):
if not cpu:
(cpu, _) = get_cpu()
return obj_read_reg_cmd(cpu, reg_name)
def read_default_reg_cmd(reg_name):
try:
(cpu, _) = get_cpu()
val = obj_read_reg_cmd(cpu, reg_name)
except:
(exception, value, traceback) = sys.exc_info()
raise CliError, ("'%' command: reading register '" + reg_name
+ "' failed (" + str(value) + ").\n\nIf you meant to "
+ "use a path like %simics%, check that you quote "
+ "the string properly, i.e., \"%simics%/...\"")
return val
def exp_regs(comp):
cpu,_ = get_cpu()
regs = [ SIM_get_register_name(cpu, r) for r in SIM_get_all_registers(cpu) ]
return get_completions(comp, regs)
def register_number_cmd(cpu, reg_name):
return SIM_get_register_number(cpu, reg_name)
new_command("register-number", register_number_cmd,
[arg(str_t, "reg-name", expander = exp_regs)],
type = ["Registers", "Inspecting Simulated State"],
short = "get the number of a processor register",
namespace = "processor",
see_also = ['%', 'read-reg'],
doc = """
Returns the register number for a named processor register.
The register number is used, for example, as an index for haps.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1942")
new_command("read-reg", read_reg_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(str_t, "reg-name", expander = exp_regs)],
type = ["Registers", "Inspecting Simulated State"],
short = "read a register",
namespace_copy = ("processor", obj_read_reg_cmd),
see_also = ['%', 'write-reg', 'pregs', 'pselect'],
doc = """
This command reads a CPU register. For example, to read the
<tt>eax</tt> register in an x86 processor called <obj>cpu0</obj>,
write <cmd>read-reg cpu0 eax</cmd>. You can also use the method
variant: <cmd>cpu0.read-reg eax</cmd>, or the more convenient variant
<cmd>%eax</cmd> that reads a register from the selected frontend CPU.
If no <param>cpu-name</param> is supplied, the current frontend
processor is used.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1952")
new_command("%", read_default_reg_cmd,
[arg(str_t, doc = "reg-name", expander = exp_regs)],
pri = 1000,
check_args = 0,
type = ["Registers", "Inspecting Simulated State"],
short = "read register by name",
repeat = read_default_reg_cmd,
see_also = ["read-reg", "write-reg", "pregs"],
doc ="""
Returns the value of the register <arg>reg-name</arg> for the current
processor. This is a convenient way to use register values in expressions like
<cmd>disassemble (%pc <math>-</math> 4*3) 10</cmd>.
Use <cmd>pselect</cmd> to select the current processor.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1969")
#
# -------------------- write-reg --------------------
#
def obj_write_reg_cmd(cpu, reg_name, value):
value = uint64_t([("int", value)])[0]
try:
local_write_int_register(cpu, reg_name, value)
except:
SIM_write_register(cpu, SIM_get_register_number(cpu, reg_name), value)
def write_reg_cmd(cpu, reg_name, value):
if not cpu:
(cpu, _) = get_cpu()
obj_write_reg_cmd(cpu, reg_name, value)
new_command("write-reg", write_reg_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(str_t, "reg-name", expander = exp_regs),
arg(integer_t, "value")],
type = ["Registers", "Changing Simulated State"],
short = "write to register",
namespace_copy = ("processor", obj_write_reg_cmd),
see_also = ['%', 'read-reg', 'pregs', 'pselect'],
doc = """
Use this command to set the value of a CPU register. For example, to
set the <tt>eax</tt> register on the x86 processor <obj>cpu0</obj> to
3, write <cmd>write-reg cpu0 eax 3</cmd>. You can also use the method
variant: <cmd>cpu0.write-reg eax 3</cmd>.
This function may or may not have the correct side-effects, depending
on target and register. If no <param>cpu-name</param> is given, the
current frontend processor is used.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2000")
#
# -------------------- trace-cr, break-cr --------------------
#
class base_cr_tracker(tracker):
def __init__(self, stop, cmd, short, doc, type, see_also = []):
tracker.__init__(self, stop, cmd, "register", self.expander, short, doc,
namespace = "processor",
see_also = see_also,
group = type,
expander_cpu = self.expander_cpu)
self.hap = "Core_Control_Register_Write"
self.map = {}
self.catchall = {}
def expander(self, comp, cpu):
iface = cpu.iface.int_register
regs = [ SIM_get_register_name(cpu, r)
for r in SIM_get_all_registers(cpu)
if iface.register_info(cpu, r, Sim_RegInfo_Catchable) ]
return get_completions(comp, regs)
def expander_cpu(self, comp):
return self.expander(comp, SIM_current_processor())
# These two are here so that they can be overridden
def get_register_number(self, obj, regname):
return SIM_get_register_number(obj, regname)
def get_register_name(self, obj, reg):
return SIM_get_register_name(obj, reg)
def filter(self, *args):
return SIM_simics_is_running()
def show(self, regname, obj, regno, value):
if not regname:
try:
regname = self.get_register_name(obj, regno)
except:
regname = "[%s] Unknown register %d" % (obj.name, regno)
if value < 0:
value += 1 << 64
print "[%s] %s <- %s" % (obj.name, regname, number_str(value, 16))
def list(self, obj):
if obj in self.catchall.keys():
print "[%s] %s enabled for all control registers" % (obj.name, iff(self.stop, "breaking", "tracing"))
else:
print "[%s] %s enabled for these control registers:" % (obj.name, iff(self.stop, "breaking", "tracing"))
if obj in self.map.keys():
for reg in self.map[obj].keys():
print " %s" % self.get_register_name(obj, reg)
def resolve_target(self, obj, regname):
return (regname, self.get_register_number(obj, regname))
def is_tracked(self, obj, target):
regname, regno = target
return ((obj in self.catchall.keys())
or (obj in self.map.keys() and self.map[obj].has_key(regno)))
def track_all(self, obj):
if self.catchall.has_key(obj):
return
if not (obj in self.map.keys()):
self.map[obj] = {}
for regno,hdl in self.map[obj].items():
SIM_hap_delete_callback_obj_id("Core_Control_Register_Write",
obj, hdl)
del self.map[obj][regno]
self.catchall[obj] = SIM_hap_add_callback_obj(
"Core_Control_Register_Write", # hap
obj, # trigger object
0, # flags
self.callback, # callback
None) # user value
def track_none(self, obj):
if (obj in self.catchall.keys()):
SIM_hap_delete_callback_obj_id("Core_Control_Register_Write",
obj,
self.catchall[obj])
del self.catchall[obj]
else:
if not (obj in self.map.keys()):
self.map[obj] = {}
for regno,hdl in self.map[obj].items():
SIM_hap_delete_callback_obj_id("Core_Control_Register_Write",
obj, hdl)
del self.map[obj][regno]
def track_on(self, obj, target):
regname, regno = target
if obj in self.catchall.keys():
print "[%s] Already %s all control registers" % (obj.name, iff(self.stop, "breaking on", "tracing"))
return
if self.is_tracked(obj, target):
print "[%s] Already %s %s" % (obj.name, iff(self.stop, "breaking on", "tracing"), regname)
return
if not obj.iface.int_register.register_info(obj, regno, Sim_RegInfo_Catchable):
print "[%s] Cannot %s on %s" % (obj.name, iff(self.stop, "break", "trace"), regname)
return
if not (obj in self.map.keys()):
self.map[obj] = {}
self.map[obj][regno] = SIM_hap_add_callback_obj_index(
"Core_Control_Register_Write", # hap
obj, # trigger object
0, # flags
self.callback, # callback
regname, # user value
regno) # index
def track_off(self, obj, target):
regname, regno = target
if obj in self.catchall.keys():
# All tracked, remove all
self.track_none(obj)
# Reinstall all catchable registers, except the one removed
iface = obj.iface.int_register
for r in SIM_get_all_registers(obj):
if r != regno:
if iface.register_info(obj, r, Sim_RegInfo_Catchable):
regname = SIM_get_register_name(obj, r)
self.track_on(obj, (regname, r))
return
if not self.is_tracked(obj, target):
print "[%s] Not %s %s" % (obj.name, iff(self.stop, "breaking on", "tracing"), regname)
return
SIM_hap_delete_callback_obj_id("Core_Control_Register_Write", obj,
self.map[obj][regno])
del self.map[obj][regno]
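# Implementation note on the tracker above (a description of the code, not of
# any documented Simics API contract): the tracker keeps either a single
# "catchall" Core_Control_Register_Write callback per CPU (track_all) or one
# callback per register number in self.map (track_on). Turning a single
# register off while the catchall is active therefore removes the catchall and
# re-registers individual callbacks for every other catchable register, which
# is what the loop in track_off implements.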
cr_tracker = base_cr_tracker
if "cr_tracker" in dir():
trace_cr_cmds = cr_tracker(0, "trace-cr",
short = "trace control register updates",
type = "inspect/change",
see_also = [ "break-cr" ],
doc = """
Enables and disables tracing of control register updates. When this
is enabled, every time the specified control register is updated
during simulation a message is printed. The message will name the
register being updated, and the new value. The new value will be
printed even if it is identical to the previous value.
The <i>reg-name</i> parameter specifies which control register should
be traced. The available control registers depend on the simulated
target.
Instead of a register name, the <tt>-all</tt> flag may be given. This
will enable or disable tracing of all control registers.
""")
break_cr_cmds = cr_tracker(1, "break-cr",
short = "break on control register updates",
type = "breakpoint",
see_also = [ "trace-cr", "<breakpoint>.break" ],
doc = """
Enables and disables breaking simulation on control register updates.
When this is enabled, every time the specified control register is
updated during simulation, the simulation is stopped and a message is
printed. The message will name the register being updated, and the new
value. The new value will be printed even if it is identical to the
previous value.
The <i>reg-name</i> parameter specifies which control register should
be broken on. The available control registers depend on the simulated
target.
Instead of a register name, the <tt>-all</tt> flag may be given. This
will enable or disable breaking on all control registers.
""")
#
# -------------------- trace-exception, break-exception --------------------
#
class exception_tracker(tracker):
def __init__(self, stop, cmd, short, doc, type, see_also = []):
tracker.__init__(self, stop, cmd, ((int_t, str_t), ("number", "name")),
(0, self.expander), short, doc,
group = type, see_also = see_also)
self.hap = "Core_Exception"
self.map = {}
self.catchall = 0
self.names = {}
def expander(self, comp):
try:
cpu = current_processor()
except:
return []
if self.names.has_key(cpu):
names = self.names[cpu]
else:
iface = cpu.iface.exception
names = [ iface.get_name(cpu, exc).replace(' ', '_')
for exc in iface.all_exceptions(cpu) ]
self.names[cpu] = names
return get_completions(comp, names)
# These two are here so that they can be overridden
def get_exception_number(self, excname, cpu = None):
if not cpu:
cpu = current_processor()
return cpu.iface.exception.get_number(cpu, excname)
def get_exception_name(self, exc, cpu = None):
if not cpu:
cpu = current_processor()
return cpu.iface.exception.get_name(cpu, exc)
def show(self, excname, cpu, excno):
if not excname:
excname = self.get_exception_name(excno, cpu)
print ("[%s] (@ cycle %s) Exception %d: %s"
% (cpu.name, number_str(SIM_cycle_count(cpu), 10),
excno, excname))
def list(self):
if self.catchall:
print "%s enabled for all exceptions" % iff(self.stop, "breaking", "tracing")
else:
print "%s enabled for these exceptions:" % iff(self.stop, "breaking", "tracing")
l = self.map.keys()
l.sort()
for exc in l:
print " %3d %s" % (exc, self.get_exception_name(exc))
def resolve_target(self, exc):
if type(exc) == type("hej"):
try:
name = exc
num = self.get_exception_number(exc)
except:
# some targets have spaces in the exception name
name = exc.replace('_', ' ')
num = self.get_exception_number(name)
else:
name = self.get_exception_name(exc)
num = exc
return (name, num)
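    # Note (based on resolve_target above): exception names given on the
    # command line may use '_' in place of spaces, since some targets have
    # spaces in their exception names; resolve_target first tries the name
    # as-is and then retries with underscores replaced by spaces.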
def is_tracked(self, target):
excname, excno = target
return self.catchall or self.map.has_key(excno)
def track_all(self):
if self.catchall:
return
for key,hdl in self.map.items():
SIM_hap_delete_callback_id(self.hap, hdl)
del self.map[key]
self.catchall = SIM_hap_add_callback(self.hap,
self.callback, None)
def track_none(self):
if self.catchall:
SIM_hap_delete_callback_id(self.hap, self.catchall)
self.catchall = 0
else:
for key,hdl in self.map.items():
SIM_hap_delete_callback_id(self.hap, hdl)
del self.map[key]
def track_on(self, target):
excname, excno = target
if self.catchall:
print "Already %s all exceptions" % iff(self.stop, "breaking", "tracing")
return
if self.is_tracked(target):
print "Already %s %s" % (iff(self.stop, "breaking", "tracing"), excname)
return
self.map[excno] = SIM_hap_add_callback_index(self.hap,
self.callback, excname,
excno)
def track_off(self, target):
excname, excno = target
if self.catchall:
# Remove the global exception handler
self.track_none()
# Install new handlers for all exceptions except the one turned off
cpu = current_processor()
for e in cpu.iface.exception.all_exceptions(cpu):
if e != excno:
self.track_on(self.resolve_target(e))
return
if not self.is_tracked(target):
print "Not %s %s" % (iff(self.stop, "breaking", "tracing"), excname)
return
SIM_hap_delete_callback_id(self.hap, self.map[excno])
del self.map[excno]
trace_exception_cmds = exception_tracker(0, "trace-exception",
short = "trace exceptions",
type = "inspect/change",
see_also = [ "break-exception" ],
doc = """
Enables and disables tracing of exceptions. When this
is enabled, every time the specified exception occurs
during simulation a message is printed.
The <i>exception</i> parameter specifies which exception should
be traced. The available exceptions depend on the simulated
target.
Instead of an exception, the <tt>-all</tt> flag may be given. This
will enable or disable tracing of all exceptions.
""")
break_exception_cmds = exception_tracker(1, "break-exception",
short = "break on CPU exceptions",
type = "breakpoint",
see_also = [ "trace-exception", "<breakpoint>.break" ],
doc = """
Enables and disables breaking simulation on exceptions.
When this is enabled, every time the specified exception occurs
during simulation a message is printed and the simulation stops.
The <i>exception</i> parameter specifies which exception should
be traced. The available exceptions depend on the simulated
target.
Instead of an exception, the <tt>-all</tt> flag may be given. This
will enable or disable breaking on all exceptions.
""")
#
# -------------------- trace-breakpoint --------------------
#
class base_bp_tracker(tracker):
def __init__(self, stop, cmd, short, doc, type, see_also = []):
tracker.__init__(self, stop, cmd, "breakpoint", self.expander, short, doc,
see_also = see_also, group = type)
self.hap = "Core_Breakpoint_Memop"
self.map = {}
self.catchall = 0
def expander(self, comp):
bp_list = conf.sim.breakpoints[:]
bp_ids = map(lambda x: str(x[0]), bp_list)
return get_completions(comp, bp_ids)
def show(self, arg, obj, id, memop):
step = None
if memop.ini_ptr:
step = SIM_step_count(memop.ini_ptr.queue)
val = ""
mem_op_type = SIM_get_mem_op_type(memop)
if mem_op_type == Sim_Trans_Store:
if memop.size <= 8:
val = number_str(SIM_get_mem_op_value_cpu(memop), 16) + " "
else:
val = ""
bits = ""
if mem_op_type != Sim_Trans_Instr_Fetch:
bits = "%d-bit " % (memop.size * 8)
print "breakpoint %d (step %s) %s %s%s@ %s" % (id, number_str(step, 10), SIM_get_mem_op_type_name(mem_op_type), bits, val, number_str(memop.logical_address, 16))
def list(self):
if self.catchall:
print "%s enabled for all breakpoints" % iff(self.stop, "breaking", "tracing")
else:
print "%s enabled for these breakpoints:" % iff(self.stop, "breaking", "tracing")
for id in self.map.keys():
print " %s" % id # TODO: Print as in list-breakpoints
def resolve_target(self, id):
return id
def is_tracked(self, id):
return self.catchall or self.map.has_key(id)
def track_all(self):
if self.catchall: return
for id,hdl in self.map.items():
SIM_hap_delete_callback_id(self.hap, hdl)
del self.map[id]
self.catchall = SIM_hap_add_callback(self.hap,
self.callback, None)
def track_none(self):
if self.catchall:
SIM_hap_delete_callback_id(self.hap, self.catchall)
self.catchall = 0
else:
for id,hdl in self.map.items():
SIM_hap_delete_callback_id(self.hap, hdl)
del self.map[id]
def track_on(self, id):
if self.catchall:
print "Already tracing all breakpoints"
return
if self.is_tracked(id):
print "Already tracing %s" % id
return
self.map[id] = SIM_hap_add_callback_index(self.hap,
self.callback, id, int(id))
def track_off(self, id):
if self.catchall:
print "Disabling tracing of all breakpoints"
self.track_none()
return
if not self.is_tracked(id):
print "Not tracing %s" % id
return
SIM_hap_delete_callback_id(self.hap, self.map[id])
del self.map[id]
bp_tracker = base_bp_tracker
if "bp_tracker" in dir():
trace_bp_cmds = bp_tracker(0, "trace-breakpoint",
short = "trace breakpoints",
type = "inspect/change",
see_also = [],
doc = """
Enables and disables tracing of breakpoints. When enabled, breakpoint
hits will be traced to the console instead of stopping the simulation.
The <i>id</i> parameter specifies the breakpoint to trace.
Instead of an id, the <tt>-all</tt> flag may be given. This
will enable or disable tracing of all breakpoints.
""")
#
# -------------------- pregs --------------------
#
def pregs_cmd(cpu, a):
if not cpu:
(cpu, _) = get_cpu()
local_pregs(cpu, a)
def obj_pregs_cmd(obj, all):
local_pregs(obj, all)
new_command("pregs", pregs_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(flag_t, "-all")],
type = ["Registers", "Inspecting Simulated State"],
short = "print cpu registers",
namespace_copy = ("processor", obj_pregs_cmd),
doc = """
Prints the current integer register file of the processor
<i>cpu_name</i>. If no CPU is specified, the current CPU will be
selected. The -all flag causes additional registers, such as control
registers and floating point registers, to be printed.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2466")
#
# -------------------- info --------------------
#
def common_processor_get_info(obj):
exec_mode = "normal"
if SIM_class_has_attribute(obj.classname, "turbo_execution_mode") and obj.turbo_execution_mode:
exec_mode = "turbo"
elif SIM_class_has_attribute(obj.classname, "ooo-mode"):
exec_mode = obj.ooo_mode
if obj.freq_mhz == int(obj.freq_mhz):
clock_freq = "%d" % obj.freq_mhz
else:
clock_freq = "%f" % obj.freq_mhz
try:
phys_mem = obj.physical_memory.name
except Exception, e:
phys_mem = None
ret = [("Execution mode", exec_mode),
("Clock frequency", "%s MHz" % clock_freq),
("CPI", "%.2f" % (1.0 * obj.step_rate[1] / obj.step_rate[0])),
("Physical memory", phys_mem)]
if SIM_class_has_attribute(obj.classname, "physical-io") and obj.physical_io != None:
ret += [("Physical I/O", obj.physical_io.name)]
return ret
def default_processor_get_info(obj):
return [ (None,
common_processor_get_info(obj)) ]
def processor_info_cmd(obj):
# Add default version if not already defined
obj_funcs = get_obj_funcs(obj)
if not obj_funcs.has_key('get_info'):
obj_funcs['get_info'] = default_processor_get_info
info_cmd(obj)
new_command("info", processor_info_cmd,
[],
short = "print information about the processor",
namespace = "processor",
doc = "Print detailed information about the configuration of the processor.", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2516")
#
# -------------------- diff, diff-gen --------------------
#
def diff_gen(cmd, file):
old_out = sys.stdout
try:
fd = open(file, "w")
except:
pr("Error while opening file %s for writing\n" % file)
return
sys.stdout = fd
try:
eval_cli_line(cmd)
sys.stdout = old_out;
fd.close()
except:
fd.close()
sys.stdout = old_out;
raise
new_command("diff-gen", diff_gen,
[arg(str_t, "cmd"), arg(filename_t(), "file", "?", "last-command-output.txt")],
type = "internal commands",
short = "diff command output",
doc = """
Writes the output of the command cmd to the file.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2546")
def diff(cmd, file):
diff_gen(cmd, file+".cmp")
os.system("diff "+file+" "+file+".cmp")
new_command("diff", diff,
[arg(str_t, "cmd"),
arg(filename_t(exist = 1), "file",
"?", "last-command-output.txt")],
type = "internal commands",
short = "diff command output",
doc = """
Uses system diff to compare the output in the file with the output of the
command cmd.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2557")
#
# -------------------- logical-to-physical --------------------
#
def logical_to_physical_cmd(cpu, laddr):
if not cpu:
(cpu, _) = get_cpu()
return translate_to_physical(cpu, laddr)
def obj_logical_to_physical_cmd(obj, laddr):
return translate_to_physical(obj, laddr)
new_command("logical-to-physical", logical_to_physical_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(addr_t,"address")],
alias = "l2p",
type = ["Memory", "Inspecting Simulated State"],
short = "translate logical address to physical",
namespace_copy = ("processor", obj_logical_to_physical_cmd),
doc = """
Translate the given logical <i>address</i> to a physical one. The
translation is performed as a read from processor <i>cpu-name</i>. On x86
a logical address can be given in the form
<segment register>:<offset>
or l:<linear address>. If no prefix is given, ds:<offset> will be
assumed. If the CPU is omitted the current CPU will be used. The
translation has no side-effects, even if it is not already present in
the TLB. The method variant of this command can also be used.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2579")
#
# -------------------- x --------------------
#
def x_cmd_repeat(cpu, addr_spec, length):
global _last_x_addr
_last_x_addr = (_last_x_addr[0], _last_x_addr[1] + length)
if type(cpu) is types.StringType:
(cpu, _) = get_cpu(cpu)
elif not cpu:
(cpu, _) = get_cpu()
cpu_x_cmd(cpu, _last_x_addr, length)
def x_cmd(cpu, addr_spec, length):
if not cpu:
(cpu, _) = get_cpu()
cpu_x_cmd(cpu, addr_spec, length)
def cpu_x_cmd(cpu, addr_spec, length):
global _last_x_addr
_last_x_addr = addr_spec
(kind, addr) = addr_spec
if kind == 'p':
src = physmem_source(cpu.physical_memory)
else:
src = virtmem_source(cpu.physical_memory, cpu, kind)
try:
hexdump(src, addr, length)
except Exception, msg:
print msg
def memory_space_x_cmd_repeat(obj, addr, length):
global _last_x_addr
_last_x_addr = _last_x_addr + length
memory_space_x_cmd(obj, _last_x_addr, length)
def memory_space_x_cmd(obj, address, length):
global _last_x_addr
_last_x_addr = address
try:
hexdump(physmem_source(obj), address, length)
except Exception, msg:
print msg
def get_address_prefix(cpu):
try:
f = get_obj_funcs(cpu)['get_address_prefix']
return f()
except KeyError, msg:
return "v"
def translate_to_physical(cpu, addr):
try:
f = get_obj_funcs(cpu)['translate_to_physical']
return f(cpu, addr)
except KeyError, msg:
if addr[0] not in ["", "v"]:
raise CliError, "Illegal address prefix '" + addr[0] + "'."
return SIM_logical_to_physical(cpu, 1, addr[1])
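# Note on translate_to_physical above: processors without a private
# 'translate_to_physical' handler fall back to SIM_logical_to_physical
# for plain virtual addresses. Illustrative calls with hypothetical
# values:
#   translate_to_physical(cpu, ("v", 0x2000))  ->  physical address
#   translate_to_physical(cpu, ("q", 0x2000))  ->  CliError, unknown prefix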
def any(pred, set):
return filter(pred, set) != []
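# Rough sketch of hexdump's output format (illustrative, not captured
# simulator output): each line starts with the source's address prefix
# and the line address, followed by the bytes grouped two and two and,
# finally, the printable-ASCII column, e.g.
#   p:0x00001000  4865 6c6c 6f20 776f 726c 6421 0000 0000  Hello world!....
# With align=True the first line is padded so that line addresses always
# fall on a bytes-per-line boundary.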
def hexdump(source, addr, length, align=True):
bpl = 16 # bytes per line
blen = bpl * 2 + bpl / 2
if addr + length - 1 <= 0xffffffff:
addr_fmt = "0x%08x"
else:
addr_fmt = "0x%016x"
addr_prefix = source.addr_prefix()
line = chars = prefix = ""
def flush():
if (line):
pr(prefix + line + ' ' * (blen - len(line) + 1) + tags + ' '
+ chars + '\n')
def line_prefix(addr):
if align:
addr &= ~(bpl - 1)
return addr_prefix + addr_fmt % addr + ' '
def get_tag(addr):
if have_tags:
return source.get_tag(addr)
else:
return ""
# Look for tagged memory (need to know if tag column should be displayed)
have_tags = any(source.have_tag, range(addr, addr + length, 16))
if align:
# Align printed lines on a "bytes-per-line" byte boundary
alignment = addr & (bpl - 1) # leftmost bytes to skip
line += ' ' * (alignment * 2 + (alignment + 1) // 2)
chars = ' ' * alignment
tags = get_tag(addr)
prefix = line_prefix(addr)
else:
alignment = 0
for i in range(length):
if ((i + alignment) % bpl) == 0:
flush()
prefix = line_prefix(addr + i)
line = chars = ""
tags = get_tag(addr + i)
if ((i + alignment) & 1) == 0:
line += ' '
val = source.get_byte(addr + i)
ch = "."
if isinstance(val, str):
line += val
else:
line += "%02x" % val
# Until we know how the terminal will display characters > 0x7e,
# don't print them (see bug 1177)
if 0x20 <= val < 0x7f:
ch = chr(val)
chars += ch
flush()
source.finish()
class physmem_source:
def __init__(self, obj):
self.obj = obj
self.unhandled = self.outside = self.tag_unavailable = 0
def addr_prefix(self):
return "p:"
def get_byte(self, addr):
try:
[byte] = self.obj.memory[addr]
except SimExc_InquiryUnhandled, msg:
self.unhandled = 1
return "??"
except SimExc_Memory, msg:
self.outside = 1
return "**"
return byte
def have_tag(self, addr):
try:
return VT_read_phys_memory_tags_mask(self.obj, addr, 1)
except SimExc_InquiryUnhandled, msg:
return 0
except SimExc_Memory, msg:
return 0
def get_tag(self, addr):
try:
if VT_read_phys_memory_tags_mask(self.obj, addr, 1):
return "%d" % SIM_read_phys_memory_tags(self.obj, addr, 1)
else:
self.tag_unavailable = 1
return '/'
except SimExc_InquiryUnhandled, msg:
return '?'
except SimExc_Memory, msg:
return '*'
def finish(self):
if self.outside:
pr("addresses marked \"**\" are outside physical memory\n")
if self.unhandled:
pr("addresses marked \"??\" do not support inquiry\n")
if self.tag_unavailable:
pr("addresses marked \"/\" do not have tags\n")
class virtmem_source(physmem_source):
def __init__(self, obj, cpu, kind):
self.obj = obj
self.cpu = cpu
self.kind = kind
self.unhandled = self.outside = self.no_translation = 0
self.tag_unavailable = 0
def addr_prefix(self):
if self.kind == "":
return get_address_prefix(self.cpu) + ":"
else:
return self.kind + ":"
def get_byte(self, addr):
try:
paddr = translate_to_physical(self.cpu, (self.kind, addr))
except SimExc_Memory, msg:
self.no_translation = 1
return "--"
return physmem_source.get_byte(self, paddr)
def have_tag(self, addr):
try:
paddr = translate_to_physical(self.cpu, (self.kind, addr))
except SimExc_Memory, msg:
self.no_translation = 1
return 0
return physmem_source.have_tag(self, paddr)
def get_tag(self, addr):
try:
paddr = translate_to_physical(self.cpu, (self.kind, addr))
except SimExc_Memory, msg:
return "-"
return physmem_source.get_tag(self, paddr)
def finish(self):
physmem_source.finish(self)
if self.no_translation:
pr("addresses marked \"--\" have no translation\n")
new_command("x", x_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(addr_t, "address"),
arg(int_t, "size", "?", 16)],
type = ["Memory", "Inspecting Simulated State"],
short = "examine raw memory contents",
namespace_copy = ("processor", cpu_x_cmd),
repeat = x_cmd_repeat,
see_also = ["disassemble", "get", "set"],
doc = """
Display the contents of a memory space starting at <arg>address</arg>. Either
the memory space is explicitly specified as in
<cmd><memory-space>.x</cmd> or the CPU connected to the memory space can be
specified; e.g., <cmd><processor>.x</cmd>. By itself, <cmd>x</cmd> operates
on the memory connected to the current frontend processor.
If the memory is accessed via a CPU, the type of <arg>address</arg> is
specified by a prefix. For physical addresses use
<cmd>p:<var>address</var></cmd>; for virtual addresses,
<cmd>v:<var>address</var></cmd> on non-x86 targets. On x86, use
<cmd><var>segment-register</var>:<var>offset</var></cmd> or
<cmd>l:<var>address</var></cmd> for x86 linear addresses.
If no prefix is given it will be interpreted as a virtual address. On x86 the
default is <cmd>ds:<var>address</var></cmd> (data segment addressing).
The <arg>size</arg> argument specifies the number of bytes to examine. When
examining virtual memory, only addresses which can be found in the TLB or
hardware page tables (if any) are shown. Unmapped addresses are shown
as "<tt>--</tt>", undefined physical addresses as "<tt>**</tt>".""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2812")
new_command("x", memory_space_x_cmd,
[arg(int_t, "address"), arg(int_t, "size", "?", 16)],
short = "examine raw memory contents",
namespace = "memory-space",
repeat = memory_space_x_cmd_repeat,
doc_with = "x", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2843")
def sum_mem(obj, cpu, type, addr, length, w8, w16, w32):
sum8 = 0
sum16 = 0
sum32 = 0
buff = []
if cpu and type in ["", "v"]:
type = get_address_prefix(cpu)
line = ""
last_byte = [0, 0, 0, 0]
for i in range(length):
outside = unhandled = 0
try:
if type == "" or type == "p":
phys_addr = addr + i;
else:
phys_addr = translate_to_physical(cpu, (type, addr + i))
[byte] = obj.memory[[phys_addr,phys_addr]]
except SimExc_InquiryUnhandled, msg:
print "Inquiry not handled"
return
except SimExc_Memory, msg:
print "Got exception reading memory"
return
if i != 0 and (i % 4) == 0:
sum32 = sum32 + ((last_byte[0] << 0) | (last_byte[1] << 8) | (last_byte[2] << 16) | (last_byte[3] << 24))
sum32 = sum32 & 0xFFFFFFFF
if i != 0 and (i % 2) == 0:
sum16 = sum16 + ((last_byte[(i - 2) % 4] << 0) | (last_byte[(i - 1) % 4] << 8))
sum16 = sum16 & 0xFFFF
last_byte[i % 4] = byte
sum8 = sum8 + byte
sum8 = sum8 & 0xFF
if w8 + w16 + w32 == 0:
w8 = 1
if w8:
print "Checksum 8-bit: %02x" % sum8
if w16:
if length % 2:
print "Range not a multiple of 2 bytes, %d bytes skipped" % (length % 2)
else:
last16 = (last_byte[(length - 2) % 4] << 0) | (last_byte[(length - 1) % 4] << 8)
sum16 = (sum16 + last16) & 0xFFFF
print "Last 16-bit: %04x" % last16
print "Checksum 16-bit: %04x" % sum16
if w32:
if length % 4:
print "Range not a multiple of 4 bytes, %d bytes skipped" % (length % 4)
else:
last32 = (last_byte[0] << 0) | (last_byte[1] << 8) | (last_byte[2] << 16) | (last_byte[3] << 24)
sum32 = (sum32 + last32) & 0x0FFFFFFFF
print "Checksum 32-bit: %08x" % sum32
def sum_cmd(cpu, addr_spec, length, w8, w16, w32):
if not cpu:
(cpu, _) = get_cpu()
try:
sum_mem(cpu.physical_memory, cpu, addr_spec[0], addr_spec[1], length, w8, w16, w32)
except Exception, msg:
print msg
def obj_sum_cmd(obj, address, size, w8, w16, w32):
sum_cmd(obj.name, address, size, w8, w16, w32)
new_command("sum", sum_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(addr_t, "address"),
arg(int_t, "size"),
arg(flag_t, "-w8"),
arg(flag_t, "-w16"),
arg(flag_t, "-w32")],
type = "inspect/change",
short = "sum a memory range",
deprecated = "python",
namespace_copy = ("processor", obj_sum_cmd),
doc = """
Sum a memory range. The width of the running sum is specified with the
<arg>-w8</arg> (default), <arg>-w16</arg>, or <arg>-w32</arg> flag, standing
for 8, 16, and 32 bits respectively.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2925")
#
# -------------------- disassemble --------------------
#
_last_disassemble_addr = 0
def disassemble_cmd_sub(cpu, addr, t, count):
global _last_disassemble_addr
for i in range(count):
instr_len = local_print_disassemble_line(cpu, addr, t, 0)
if instr_len == 0:
break
addr = addr + instr_len
# save the last address if repeat
_last_disassemble_addr = addr
def disassemble_cmd_rep(cpu, address, count):
global _last_disassemble_addr
if not cpu:
(cpu, _) = get_cpu()
if address[0] == "p":
t = 0
elif address[0] in ["v", "cs", ""]:
t = 1
else:
raise CliError, "Illegal address prefix '%s'." % address[0]
t = iff(address[0] == "p", 0, 1)
addr = _last_disassemble_addr
disassemble_cmd_sub(cpu, addr, t, count)
def disassemble_cmd(cpu, address, count):
if not cpu:
(cpu, _) = get_cpu()
if address[0] == "p":
t = 0
elif address[0] in ["v", "cs", ""]:
t = 1
else:
raise CliError, "Illegal address prefix '%s'." % address[0]
if count <= 0:
raise CliError, "Illegal instruction count."
t = iff(address[0] == "p", 0, 1)
if address[1] == -1:
if address[0] == "p":
addr = SIM_logical_to_physical(cpu, Sim_DI_Instruction, SIM_get_program_counter(cpu))
else:
addr = SIM_get_program_counter(cpu)
else:
addr = address[1]
disassemble_cmd_sub(cpu, addr, t, count)
new_command("disassemble", disassemble_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(addr_t, "address", "?", ("v",-1)),
arg(int_t, "count", "?", 1)],
alias = "da",
repeat = disassemble_cmd_rep,
type = ["Memory", "Inspecting Simulated State", "Execution"],
short = "disassemble instructions",
namespace_copy = ("processor", disassemble_cmd),
see_also = ["x", "disassemble-settings", "<processor>.aprof-views"],
doc = """
Disassembles <arg>count</arg> instructions starting at
<arg>address</arg> for processor <arg>cpu-name</arg>. If the processor
is not given the current frontend processor will be used. The method
variant can also be used to select a processor; e.g.,
<cmd>cpu0.disassemble</cmd>.
On some architectures, <arg>address</arg> must be word aligned. A
physical address is given by prefixing the address with <cmd>p:</cmd>
(e.g., <cmd>p:0xf000</cmd>). With no prefix, a virtual address will be
assumed. If the address is omitted the current program counter will be
used. <arg>count</arg> defaults to 1 instruction.
Global disassembly settings, such as whether to print the raw opcodes,
can be set by the <cmd>disassemble-settings</cmd> command.
This command will also include various profiling statistics for the
address of each instruction, one column for each profiler view listed
in the processor attribute <tt>aprof-views</tt>. For descriptions of
the columns, use the <cmd><processor>.aprof-views</cmd> command.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3002")
#
# -------------------- set-pc --------------------
#
def cpu_set_pc_cmd(cpu_obj, address):
try:
SIM_set_program_counter(cpu_obj, address)
except Exception, msg:
print "Failed setting program counter: %s" % msg
SIM_command_has_problem()
def set_pc_cmd(address):
try:
SIM_set_program_counter(current_processor(), address)
except Exception, msg:
print "Failed setting program counter: %s" % msg
SIM_command_has_problem()
new_command("set-pc", set_pc_cmd,
[arg(int_t, "address",)],
type = ["Registers", "Changing Simulated State", "Execution"],
short = "set the current processor's program counter",
doc = """
Set program counter (instruction pointer) of the CPU (defaults to
the current frontend processor) to <i>address</i>.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3051")
new_command("set-pc", cpu_set_pc_cmd,
[arg(int_t, "address",)],
namespace = "processor",
short = "set the program counter",
doc_with = "set-pc", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3060")
#
# -------------------- load-binary --------------------
#
def mem_space_load_binary_cmd(obj, the_file, poff, v, use_pa):
try:
return SIM_load_binary(obj, the_file, poff, use_pa, v)
except SimExc_IOError,msg:
print "Error reading kernel file '%s': %s" % (the_file, msg)
SIM_command_has_problem()
return
except SimExc_Memory, msg:
print msg
SIM_command_has_problem()
return
def load_binary_cmd(the_file, poff, v, use_pa, logical):
if logical:
mem = current_processor()
else:
mem = current_processor().physical_memory
return mem_space_load_binary_cmd(mem, the_file, poff, v, use_pa)
new_command("load-binary", load_binary_cmd,
[arg(filename_t(simpath = 1, exist = 1), "filename"),
arg(int64_t, "offset", "?", 0),
arg(flag_t, "-v"), arg(flag_t, "-pa"), arg(flag_t, "-l")],
type = ["Memory", "Changing Simulated State", "Execution"],
short = "load binary (executable) file into memory",
see_also = ["add-directory"],
doc = """
Load a binary (executable) file into the given physical or virtual memory
space. The supported formats are ELF, Motorola S-Record, PE32 and PE32+.
If an offset is supplied, it will be added to the load
address taken from the file.
By default the virtual load address from the file is used, but the
physical address can be used instead by specifying the
<param>-pa</param> flag.
The <param>-v</param> flag turns on verbose mode, printing information
about the loaded file.
When used as a global command, it will use the currently selected
processor to find the memory space to load the binary into. If the
<param>-l</param> flag is given, it will load it into the virtual
memory space, otherwise it will use the physical memory space.
When using the namespace command on a <class>processor</class> object,
it will load the binary into the virtual memory space of that
processor.
When using the namespace command on a <class>memory-space</class>
object, it will load the binary directly into that memory space.
The return value is the address of the execution
entry point. This value is typically used in a call to
<cmd>set-pc</cmd>.
<cmd>load-binary</cmd> uses Simics's Search Path and path markers (%simics%,
%script%) to find the file to load. Refer to the Simics User Guide (CLI chapter)
for more information on how Simics's Search Path is used to locate files.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3089")
new_command("load-binary", mem_space_load_binary_cmd,
[arg(filename_t(simpath = 1, exist = 1), "filename"), arg(int_t, "offset", "?", 0),
arg(flag_t, "-v"), arg(flag_t, "-pa")],
namespace = "memory-space",
short = "load binary (executable) file into memory",
doc_with = "load-binary", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3131")
new_command("load-binary", mem_space_load_binary_cmd,
[arg(filename_t(simpath = 1, exist = 1), "filename"), arg(int_t, "offset", "?", 0),
arg(flag_t, "-v"), arg(flag_t, "-pa")],
namespace = "processor",
short = "load binary (executable) file into memory",
doc_with = "load-binary", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3138")
#
# -------------------- load-file --------------------
#
def mem_space_load_file_cmd(obj, the_file, base_address):
try:
SIM_load_file(obj, the_file, base_address, 0)
return
except (SimExc_Memory, SimExc_IOError), msg:
print "Error loading '%s' to memory: %s." % (the_file, msg)
SIM_command_has_problem()
def load_file_cmd(the_file, base_address):
mem = current_processor().physical_memory
return mem_space_load_file_cmd(mem, the_file, base_address)
new_command("load-file", load_file_cmd,
[arg(filename_t(exist = 1, simpath = 1), "filename"), arg(int_t, "offset", "?", 0)],
type = ["Memory", "Changing Simulated State"],
see_also = ["add-directory"],
short = "load file into memory",
doc = """
Loads a file with name <i>filename</i> into the memory space
(defaulting to the current frontend processor's physical memory
space), starting at physical address <i>offset</i>. Default offset is
0.
<cmd>load-file</cmd> uses Simics's Search Path and path markers (%simics%,
%script%) to find the file to load. Refer to the Simics User Guide (CLI chapter)
for more information on how Simics's Search Path is used to locate files.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3163")
new_command("load-file", mem_space_load_file_cmd,
[arg(filename_t(exist = 1, simpath = 1), "filename"), arg(int_t, "offset", "?", 0)],
namespace = "memory-space",
short = "load file into memory",
doc_with = "load-file", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3180")
#
# -------------------- enable-real-time-mode --------------------
#
def set_real_time_mode_cmd(max_speed, check_interval):
assert_cpu()
SIM_set_attribute(conf.sim, "real-time-max-speed", max_speed)
SIM_set_attribute(conf.sim, "real-time-ms-interval", check_interval)
SIM_set_attribute(conf.sim, "real-time-enable", 1)
new_command("enable-real-time-mode", set_real_time_mode_cmd,
[arg(int_t, "speed", "?", 100),
arg(int_t, "check_interval", "?", 1000)],
alias = "",
type = ["Execution", "Speed"],
short = "set real time mode for Simics",
doc = """
In some cases Simics might run faster (in real time) than the machine
it simulates; this can happen if the OS is in a tight idle loop or an
instruction halts execution waiting for an interrupt.
Running faster than the simulated machine can cause problems for interactive
programs that might time out faster than the user can handle.
With the <cmd>enable-real-time-mode</cmd> command Simics will periodically check
its actual running speed and sleep for a while if it is too high.
This also reduces Simics's CPU usage.
<arg>speed</arg> specifies, as an integer percentage, how fast the simulated
machine is allowed to run compared to the host machine; the default is 100.
<arg>check_interval</arg> specifies how often the measurement should take place,
in milliseconds of simulated time; the default is 1000.
The <cmd>disable-real-time-mode</cmd> command deactivates any real-time behavior
and Simics will again run as fast as possible.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3195")
#
# -------------------- disable-real-time-mode --------------------
#
def disable_real_time_mode_cmd():
assert_cpu()
SIM_set_attribute(conf.sim, "real-time-enable", 0)
new_command("disable-real-time-mode", disable_real_time_mode_cmd,
[],
alias = "",
short = "disable real time mode for Simics",
doc_with = "enable-real-time-mode", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3229")
#
# -------------------- other memory-space commands --------------------
#
def get_memory_space_info(obj):
return [(None,
[("Snoop device", obj.snoop_device),
("Timing model", obj.timing_model)])]
new_info_command("memory-space", get_memory_space_info)
def get_port_space_info(obj):
return []
new_info_command("port-space", get_port_space_info)
#
# -------------------- context --------------------
#
def context_on_cmd(obj):
SIM_set_attribute(obj, "active", 1)
def context_off_cmd(obj):
SIM_set_attribute(obj, "active", 0)
new_command("on", context_on_cmd,
[],
namespace = "context",
type = ["Symbolic Debugging", "Debugging"],
short = "switch on context object",
doc = """
<b><context>.on</b> activates the effects of a context object,
i.e., breakpoints on virtual addresses. <b><context>.off</b> deactivates a
context object.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3262")
new_command("off", context_off_cmd,
[],
namespace = "context",
short = "switch off context object",
doc_with = "<context>.on", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3272")
def obj_set_context_cmd(cpu, ctx_name):
try:
ctx = SIM_get_object(ctx_name)
except SimExc_General:
ctx = SIM_new_object("context", ctx_name)
pr("New context '%s' created.\n" % ctx_name)
if ctx.classname != "context":
pr("%s is not a context object.\n" % ctx_name)
return
if ctx.symtable:
st = ctx.symtable
if not st.arch_compat[cpu.architecture]:
print ("Context's symtable %s has architecture %s; cannot attach."
% (st.name, st.arch))
return
st.arch = cpu.architecture
cpu.current_context = ctx
def set_context_cmd(ctx_name):
obj_set_context_cmd(current_processor(), ctx_name)
new_command("set-context", set_context_cmd,
[arg(str_t, "context",
expander = object_expander("context"))],
type = ["Symbolic Debugging", "Debugging"],
see_also = ['new-context', '<context>.symtable'],
short = "set the current context of a CPU",
doc = """
Sets the current context of the processor <i>cpu</i> (or the selected
cpu) to <i>context</i>. If the context does not exist, it is created.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3301")
new_command("set-context", obj_set_context_cmd,
[arg(str_t, "context",
expander = object_expander("context"))],
namespace = "processor",
short = "set the current context of a CPU",
doc_with = "set-context", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3311")
def new_context_cmd(name):
try:
SIM_get_object(name)
pr("An object called '%s' already exists.\n" % name)
return
except:
try:
SIM_new_object("context", name)
except:
pr("Error creating context '%s'\n" % name)
new_command("new-context", new_context_cmd,
[arg(str_t, "name")],
type = ["Symbolic Debugging", "Debugging"],
see_also = ['set-context', '<context>.symtable'],
short = "create a new context",
doc = """
Create a new context object called <i>name</i>. The context is initially not
bound to any processor.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3329")
def get_context_info(ctx):
return [(None,
[("Symbol table", ctx.symtable)])]
new_info_command("context", get_context_info)
def get_context_status(ctx):
return [(None,
[("Active", iff(ctx.active, "yes", "no"))])]
new_status_command("context", get_context_status)
def context_until_active(ctx, direction):
global _do_disassemble
def context_change(data, ctx, arg):
SIM_break_simulation("")
hap_handle = SIM_hap_add_callback_obj(
"Core_Context_Change", ctx, 0, context_change, None)
_do_disassemble = 1
try:
_started_sim()
if direction == 'reverse':
VT_reverse(0)
else:
SIM_continue(0)
finally:
SIM_hap_delete_callback_id("Core_Context_Change", hap_handle)
def context_run_until_active_cmd(ctx):
context_until_active(ctx, 'forward')
new_command("run-until-active", context_run_until_active_cmd,
[],
namespace = "context",
short = "run until context becomes active",
doc = """
Run until the context becomes active.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3372")
def context_reverse_until_active_cmd(ctx):
context_until_active(ctx, 'reverse')
new_command("reverse-until-active", context_reverse_until_active_cmd,
[],
namespace = "context",
short = "reverse until context becomes active",
doc = """
Reverse until the context becomes active.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3382")
step_cmds = ((True, "step-line", ["step", "s"],
Step_State_Command_Step,
"run to the next source line"),
(False, "step-instruction", ["stepi", "si"],
Step_State_Command_Stepi,
"run to the next instruction"),
(True, "reverse-step-line", ["rstep", "rs"],
Step_State_Command_Unstep,
"back to the previous source line"),
(False, "reverse-step-instruction", ["rstepi", "rsi",
"unstep-instruction", "ui"],
Step_State_Command_Unstepi,
"back to the previous instruction"),
(True, "next-line", ["next", "n"],
Step_State_Command_Next,
"run to the next source line, skipping subroutine calls"),
(True, "next-instruction", ["nexti", "ni"],
Step_State_Command_Nexti,
"run to the next instruction, skipping subroutine calls"),
(True, "reverse-next-line", ["rnext", "rn"],
Step_State_Command_Prev,
"back to the previous source line, skipping subroutine calls"),
(True, "reverse-next-instruction", ["rnexti", "rni"],
Step_State_Command_Previ,
"back to the previous instruction, skipping subroutine calls"),
(True, "finish-function", ["finish", "fin"],
Step_State_Command_Finish,
"finish the current function"),
(True, "uncall-function", ["uncall"],
Step_State_Command_Uncall,
"go back to when the current function was called"))
def step_cmd(ctx, cmd):
global _do_disassemble
def src_step_callback_ordered(ctx, p1, p2):
SIM_break_simulation("")
def src_step_callback(data, ctx, cpu):
VT_in_time_order(ctx, src_step_callback_ordered, 0, 0)
if ctx == None:
ctx = SIM_current_processor().current_context
run = ctx.iface._source_step.source_step(ctx, cmd)
if (run & (Step_Flag_Forward | Step_Flag_Backward)) != 0:
_do_disassemble = 1
hap_handle = SIM_hap_add_callback(
"Core_Source_Step", src_step_callback, None)
try:
if run == Step_Result_Run_Backward:
VT_reverse(0)
else:
SIM_continue(0)
finally:
SIM_hap_delete_callback_id("Core_Source_Step", hap_handle)
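# The loop below registers one <context> namespace command (and, for the
# entries flagged as global, a global command) per entry in step_cmds.
# The nested lambda, e.g.
#   (lambda cmd: lambda ctx: step_cmd(ctx, cmd))(cmd)
# binds the current value of 'cmd' into the generated function; a plain
# "lambda ctx: step_cmd(ctx, cmd)" would capture the loop variable by
# reference and every generated command would use the last value of cmd.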
for glob, name, alias, cmd, desc in step_cmds:
for namespace in ["context"] + [[], [""]][glob]:
if namespace:
f = (lambda cmd: lambda ctx: step_cmd(ctx, cmd))(cmd)
else:
f = (lambda cmd: lambda: step_cmd(None, cmd))(cmd)
new_command(name, f, [],
type = ["Symbolic Debugging", "Debugging"],
alias = alias,
namespace = namespace,
repeat = f,
see_also = ["<context>.%s" % n for (g, n, a, c, d)
in step_cmds if n != name],
short = desc,
doc = """
<b>step-line</b> causes the simulation to run until it reaches another
source line. <b>reverse-step-line</b> does the same thing, except for
running backwards.
<b>next-line</b> causes the simulation to run until it reaches another
source line, but will not stop in subroutine calls.
<b>reverse-next-line</b> is the same as <b>next-line</b>, except for
running backwards. <b>next-instruction</b> and
<b>reverse-next-instruction</b> are like <b>next-line</b> and
<b>reverse-next-line</b>, respectively, except for stepping just one
instruction instead of an entire source line.
<b>finish-function</b> causes the simulation to run until the current
function has returned. <b>uncall-function</b> causes the simulation to
run backwards until just before the current function was called.
These commands can either be called as context namespace commands,
e.g., <i>context</i><tt>.step-line</tt>, in which case the command
will apply to that context; or not, e.g., <tt>step-line</tt>, in which
case the command will operate on the current context of the current
processor.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3448")
#
# ----------------------- devs -----------------------
#
def devs_cmd(objname):
all_objs = SIM_get_all_objects()
if objname == "":
list = all_objs
else:
try:
list = [ SIM_get_object(objname) ]
except Exception:
print "No device object: %s" % objname
return
first = 1
for obj in list:
try:
count = obj.access_count
except Exception, e:
continue
dev_name = obj.name
dev_str = "%8d %-13s " % (count, dev_name)
for obj2 in all_objs:
try:
map = obj2.map
except Exception, e:
continue
map.sort()
for line in map:
if line[1].name == dev_name:
if first:
print " Count Device Space Range Func"
first = 0
pr(dev_str)
dev_str = " "
pr("%-16s 0x%016x - 0x%016x %4d\n" % (
obj2.name,
line[0],
long(line[0]) + line[4] - 1,
line[2]))
if first:
print "No mappings found"
new_command("devs", devs_cmd,
args = [arg(str_t, "object-name", "?", "", expander = conf_object_expander)],
type = ["Configuration", "Inspecting Simulated State"],
short = "list all devices in Simics",
doc = """
Print a list of all devices in Simics, with information about how
many times each device has been accessed, and where it is mapped.
The mappings are presented as start and end offsets within a
named memory space. The function number associated with each
different mapping for a device is also printed.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3525")
#
# -------------------- break-io, trace-io --------------------
#
class io_tracker(tracker):
def __init__(self, stop, cmd, short, doc, type, see_also = []):
tracker.__init__(self, stop, cmd, "device", self.expander,
short, doc, group = type, see_also = see_also)
self.map = {}
def show(self, arg, obj, memop):
if SIM_mem_op_is_from_cpu(memop):
cpu = memop.ini_ptr
else:
cpu = current_processor()
if memop.size <= 8:
if cpu.big_endian:
value = number_str(SIM_get_mem_op_value_be(memop), 16)
else:
value = number_str(SIM_get_mem_op_value_le(memop), 16)
value_string = "%s (%s)" % (value, iff(cpu.big_endian, "BE", "LE"))
else:
value_string = ""
print ("[%s -> %s] %s: %s %d %s"
% ((memop.ini_ptr and memop.ini_ptr.name), obj.name,
iff(SIM_mem_op_is_read(memop), "Read", "Write"),
number_str(memop.physical_address, 16),
memop.size, value_string))
def list(self):
print "%s enabled for these devices:" % iff(self.stop, "breaking", "tracing")
for obj in self.map.keys():
print " %s" % obj.name
def resolve_target(self, target):
return SIM_get_object(target)
def is_tracked(self, obj):
return self.map.has_key(obj)
def is_device(self, obj):
try:
obj.log_buffer
return 1
except:
return 0
def get_all_devices(self):
return filter(self.is_device, SIM_get_all_objects())
def expander(self, string):
return get_completions(string, [ obj.name for obj in self.get_all_devices() ]);
def install_hap(self, obj):
r = SIM_hap_add_callback_obj("Core_Device_Access_Memop", obj, 0, self.callback, None)
self.map[obj] = r
def uninstall_hap(self, obj):
r = self.map[obj]
SIM_hap_delete_callback_obj_id("Core_Device_Access_Memop", obj, r)
del self.map[obj]
def track_all(self):
for obj in self.get_all_devices():
if not self.is_tracked(obj):
self.install_hap(obj)
def track_none(self):
for obj in self.map.keys():
self.uninstall_hap(obj)
def track_on(self, obj):
if self.is_tracked(obj):
return
if not self.is_device(obj):
print "I/O breakpoints are not possible on %s" % obj.name
return
self.install_hap(obj)
def track_off(self, obj):
if self.is_tracked(obj):
self.uninstall_hap(obj)
trace_io_cmds = io_tracker(0, "trace-io",
short = "trace device accesses",
type = "inspect/change",
see_also = [ "break-io" ],
doc = """
Enables and disables tracing of device accesses. When this is
enabled, every time the specified device is accessed during simulation
a message is printed.
The <i>device</i> parameter specifies the device object that should be
traced.
Instead of an object name, the <tt>-all</tt> flag may be given. This
will enable or disable tracing of all devices.
""")
break_io_cmds = io_tracker(1, "break-io",
short = "break on device accesses",
type = "breakpoint",
see_also = [ "trace-io", "<breakpoint>.break" ],
doc = """
Enables and disables breaking simulation on device accesses. When
this is enabled, every time the specified device is accessed during
simulation a message is printed and the simulation stops.
The <i>device</i> parameter specifies which device object should be
traced.
Instead of an object name, the <tt>-all</tt> flag may be given. This
will enable or disable breaking on accesses to all devices.
""")
#
# -------------------- magic-break-enable --------------------
#
def magic_enable_cmd():
VT_magic_break_enable(1)
def magic_disable_cmd():
VT_magic_break_enable(0)
def magic_query_cmd():
return VT_magic_break_query()
new_command("magic-break-enable", magic_enable_cmd,
[],
type = ["Breakpoints", "Debugging", "Profiling"],
short = "install magic instruction hap handler",
doc = """
Installs (<tt>magic-break-enable</tt>) or removes
(<tt>magic-break-disable</tt>) the magic breakpoint handler. A magic
breakpoint is a magic instruction with argument 0, except on the SPARC
target, where it is a magic instruction with the top 6 bits of the
22-bit parameter field set to 000100 (binary). Note that <tt>break-hap
Core_Magic_Instruction</tt> will break on all magic instructions.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3669")
new_command("magic-break-disable", magic_disable_cmd,
[],
short = "remove magic instruction hap handler",
doc_with = "magic-break-enable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3682")
new_command("magic-break-query", magic_query_cmd,
[],
short = "returns 1 if magic instruction handler is installed",
doc_with = "magic-break-enable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3687")
#
# -------------------- instruction-profile-mode --------------------
#
def ipm_expander(string):
return get_completions(string, ["no-instruction-profile", "instruction-branch-profile", "instruction-cache-access-trace","instruction-fetch-trace"])
ipm_mode_translation = {"no-instruction-profile" : "no-instruction-fetch",
"instruction-branch-profile" : "no-instruction-fetch",
"instruction-cache-access-trace" : "instruction-cache-access-trace",
"instruction-fetch-trace" : "instruction-fetch-trace"}
def obsolete_ipm_hap(mode, obj):
for cpu in SIM_get_all_processors():
cpu.instruction_fetch_mode = mode
def obsolete_ipm(mode):
print "If you want to profile instructions, use the instruction profiling"
print "system described in the User Guide."
print
print "If you want your memory hierarchy to receive instruction fetches,"
print "use the instruction-fetch-mode command."
print
print "This command will now set the instruction-fetch-mode on all CPUs"
print "according to the mode given as argument."
if mode:
if (SIM_initial_configuration_ok()):
obsolete_ipm_hap(ipm_mode_translation[mode], None)
else:
SIM_hap_add_callback("Core_Initial_Configuration", obsolete_ipm_hap, ipm_mode_translation[mode])
new_command("instruction-profile-mode", obsolete_ipm,
args = [arg(str_t, "mode", "?", "", expander = ipm_expander)],
alias = "ipm",
type = "deprecated commands",
deprecated = "instruction-fetch-mode",
short="set or get current mode for instruction profiling",
doc = """
Sets the instruction fetch mode of all CPUs in the system according to the mode passed as argument.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3725")
def instruction_fetch_mode(cpu, mode):
if mode:
cpu.instruction_fetch_mode = mode
else:
print cpu.name + ":", cpu.instruction_fetch_mode
def ifm_expander(string):
return get_completions(string, ["no-instruction-fetch", "instruction-cache-access-trace", "instruction-fetch-trace"])
new_command("instruction-fetch-mode", instruction_fetch_mode,
args = [arg(str_t, "mode", "?", "", expander = ifm_expander)],
alias = "ifm",
type = ["Execution", "Profiling", "Tracing"],
namespace = "processor",
short="set or get current mode for instruction fetching",
doc = """
This command selects how instruction fetches are sent to the memory hierarchy
during simulation. If set to <i>no-instruction-fetch</i>, the memory hierarchy
won't receive any instruction fetches. If set to
<i>instruction-cache-access-trace</i>, the memory hierarchy will receive one
(and only one) instruction fetch every time a new cache line is accessed. The
size of this cache line is defined by the attribute
<i>instruction-fetch-line-size</i> in the processor object. If set to
<i>instruction-fetch-trace</i>, all instruction fetches will be visible. Note
that on x86 targets, <i>instruction-cache-access-trace</i> is not available. On
some other targets (e.g., sparc-v9), <i>instruction-fetch-trace</i> is actually
<i>instruction-cache-access-trace</i> with a line size equal to the instruction
size. Using this command without an argument prints the current
mode.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3744")
def instruction_fetch_mode_global(mode):
for cpu in SIM_get_all_processors():
instruction_fetch_mode(cpu, mode)
new_command("instruction-fetch-mode", instruction_fetch_mode_global,
args = [arg(str_t, "mode", "?", "", expander = ifm_expander)],
alias = "ifm",
type = ["Execution", "Profiling", "Tracing"],
short="set or get current mode for instruction fetching",
doc_with = "<processor>.instruction-fetch-mode", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3770")
def istc_enable():
try:
if conf.sim.istc == "off":
print "Turning I-STC on"
conf.sim.istc = "on"
else:
print "I-STC was already turned on"
except:
print "Enable I-STC failed. Make sure a configuration is loaded."
new_command("istc-enable", istc_enable,
args = [],
type = ["Execution", "Speed"],
short="enable I-STC",
group_short = "enable or disable internal caches",
doc = """
These commands are for advanced users only. They allow the user to control the
usage of Simics internal caches. The Simulator Translation Caches (STCs) are
designed to increase execution performance. The D-STC caches data translations
(logical to physical to real (host) address), while the I-STC caches
instruction translations of taken jumps. By default the STCs are <b>on</b>.
When a memory hierarchy is connected (such as a cache module), it must have been
designed to work along with the STCs; otherwise it may not be called for all
the memory transactions it is interested in. These commands can be used to
detect if too many translations are kept in the STCs, causing the simulation
to be faulty. Turning the STCs off means that current contents will be
flushed and no more entries will be inserted into the STCs.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3787")
def istc_disable():
try:
if conf.sim.istc == "on":
print "Turning I-STC off and flushing old data"
conf.sim.istc = "off"
else:
print "I-STC was already turned off"
except:
print "Disable istc failed. Make sure a configuration is loaded."
new_command("istc-disable", istc_disable,
args = [],
short = "disable I-STC",
doc_with = "istc-enable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3816")
def dstc_enable():
try:
if conf.sim.dstc == "off":
print "Turning D-STC on"
conf.sim.dstc = "on"
else:
print "D-STC was already turned on"
except:
print "Enable D-STC failed. Make sure a configuration is loaded."
new_command("dstc-enable", dstc_enable,
args = [],
short = "enable D-STC",
doc_with = "istc-enable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3831")
def dstc_disable():
try:
if conf.sim.dstc == "on":
print "Turning D-STC off and flushing old data"
conf.sim.dstc = "off"
else:
print "D-STC was already turned off"
except:
print "Disable D-STC failed. Make sure a configuration is loaded."
new_command("dstc-disable", dstc_disable,
args = [],
short = "disable D-STC",
doc_with = "istc-enable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3846")
def stc_status():
try:
if conf.sim.dstc == "on":
print "D-STC is currently *ON*"
else:
print "D-STC is currently *OFF*"
if conf.sim.istc == "on":
print "I-STC is currently *ON*"
else:
print "I-STC is currently *OFF*"
except:
print "Failed getting stc status. Make sure a configuration is loaded."
new_command("stc-status", stc_status,
args = [],
short = "show I- and D-STC status",
doc_with = "istc-enable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3864")
def default_address_not_mapped_handler(arg, space, pa, access_type, size):
print "Access at 0x%x in %s where nothing is mapped." % (pa, space.name)
raise SimExc_Break, "Address not mapped"
try:
SIM_hap_add_callback("Core_Address_Not_Mapped",
default_address_not_mapped_handler, None)
except:
import sys
sys.stderr.write("Core_Address_Not_Mapped install failed. "
"(Expected during doc-build.)\n")
#
# ram commands
#
def get_ram_info(obj):
size = obj.image.size
mult = ''
if (size % 1024) == 0:
size = size // 1024
mult = 'K'
if (size % 1024) == 0:
size = size // 1024
mult = 'M'
if (size % 1024) == 0:
size = size // 1024
mult = 'G'
return [ (None,
[ ("Image", obj.image),
("Image size", "%d%s" % (size, mult)) ] ) ]
sim_commands.new_info_command('ram', get_ram_info)
sim_commands.new_info_command('rom', get_ram_info)
# No status command, since there is no status, really
class image_source:
def __init__(self, image):
self.image = image
self.outside = 0
def addr_prefix(self):
return ""
def get_byte(self, addr):
try:
return self.image.byte_access[addr]
except:
self.outside = 1
return "--"
def have_tag(self, addr):
return 0
def finish(self):
if self.outside:
pr("addresses marked \"--\" are outside the image\n")
def image_x_cmd_repeat(obj, offset, length):
global _last_image_x_addr
_last_image_x_addr += length
image_x_cmd(obj, _last_image_x_addr, length)
def image_x_cmd(obj, offset, length):
global _last_image_x_addr
_last_image_x_addr = offset
hexdump(image_source(obj), offset, length, align=False)
new_command("x", image_x_cmd,
[arg(int_t, "offset"), arg(int_t, "size", "?", 16)],
type = ["Memory", "Inspecting Simulated State"],
short = "examine image data",
namespace = "image",
repeat = image_x_cmd_repeat,
doc = """
Displays <arg>size</arg> bytes starting at <arg>offset</arg> in the
image.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3939")
def image_set_cmd(image, address, value, size, little_endian, big_endian):
if size < 1 or size > 8:
print "size must be 1-8 bytes."
return
if little_endian and big_endian:
print "Cannot use both -l and -b."
return
if not little_endian and not big_endian:
if current_processor().big_endian:
big_endian = 1
else:
little_endian = 1
if address + size > image.size:
print "address outside image"
return
values = [ (value >> (i * 8)) & 0xff for i in range(size) ]
if big_endian:
values.reverse()
for i in range(size):
image.byte_access[address + i] = values[i]
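# Byte-order example for image_set_cmd above (hypothetical values): with
# value = 0x11223344 and size = 4, the byte list becomes
# [0x44, 0x33, 0x22, 0x11]; with -l these bytes are written to address,
# address+1, ... as-is (little-endian), while -b reverses the list first
# so that 0x11 ends up at the lowest address (big-endian).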
new_command("set", image_set_cmd,
[arg(int_t, "address"),
arg(int_t, "value"),
arg(int_t, "size", "?", 4),
arg(flag_t, "-l"), arg(flag_t, "-b")],
type = ["Memory", "Changing Simulated State", "Disk"],
short = "set bytes in image to specified value",
see_also = ["set"],
namespace = "image",
doc = """
Sets <arg>size</arg> bytes in an image at offset <arg>address</arg> to
<arg>value</arg>. The default <arg>size</arg> is 4 bytes, but can be anywhere
between 1 and 8 inclusive.
If <arg>value</arg> does not fit in the specified size, the excess high-order bits are silently discarded.
The <arg>-l</arg> and <arg>-b</arg> flags are used to select little-endian and
big-endian byte order, respectively. If neither is given, the byte order of the
currently selected processor is used.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3975")
def image_save(image, filename, start, length):
try:
image.iface.image.save_to_file(image, filename, start, length)
except Exception, msg:
print msg
new_command("save", image_save,
[ arg(filename_t(), "filename"),
arg(int_t, "start-byte", "?", 0),
arg(int_t, "length", "?", 0) ],
type = ["Memory", "Disk", "Configuration"],
short = "save image to disk",
namespace = "image",
doc = """
Writes the image binary data (in raw form) to <arg>filename</arg>. If
<arg>start-byte</arg> and/or <arg>length</arg> are given, they specify the start
offset and number of bytes to write respectively; otherwise, the whole image is
copied.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4002")
def image_save_diff(image, filename):
try:
os.stat(filename)
print "%s already exists." % filename
SIM_command_has_problem()
return
except:
pass
try:
image.iface.image.save_diff(image, filename)
except Exception, msg:
print msg
new_command("save-diff-file", image_save_diff,
[ arg(filename_t(), "filename") ],
type = ["Memory", "Disk", "Configuration"],
short = "save changes since last checkpoint",
see_also = ['<image>.add-diff-file', '<image>.save'],
namespace = 'image',
doc = """
Writes changes to the image since the last checkpoint to a named file
in craff format.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4028")
def image_add_partial_diff(image, filename, start, size):
files = image.files
files += [[filename, "ro", start, size]]
try:
image.files = files
except Exception, msg:
print "Error adding", filename, "to", image.name, ":", msg
SIM_command_has_problem()
def image_add_diff(image, filename):
image_add_partial_diff(image, filename, 0, 0)
new_command("add-diff-file", image_add_diff,
[ arg(filename_t(), "filename") ],
type = ["Memory", "Disk", "Configuration"],
short = "add a diff file to the image",
see_also = ['<image>.save-diff-file'],
namespace = "image",
doc = """
Adds a diff file to the list of files for an image. The diff file was typically
created with <cmd>save-diff-file</cmd>, or by a saved configuration.
This should not be done if the image contains changed (unsaved) data.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4050")
new_command("add-partial-diff-file", image_add_partial_diff,
[ arg(filename_t(), "filename"),
arg(int_t, "start"),
arg(int_t, "size") ],
type = ["Memory", "Disk", "Configuration"],
short = "add a partial diff file to the image",
see_also = ['<image>.add-diff-file', '<image>.save-diff-file'],
namespace = "image",
doc = """
Adds a partial diff file to the list of files for an image. The diff
file was typically created with the 'save-diff-file' command, by one of
the dump-*-partition commands, or by a saved configuration.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4061")
def binary_amount(n):
suffixes = "Byte kB MB GB TB".split()
import math
if n <= 0:
index = 0
else:
index = min(int(math.log(n, 1024)), len(suffixes) - 1)
return "%.4g %s" % (n / float(1 << (index * 10)), suffixes[index])
def set_memory_limit_cmd(limit, swapdir):
if limit == None and not swapdir:
lim = SIM_get_class_attribute("image", "memory-limit")
if lim:
print "Image memory limited to", binary_amount(lim)
else:
print "Image memory not limited"
sd = conf.prefs.swap_dir
print "Swap directory:", sd
return
if limit < 0:
SIM_command_has_problem()
print "Illegal memory limit."
return
elif limit > 0:
limit = limit << 20
if limit > conf.sim.host_phys_mem:
print "Warning: Limit larger than the amount of physical memory."
return
sim.classes['image'].classattrs.memory_limit = limit
print "Image memory limited to", binary_amount(limit)
elif limit == 0:
sim.classes['image'].classattrs.memory_limit = limit
print "Image memory not limited."
if swapdir:
conf.prefs.swap_dir = swapdir
# swap dir may be mangled, so print out the result
print "Swap dir set to", conf.prefs.swap_dir
new_command("set-memory-limit", set_memory_limit_cmd,
[ arg(int_t, "limit", "?", None),
arg(str_t, "swapdir", "?", None) ],
type = "image commands",
short = "limit memory usage",
doc = """
Limits the in-memory footprint of all image objects to <arg>limit</arg>
megabytes. This only limits the memory consumed by image pages in memory.
While this is typically a very large part of Simics's memory usage, other
data structures are not limited by this command.
If <arg>limit</arg> is zero, the memory limit is removed.
If <arg>swapdir</arg> is specified, it indicates a directory to use for
swap files. If no argument is given, the current setting is
displayed.
Simics sets a default memory limit at startup that depends on the amount of
memory and the number of processors on the host system.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4115")
def set_default_limit():
total = conf.sim.host_phys_mem
ncpus = conf.sim.host_num_cpus
if total > sys.maxint:
# in 32-bit simics limit to 2GB (more might be OK on some systems)
total = sys.maxint
# remove 64MB for system
total -= 0x4000000
# lowest limit is 256MB, or the amount of memory in the system
min_total = min(total, 0x10000000)
# use less memory on multi-pro
ncpus = (ncpus + 1) / 2
# leave some memory for non-image stuff
total = int(max(total * 0.7 / ncpus, min_total)) & ~0xfffff
SIM_set_class_attribute("image", "memory-limit", total)
set_default_limit()
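# Worked example of the arithmetic in set_default_limit() (illustrative
# assumption: a 64-bit host with 4 GB of physical memory and 4 host CPUs):
#   total     = 4 GB - 64 MB                      = 4227858432
#   min_total = min(total, 256 MB)                = 268435456
#   ncpus     = (4 + 1) / 2                       = 2
#   limit     = int(max(total * 0.7 / 2, min_total)) & ~0xfffff
#             = 1479540736 bytes (1411 MB), rounded down to a 1 MB boundary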
#
# ---------------------- data profiler commands ---------------------
#
def data_profiler_clear_cmd(obj):
try:
dpi = obj.iface._data_profiler
except:
pr("%s is not a data profiler!\n" % obj)
return
dpi.clear(obj)
new_command("clear", data_profiler_clear_cmd,
args = [],
namespace = "data-profiler",
type = ["Profiling"],
short = "clear data profiler",
doc = """
Reset all counters of the data profiler to zero.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4172")
#
# -------------------- address profiler commands --------------------
#
def address_profile_info_cmd(obj, sum, max):
try:
SIM_get_interface(obj, "address-profiler")
api = obj.iface.address_profiler
except:
pr("%s is not an address profiler!\n" % obj)
return
num = api.num_views(obj)
if num == 0:
pr("%s has no views defined!\n" % obj)
return
pr("%s has %d address profiler view%s:\n" % (obj.name, num,
["s", ""][num == 1]))
for i in range(num):
addr_bits = api.address_bits(obj, i)
maxaddr = (1 << addr_bits) - 1
pr("View %d: %s\n" % (i, api.description(obj, i)))
gran_log2 = api.granularity_log2(obj, i)
pr(" %d-bit %s addresses, granularity %d byte%s\n" %
(addr_bits,
["virtual", "physical"][api.physical_addresses(obj, i)],
1 << gran_log2, ["s", ""][gran_log2 == 0]))
if sum:
pr(" Total counts: %d\n" % api.sum(obj, i, 0, maxaddr))
if max:
pr(" Maximal count: %d\n" % api.max(obj, i, 0, maxaddr))
new_command("address-profile-info", address_profile_info_cmd,
args = [arg(flag_t, "-sum"), arg(flag_t, "-max")],
namespace = "address_profiler",
type = ["Profiling"],
short = "general info about an address profiler",
doc = """
Print general info about an object implementing the address-profiler
interface, such as a list of the available views. If the <tt>-sum</tt>
or <tt>-max</tt> flags are given, will also print the sum or max of
each view over the entire address space.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4211")
def aprof_views_cmd(cpu, add, remove, view, clear):
def indexof(aprof, view):
index = 0
for ap, v in cpu.aprof_views:
if aprof == ap and view == v:
return index
index += 1
return -1
if clear:
if add or remove:
print "Error! Too many options."
return
cpu.aprof_views = []
elif add:
if remove:
print "Error! Cannot specify both add and remove."
return
index = indexof(add, view)
if index >= 0:
print "View %d of %s is already selected." % (view, add.name)
return
numviews = add.iface.address_profiler.num_views(add)
if view < 0 or view >= numviews:
print "Error! View %d is out of range for %s." % (view, add.name)
return
temp = cpu.aprof_views
temp.append([add, view])
cpu.aprof_views = temp
elif remove:
index = indexof(remove, view)
if index < 0:
print "View %d of %s is not selected." % (view, remove.name)
return
temp = cpu.aprof_views
del temp[index]
cpu.aprof_views = temp
else:
if len(cpu.aprof_views) < 1:
print "No address profiler views selected for %s." % cpu.name
return
print ("The following address profiler views are selected for %s:"
% cpu.name)
i = 1
for ap, view in cpu.aprof_views:
api = ap.iface.address_profiler
print (" %d. (%s) View %d of %s (%s)"
% (i, ["virtual", "physical"]
[api.physical_addresses(ap, view)],
view, ap.name, api.description(ap, view)))
i += 1
aprof_obj_t = obj_t("address_profiler", "address_profiler")
new_command("aprof-views", aprof_views_cmd,
args = [arg(aprof_obj_t, "add", spec = "?", default = None),
arg(aprof_obj_t, "remove", spec = "?", default = None),
arg(int_t, "view", spec = "?", default = 0),
arg(flag_t, "-clear")],
namespace = "processor",
type = ["Profiling"],
short = "manipulate list of selected address profiling views",
doc = """
Manipulate the processor attribute <tt>aprof-views</tt>, which
determines which address profiler views are displayed alongside
disassembled code.
The <i>add</i> and <i>view</i> arguments select an address profiler
view to add to the list. Alternatively, the <i>remove</i> and
<i>view</i> arguments specify an address profiler view to remove from
the list. <i>view</i> defaults to 0 if not specified.
If called with the <tt>-clear</tt> flag, remove all address profiler
views from the list.
If called without arguments, print a detailed list of the currently
selected address profiler views for the processor. """, filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4276")
#
# ------- address profiler toplist --------
#
def address_profile_toplist_cmd(ap, symtbl, samples, start, stop, view, cnt_ival):
try:
SIM_get_interface(ap, "address-profiler")
api = ap.iface.address_profiler
except:
pr("%s is not an address profiler!\n" % ap)
return
num_views = api.num_views(ap)
if not num_views > view:
pr("%s does not have view %d!\n" % (ap, view))
gran = 2**(api.granularity_log2(ap, view))
#
# sample_list = [[count, address], ...]
#
sample_list = [[0L, 0L]] * samples
for (count, addr) in api.iter(ap, view, start, stop):
if count > sample_list[-1][0]:
sample_list[-1] = [count, addr]
sample_list.sort(lambda a,b: cmp(b[0], a[0]))
sample_list.sort(lambda a,b: cmp(a[1], b[1]))
#
# top_list = [[count, address, length], ...]
#
top_list = []
for (s_cnt, s_addr) in sample_list:
match = False
for entr in top_list:
if entr[1] + entr[2] == s_addr and entr[0] - cnt_ival <= s_cnt and entr[0] + cnt_ival >= s_cnt:
match = True
entr[2] += gran
if not match:
top_list.append([s_cnt, s_addr, gran])
top_list.sort()
top_list.reverse()
entry = 1
if top_list[0][0] == 0:
return
print "Data Profile Toplist (count, address, source):"
for (count, addr, length) in top_list:
if count == 0:
break
if symtbl:
pos = symtbl.source_at[addr]
if pos:
pstr = "%s, %s" % (pos[0], pos[2])
else:
pstr = "unknown"
else:
pstr = "unknown"
if length > gran:
print "%3d. %12d 0x%08x - 0x%08x in %s" % (
entry, count, addr, addr + length - gran, pstr)
else:
print "%3d. %12d 0x%08x %s" % (entry, count, addr, pstr)
entry += 1
new_command("address-profile-toplist", address_profile_toplist_cmd,
args = [arg(obj_t("symtable", "symtable"), "symtable", spec = "?", default = None),
arg(int_t, "samples", spec = "?", default = 100),
arg(int_t, "start", spec = "?", default = 0x0),
arg(int_t, "stop", spec = "?", default = 0xfffffffc),
arg(int_t, "view", spec = "?", default = 0),
arg(int_t, "count_interval", spec = "?", default = 1)],
namespace = "address_profiler",
type = ["Profiling"],
short = "print toplist of address profiling data",
doc = """
Print address profiling regions sorted by count. The <i>symtable</i>
attribute can be used to map data profiling regions in form of
physical addresses to source function and file information.
The <i>samples</i> argument specifies the number of sampling points
used to create the list containing the highest count. The sampling
range is determined by <i>start</i> and <i>stop</i>. The default
values are 100 samples in the interval 0x0 - 0xfffffffc. The
granularity is defined by the data profiler object.
The <i>view</i> attribute selects the address profiler view.
The <i>count_interval</i> attribute defines the range in which sampled
data regions will match even though the data profiler count is not
equal. Ex. Assume that the samples in the region 0x20c - 0x20c have a
count of 4711 and region 0x20d - 0x20f a count of 4713. The regions
will be considered one region if <i>count_interval</i> is 4 but not if
1.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4364")
def rshift_round_up(x, s):
return (x + (1 << s) - 1) >> s
def pow2_bytes_to_str(n):
orig_n = n
if n < 10:
return "%d byte%s" % (1 << n, ["s", ""][n == 0])
for prefix in ["kilo", "Mega", "Giga", "Tera", "Peta", "Exa"]:
n -= 10
if n < 10:
return "%d %sbyte%s" % (1 << n, prefix, ["s", ""][n == 0])
return "2^%d bytes" % orig_n
# Return a string representation of num (positive) of at most maxlen
# characters. Use prefixes (and round up) if necesary. The string
# representations of zero and overflow are configurable. If
# minprefixnum is given, num will be forced to use the prefix that
# minprefixnum would have gotten (or bigger), even in the face of
# precision loss.
def num_to_str(num, maxlen, zero = "0", almostzero = None,
overflow = "inf", minprefixnum = 0):
if num == 0:
return zero
pnum = max(num, minprefixnum)
pstr = "%d" % pnum
if len(pstr) <= maxlen:
return "%d" % num
for suffix in ["k", "M", "G", "T", "P", "E"]:
num = num/1000
pnum = pnum/1000
pstr = "%d%s" % (pnum, suffix)
if len(pstr) <= maxlen:
if num == 0:
return almostzero
return "%d%s" % (num, suffix)
return overflow
def long_and_short_num(num):
numstr = num_to_str(num, 6)
numstr2 = "%d" % num
if numstr != numstr2:
numstr = "%s (%s)" % (numstr2, numstr)
return numstr
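# Examples of the number formatting helpers (hand-computed assumptions):
#   num_to_str(0, 6)            -> "0"
#   num_to_str(123456, 6)       -> "123456"
#   num_to_str(1234567, 6)      -> "1234k"    # truncated by integer division
#   long_and_short_num(1234567) -> "1234567 (1234k)"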
def address_profile_data_cmd(ap, view, address, cell_bits, row_bits,
table_bits, start, stop, maxlines, sameprefix):
cell_width = 6
cells_per_line = 3
def zero_out_low_bits(x, b):
return (x >> b) << b
def print_header():
cellgran = gran - cells_per_line
sh = cellgran % 4
cellgran -= sh
columns = 1 << cells_per_line
columnheaderdigits = (cells_per_line + sh + 3)/4
pr("column offsets:\n")
pr("%*s*" % (int(2 + addr_size), "0x%x" % (1 << cellgran)))
for i in range(columns):
pr(" %*s" % (int(cell_width),
"0x%0*x" % (int(columnheaderdigits), i << sh)))
pr("\n")
pr("-" * (2 + addr_size + 1 + columns*(1 + cell_width)) + "\n")
def print_lines(start, numlines, gran):
cellgran = 1 << (gran - cells_per_line)
tsum = 0
left = []
lines = []
m = 0
for i in range(numlines):
left.append("0x%0*x:" % (int(addr_size), start))
line = []
for j in range(1 << cells_per_line):
c = api.sum(ap, view, start, start + cellgran - 1)
m = max(m, c)
line.append(c)
start += cellgran
tsum += c
lines.append(line)
if sameprefix:
mp = m
else:
mp = 0
for i in range(len(left)):
pr(left[i])
for c in lines[i]:
pr(" %*s" % (int(cell_width), num_to_str(
c, cell_width, zero = ".", almostzero = "o",
minprefixnum = mp)))
pr("\n")
return tsum
def find_firstlast_counter(a, b, first):
if a >= b:
if a == b:
return a
return -1
# Basecase: linear search.
if b - a < 100:
if first:
start = b + 1
fun = min
else:
start = a - 1
fun = max
best = start
for count, addr in api.iter(ap, view, a, b):
best = fun(best, addr)
if best == start:
return -1
return best
# Recursion: split interval in half.
guess = (b + a)/2
if first:
for count, addr in api.iter(ap, view, a, guess):
return find_firstlast_counter(a, addr, first)
return find_firstlast_counter(guess + 1, b, first)
else:
for count, addr in api.iter(ap, view, guess, b):
return find_firstlast_counter(addr, b, first)
return find_firstlast_counter(a, guess - 1, first)
try:
SIM_get_interface(ap, "address-profiler")
api = ap.iface.address_profiler
except:
pr("%s is not an address profiler!\n" % ap)
return
if api.num_views(ap) == 0:
pr("%s has no views defined!\n" % ap)
return
addr_bits = api.address_bits(ap, view)
lastaddr = (1 << addr_bits) - 1
addr_size = rshift_round_up(addr_bits, 2) # address size in hex digits
prof_gran_bits = api.granularity_log2(ap, view)
gran = mingran = cells_per_line + prof_gran_bits
bit_args = 0
if cell_bits != None:
bit_args += 1
if cell_bits < prof_gran_bits:
print "Cells must contain at least %d bits." % prof_gran_bits
return
if row_bits != None:
bit_args += 1
if row_bits < mingran:
print "Rows must contain at least %d bits." % mingran
return
if table_bits != None:
bit_args += 1
if table_bits < mingran:
print "Table must contain at least %d bits." % mingran
return
if bit_args > 1:
print ("You may specify at most one of cell-bits, row-bits"
+ " and table-bits.")
return
# If no range is specified, find the minimal range that contains
# all counts.
if start == None and stop == None and address == None and bit_args == 0:
start = find_firstlast_counter(0, lastaddr, first = 1)
if start == -1:
start = 0
stop = lastaddr
else:
stop = find_firstlast_counter(0, lastaddr, first = 0)
# If user specified address argument, make sure we include it.
if address != None:
start = min(start, address)
stop = max(stop, address)
# Determine what interval to display.
if start != None or stop != None:
if start == None or stop == None:
print "You must specify both start and stop (or neither of them)."
return
if address != None or bit_args != 0:
print "You cannot specify both start+stop and address+bits."
return
for x in [start, stop]:
if x < 0 or x >= (1 << addr_bits):
print "0x%x is not a %d-bit address." % (x, addr_bits)
return
stop += 1 # let stop point to first address after interval
while 1:
if start > stop:
tmp = start
start = stop
stop = tmp
start = zero_out_low_bits(start, gran)
stop = zero_out_low_bits(stop + (1 << gran) - 1, gran)
length = stop - start
numlines = rshift_round_up(length, gran)
if numlines <= maxlines:
break
gran += 1
stop -= 1 # stop points to last address again
else:
if address == None:
address = 0
elif bit_args == 0:
print ("You must specify cell-bits, row-bits or table-bits"
+ " when address is specified.")
return
if address < 0 or address >= (1 << addr_bits):
print "0x%x is not a %d-bit address!" % (address, addr_bits)
return
if table_bits != None:
if table_bits > addr_bits:
print "Address space is only %d bits!" % addr_bits
return
length = 1 << table_bits
start = zero_out_low_bits(address, table_bits)
stop = start + length - 1
while 1:
numlines = rshift_round_up(length, gran)
if numlines <= maxlines:
break
gran += 1
else:
if row_bits == None:
row_bits = cell_bits + cells_per_line
if row_bits > addr_bits:
print "Address space is only %d bits!" % addr_bits
return
gran = row_bits
numlines = min(maxlines, 1 << (addr_bits - gran))
start = max(0, (zero_out_low_bits(address, gran)
- numlines*(1 << (gran - 1))))
gran_log2 = api.granularity_log2(ap, view)
cellgran = gran - cells_per_line
totalsum = api.sum(ap, view, 0, lastaddr)
# Print table.
print "View %d of %s: %s" % (view, ap.name, api.description(ap, view))
print ("%d-bit %s addresses, profiler granularity %d byte%s" %
(api.address_bits(ap, view),
["virtual", "physical"][api.physical_addresses(ap, view)],
1 << gran_log2, ["s", ""][gran_log2 == 0]))
if totalsum > 0:
print ("Each cell covers %d address bits (%s)."
% (cellgran, pow2_bytes_to_str(cellgran)))
print # empty line
print_header()
sum = print_lines(start, numlines, gran)
print # empty line
print ("%s counts shown. %s not shown."
% (long_and_short_num(sum), long_and_short_num(totalsum - sum)))
print # empty line
else:
print # empty line
print " Profiler is empty."
print # empty line
new_command("address-profile-data", address_profile_data_cmd,
args = [arg(int_t, "view", spec = "?", default = 0),
arg(int_t, "address", spec = "?", default = None),
arg(int_t, "cell-bits", spec = "?", default = None),
arg(int_t, "row-bits", spec = "?", default = None),
arg(int_t, "table-bits", spec = "?", default = None),
arg(int_t, "start", spec = "?", default = None),
arg(int_t, "stop", spec = "?", default = None),
arg(int_t, "lines", spec = "?", default = 20),
arg(flag_t, "-same-prefix")],
namespace = "address_profiler",
type = ["Profiling"],
short = "linear map of address profiling data",
doc = """
Display a map of (a part of) the address space covered by the address
profiler, and the counts of one of its views associated with each
address. The view is specified by the <i>view</i> argument; default is
view 0. The default behavior is to display the smallest interval that
contains all counts; you can change this with either the <i>start</i>
and <i>stop</i> or the <i>address</i> and <i>cell-bits</i>,
<i>row-bits</i> or <i>table-bits</i> arguments.
Cells that have zero counts are marked with ".". Cells that have a
non-zero count, but were rounded to zero, are marked with "o".
If one of <i>cell-bits</i>, <i>row-bits</i> or <i>table-bits</i> is
specified, then each cell, or each table row, or the entire table is
limited to that many bits of address space. By default the display
starts at address 0, but if an address is specified with the
<i>address</i> argument, the displayed interval is shifted to make
that address is visible.
If <i>start</i> and <i>stop</i> are specified, the display is limited
to the smallest interval containing both addresses.
The maximum number of lines in the table is limited by the
<i>lines</i> argument (the default is 20 lines). The scale of the map
is adjusted to fit this limit.
Normally, the display chooses an appropriate prefix for the count of
each cell; with the <tt>-same-prefix</tt> flag, all counts will be
forced to have the same prefix. This is useful if a lot of small but
non-zero values makes it hard to spot the really big values.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4663")
def address_profile_summary_cmd(ap, view, lines):
try:
SIM_get_interface(ap, "address-profiler")
api = ap.iface.address_profiler
except:
pr("%s is not an address profiler!\n" % ap)
return
if api.num_views(ap) == 0:
pr("%s has no views defined!\n" % ap)
return
addr_bits = api.address_bits(ap, view)
addr_size = rshift_round_up(addr_bits, 2) # address size in hex digits
minlength = api.granularity_log2(ap, view)
num_width = 6
maxlines = lines
lcount = 0
hcount = 1
start = 2
length = 3
def init_lines():
lc = api.sum(ap, view, 0, (1 << (addr_bits - 1)) - 1)
hc = api.sum(ap, view, (1 << (addr_bits - 1)), (1 << addr_bits) - 1)
if hc + lc == 0:
return None
line = {lcount: lc, hcount: hc, start: 0, length: addr_bits}
trim_empty_halves(line)
return [line]
def trim_empty_halves(line):
if line[length] == minlength:
return
if line[lcount] == 0:
line[length] -= 1
line[start] += (1 << line[length])
line[lcount] = api.sum(
ap, view, line[start],
line[start] + (1 << (line[length] - 1)) - 1)
line[hcount] -= line[lcount]
trim_empty_halves(line)
return
if line[hcount] == 0:
line[length] -= 1
line[hcount] = api.sum(ap, view,
line[start] + (1 << (line[length] - 1)),
line[start] + (1 << line[length]) - 1)
line[lcount] -= line[hcount]
trim_empty_halves(line)
return
def density(line):
return float(line[lcount] + line[hcount])/(1 << line[length])
def add_line(lines, line):
trim_empty_halves(line)
d = density(line)
i = len(lines)
lines.append(line)
while i > 0 and d > density(lines[i - 1]):
lines[i], lines[i - 1] = lines[i - 1], lines[i]
i -= 1
def split_last(lines):
i = len(lines) - 1
while lines[i][length] == minlength:
i -= 1
if i < 0:
return 0 # no more lines to split
line = lines.pop(i)
ilen = 1 << (line[length] - 2)
c1 = api.sum(ap, view, line[start], line[start] + ilen - 1)
c2 = line[lcount] - c1
c3 = api.sum(ap, view, line[start] + 2*ilen, line[start] + 3*ilen - 1)
c4 = line[hcount] - c3
line1 = {lcount: c1, hcount: c2, start: line[start],
length: (line[length] - 1)}
add_line(lines, line1)
line2 = {lcount: c3, hcount: c4, start: line[start] + 2*ilen,
length: (line[length] - 1)}
add_line(lines, line2)
return 1 # success
# Find interesting intervals.
lines = init_lines()
if lines == None:
pr("Profiler is empty.\n")
return
while len(lines) < maxlines and split_last(lines):
pass
# Sort intervals by start address.
tmplist = [(line[start], line) for line in lines]
tmplist.sort()
lines = [line for (key, line) in tmplist]
# Print intervals.
pr(" %*s %*s %*s %*s %s\n"
% (int(addr_size) + 2, "start",
int(addr_size) + 2, "end",
int(num_width), "count",
int(num_width), "length",
"counts/byte"))
sum = 0
for line in lines:
count = line[lcount] + line[hcount]
sum += count
stop = line[start] + (1 << line[length]) - 1
pr(" 0x%0*x 0x%0*x %*s %*s %f\n"
% (int(addr_size), line[start],
int(addr_size), stop,
int(num_width), num_to_str(count, num_width),
int(num_width), num_to_str(1 << line[length], num_width),
density(line)))
pr("Total sum is %s.\n" % long_and_short_num(sum))
new_command("address-profile-summary", address_profile_summary_cmd,
args = [arg(int_t, "view", spec = "?", default = 0),
arg(int_t, "lines", spec = "?", default = 10)],
namespace = "address_profiler",
type = "internal commands",
short = "short summary of the contents of the address profiler",
doc = """
Print a short summary of the address intervals that have a nonzero
count.
The view of the address profiler is selected with the <i>view</i>
parameter (default is view 0). <i>lines</i> determines the length of
the summary (the amount of information presented is increased until it
fills the specified number of lines).""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4826")
#
# ----------------- current-processor ----------------
#
def cur_proc_cmd():
try:
return SIM_current_processor().name
except SimExc_Lookup, msg:
print msg
SIM_command_has_problem()
new_command("current-processor", cur_proc_cmd,
type = ["Command-Line Interface"],
short = "return current processor",
doc = """
Returns the name of the currently executing processor.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4852")
def cli_history_cmd(maxlines):
hist = conf.sim.cmd_history
if maxlines != -1:
hist = hist[max(len(hist) - maxlines, 0):]
pr("".join(" " + line + "\n" for line in hist))
if sys.platform == "win32":
new_command("cli-history", cli_history_cmd,
[arg(integer_t, "maxlines", "?", -1)],
type = ["Command-Line Interface"],
short = "display command-line history",
doc = """
List the most recently typed command lines. If specified, at most
<var>maxlines</var> lines are displayed.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="4866")
# This file should only contain internal commands which should not be
# seen by the end-user.
from cli import *
#
# VTmem debug commands
#
def mm_list_types_cmd(max):
DBG_mm_list_types(max)
new_command("mm-list-types", mm_list_types_cmd, [arg(int_t, "max", "?", 0)], type="internal commands",
short = "list all object types currently active", doc = """
Each object created has a type associated to it. This command goes through all active
objects and creates a list sorted by type and memory footprint.
""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="13")
def mm_list_sites_cmd(file, maxsites):
DBG_mm_list_sites(file, maxsites, 0)
new_command("mm-list-sites", mm_list_sites_cmd,
[arg(str_t, "file", "?", '*'),
arg(int_t, "maxsites", "?", 32)],
type = "internal commands",
short = "list busiest allocation sites",
doc = """
List the <i>maxsites</i> allocation sites that have the most memory allocated.
If <i>file</i> is specified, constrain the list to allocation sites in source
files matching that string (* matches any sequence of characters).""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="22")
def mm_snoop_type_cmd(flag, type):
if flag:
enable = 0
else:
enable = 1
DBG_mm_snoop_type(type, enable)
new_command("mm-snoop-type", mm_snoop_type_cmd,
[arg(flag_t, "-d"), arg(str_t, "type")],
type = "internal commands",
short = "turn on/off type snooping",
doc = """
Turn on (or, with <tt>-d</tt>, off) snooping of all allocations/deallocations
of the type <i>type</i>. A <i>type</i> of <tt>*</tt> means that all types
are to be snooped.""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="39")
def mm_hash_stat_cmd():
DBG_mm_hash_stat()
new_command("mm-hash-stat", mm_hash_stat_cmd, [], type = "internal commands",
short = "show some internal memory statistics",
doc = """
Shows some esoteric internal memory management hash table statistics only of
interest to the VTmem maintainer.""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="51")
#
# ------------------- no-problem -------------------
#
def no_problem_cmd():
SIM_set_attribute(SIM_get_object("sim"), "no-problem", 1)
new_command("no-problem", no_problem_cmd, [], type="internal commands",
short = "disable back to front on problems", doc = """
Disable back to front on problems.
""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="63")
#
# ------------------- uncatch-signal -------------------
#
def uncatch_signal_cmd(signo):
if (DBG_uncatch_signal(signo) == -1):
print "Failed to remove signal handler for %d" % signo
SIM_command_has_problem()
else:
print "Signal %d not caught by Simics anymore" % signo
new_command("uncatch-signal", uncatch_signal_cmd,
args = [arg(int_t, "signo")],
type="internal commands",
short = "remove signal handler for specified signal number", doc = """
Disable a Simics-installed signal handler; the default behavior will be used instead.
This can be useful if you have an error which only happens occasionally and
you want to generate a core file to analyze the problem when it has happened.
""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="80")
#
# ------------------- dump-sr-stat -------------------
#
def dump_sr_stat_cmd(filename):
cpu = current_processor()
try:
if not cpu.iface.processor.dump_sr_stat(filename):
print "Failed to dump statistics"
except:
print "Simics does not have SR statistics."
new_command("dump-sr-stat", dump_sr_stat_cmd,
[ arg(filename_t(),"file_name") ],
type = "internal commands",
short = "print service routine statistics",
doc = """
On a Simics compiled with the <b>-gs</b> Simgen flag, this command generates a
file with statistics on which service routines and parameter combinations that
has been executed during this session. This file can be used as an input file
to Simgen (<b>-as</b> and <b>-s</b> flags) to sort and specialize service
routines, making a faster sim.
""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="102")
#
# -------------------- dump-dstc --------------------
#
def dump_dstc():
DBG_dump_dstc(current_processor())
new_command("dstc-dump", dump_dstc,
[],
alias = "",
type = "internal commands",
short = "print contents of D-STC",
doc = """
For debugging Simics internals: lists contents of data STCs. The STC tables
are used to speed up memory access operations, and cache translations
between virtual, physical, and real addresses. Note: this command
will only dump the currently active STC set, if there are multiple.
""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="121")
#
# -------------------- infinite-loop --------------------
#
def infinite_loop_cmd():
DBG_infinite_loop()
new_command("infinite-loop", infinite_loop_cmd,
[],
type = "internal commands",
short = "enter infinite loop",
doc = """
For debugging Simics internals: Place Simics in an infinite loop
until an attached debugger clears a flag.""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="140")
#
# -------------------- print-last-exceptions -------------
#
def print_last_exceptions_cmd():
DBG_print_exception_list()
new_command("print-last-exceptions", print_last_exceptions_cmd,
[],
type = "internal commands",
short = "print the 16 last exceptions",
doc = """
Print out the 16 last exceptions, if they were cleared properly and which code line generated them.
""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="155")
#
# -------------------- turbo-debug --------------------
#
def turbo_debug():
conf.cpu0.rax = 0x0000000000000000L
conf.cpu0.rbx = 0x0123456789abcdefL
conf.cpu0.rcx = 0xf0f0f0f0f0f0f0f0L
conf.cpu0.rdx = 0x0000000012345678L
conf.cpu0.rsi = 0x0000000000000009L
DBG_turbo_debug()
new_command("turbo-debug", turbo_debug,
[],
alias = "",
type = "internal commands",
short = "run some turbo tests",
doc = "Runs a number of tests of the turbo code generation.", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="175")
def print_internal_counters(long_descriptions):
try:
counters = conf.sim.internal_counters
desc = conf.sim.internal_counters_desc
except:
print "Internal profiling is not available"
return
print
for group, list in desc:
print "%s" % group
for name, short_desc, long_desc in list:
count = str(counters[name])
pad = "."*(65 - len(count) - len(short_desc))
print " %s%s%s" % (short_desc, pad, count)
if long_descriptions:
print " (%s) %s" % (name, long_desc)
print
print
new_command("print-internal-counters", print_internal_counters,
args = [arg(flag_t, "-long-descriptions")],
type="internal commands",
short = "print values of internal counters",
doc = """
Print values of internal debugging counters.""", filename="/mp/simics-3.0/src/core/common/debug_commands.py", linenumber="201")
| gpl-2.0 | 2,509,845,110,772,104,700 | 35.228821 | 230 | 0.563231 | false |
googleapis/googleapis-gen | google/cloud/assuredworkloads/v1beta1/assuredworkloads-v1beta1-py/docs/conf.py | 1 | 12568 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# google-cloud-assuredworkloads documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-assuredworkloads"
copyright = u"2020, Google, LLC"
author = u"Google APIs" # TODO: autogenerate this bit
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-assuredworkloads-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-assuredworkloads.tex",
u"google-cloud-assuredworkloads Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-assuredworkloads",
u"Google Cloud Assuredworkloads Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-assuredworkloads",
u"google-cloud-assuredworkloads Documentation",
author,
"google-cloud-assuredworkloads",
"GAPIC library for Google Cloud Assuredworkloads API",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("http://requests.kennethreitz.org/en/stable/", None),
"proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| apache-2.0 | 8,126,131,519,976,620,000 | 32.425532 | 87 | 0.696133 | false |
weijia/object_view | object_view/operations_local.py | 1 | 2265 | import json
import os
import threading
from django.http import HttpResponse
import time
from libtool import find_root_path
from object_filter.operations import get_tag_info_for_obj
from objsys.obj_tagging_local import UfsFilter
from ufs_utils.django_utils import retrieve_param
from ufs_utils.misc import ensure_dir
from ufs_utils.obj_tools import get_hostname
class ExportTagsThread(UfsFilter, threading.Thread):
def run(self):
final_data = []
for obj in self.get_obj_filters():
final_data.append({"tags": get_tag_info_for_obj(obj), "ufs_url": obj.ufs_url,
"uuid": obj.uuid, "full_path": obj.full_path,
"description": obj.description, "size": obj.size})
######
# Quitting, so save last_timestamp
if 0 != len(final_data):
export_json_to_folder({"data": final_data, "host": get_hostname()}, "../tag_dump/")
else:
print "No more tag applied"
def handle_export_tags(request):
data = retrieve_param(request)
t = ExportTagsThread()
t.set_data(data)
#t.set_tag_app('user:' + request.user.username)
t.start()
return HttpResponse('{"result": "Apply tags processing"}', mimetype="application/json")
def export_json_to_folder(final_data, relative_path):
root_dir = find_root_path(__file__, "approot")
dump_root = os.path.join(root_dir, relative_path)
ensure_dir(dump_root)
dump_filename = os.path.join(root_dir, relative_path + str(time.time()) + ".json")
f = open(dump_filename, "w")
f.write(json.dumps(final_data, indent=4))
f.close()
class ExportTagsThread(UfsFilter, threading.Thread):
def run(self):
final_data = []
for obj in self.get_obj_filters():
final_data.append({"tags": get_tag_info_for_obj(obj), "ufs_url": obj.ufs_url,
"uuid": obj.uuid, "full_path": obj.full_path,
"description": obj.description, "size": obj.size})
######
# Quitting, so save last_timestamp
if 0 != len(final_data):
export_json_to_folder({"data": final_data, "host": get_hostname()}, "../tag_dump/")
else:
print "No more tag applied" | bsd-3-clause | -8,763,156,990,591,533,000 | 36.766667 | 95 | 0.60883 | false |
galaxyproject/cargo-port | bin/verify.py | 1 | 2061 | #!/usr/bin/env python
import os
import sys
# Conditional import to ensure we can run without non-stdlib on py2k.
if sys.version_info.major > 2:
from builtins import str
from builtins import zip
from builtins import object
import json
import subprocess
import logging
from cargoport.utils import (package_to_path,
symlink_depot,
verify_file,
verify_filetype,
cleanup_file,
XUnitReportBuilder,
yield_packages)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
def main(galaxy_package_file, dryrun=False):
visited_paths = []
api_data = {'data': []}
with open(galaxy_package_file, 'r') as handle:
retcode = 0
xunit = XUnitReportBuilder()
xunit.ok("I.Am.Alive")
for ld in yield_packages(handle):
nice_name = package_to_path(**ld)
if not os.path.exists(ld['id']):
continue
output_package_path = os.path.join(ld['id'], nice_name) + ld['ext']
if not os.path.exists(output_package_path):
continue
visited_paths.append(os.path.abspath(output_package_path))
if os.path.exists(output_package_path) and os.path.getsize(output_package_path) == 0:
log.error("Empty download, removing %s %s", ld['url'], output_package_path)
cleanup_file(output_package_path)
xunit.failure(nice_name, "EmptyFile", "%s was found to be empty" % output_package_path)
err = verify_file(output_package_path, ld['sha256sum'].strip())
if err is not None:
xunit.failure(nice_name, "ValidationError", err)
err = verify_filetype(output_package_path, ld['ext'].strip(), dryrun=dryrun)
if err is not None:
xunit.failure(nice_name, "ValidationError", err)
with open('report.xml', 'w') as xunit_handle:
xunit_handle.write(xunit.serialize())
sys.exit(retcode)
if __name__ == '__main__':
main(sys.argv[1], dryrun=(False if len(sys.argv) <= 2 else True))
| mit | -1,555,088,605,659,912,000 | 31.203125 | 103 | 0.614265 | false |
s20121035/rk3288_android5.1_repo | external/lldb/examples/customization/bin-utils/binutils.py | 2 | 3609 | """Collection of tools for displaying bit representation of numbers."""
import StringIO
def binary(n, width=None):
"""
Return a list of (0|1)'s for the binary representation of n where n >= 0.
If you specify a width, it must be > 0, otherwise it is ignored. The list
could be padded with 0 bits if width is specified.
"""
l = []
if width and width <= 0:
width = None
while n > 0:
l.append(1 if n&1 else 0)
n = n >> 1
if width:
for i in range(width - len(l)):
l.append(0)
l.reverse()
return l
def twos_complement(n, width):
"""
Return a list of (0|1)'s for the binary representation of a width-bit two's
complement numeral system of an integer n which may be negative.
"""
val = 2**(width-1)
if n >= 0:
if n > (val-1):
return None
# It is safe to represent n with width-bits.
return binary(n, width)
if n < 0:
if abs(n) > val:
return None
# It is safe to represent n (a negative int) with width-bits.
return binary(val*2 - abs(n))
# print binary(0xABCD)
# [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1]
# print binary(0x1F, 8)
# [0, 0, 0, 1, 1, 1, 1, 1]
# print twos_complement(-5, 4)
# [1, 0, 1, 1]
# print twos_complement(7, 4)
# [0, 1, 1, 1]
# print binary(7)
# [1, 1, 1]
# print twos_complement(-5, 64)
# [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1]
def positions(width):
"""Helper function returning a list describing the bit positions.
Bit positions greater than 99 are truncated to 2 digits, for example,
100 -> 00 and 127 -> 27."""
return ['{0:2}'.format(i)[-2:] for i in reversed(range(width))]
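# Example (hand-computed): positions(4) -> [' 3', ' 2', ' 1', ' 0'];
# positions(130) yields ['29', '28', ..., ' 1', ' 0'], with the leading "1" of
# positions >= 100 truncated away, as described in the docstring above.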
def utob(debugger, command_line, result, dict):
"""Convert the unsigned integer to print its binary representation.
args[0] (mandatory) is the unsigned integer to be converted
args[1] (optional) is the bit width of the binary representation
args[2] (optional) if specified, turns on verbose printing"""
args = command_line.split()
try:
n = int(args[0], 0)
width = None
if len(args) > 1:
width = int(args[1], 0)
if width < 0:
width = 0
except:
print utob.__doc__
return
if len(args) > 2:
verbose = True
else:
verbose = False
bits = binary(n, width)
if not bits:
print "insufficient width value: %d" % width
return
if verbose and width > 0:
pos = positions(width)
print ' '+' '.join(pos)
print ' %s' % str(bits)
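# One way to try utob from inside lldb (setup shown here is an assumption, not
# part of this file): import the module and register the function as a command.
#   (lldb) command script import binutils.py
#   (lldb) command script add -f binutils.utob utob
#   (lldb) utob 0xABCD 16 v
# This prints the 16 bit positions followed by
# [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1].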
def itob(debugger, command_line, result, dict):
"""Convert the integer to print its two's complement representation.
args[0] (mandatory) is the integer to be converted
args[1] (mandatory) is the bit width of the two's complement representation
args[2] (optional) if specified, turns on verbose printing"""
args = command_line.split()
try:
n = int(args[0], 0)
width = int(args[1], 0)
if width < 0:
width = 0
except:
print itob.__doc__
return
if len(args) > 2:
verbose = True
else:
verbose = False
bits = twos_complement(n, width)
if not bits:
print "insufficient width value: %d" % width
return
if verbose and width > 0:
pos = positions(width)
print ' '+' '.join(pos)
print ' %s' % str(bits)
| gpl-3.0 | 4,791,216,865,151,303,000 | 28.581967 | 194 | 0.559158 | false |
andrew-lundgren/gwpy | gwpy/cli/coherencegram.py | 1 | 4469 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Joseph Areeda (2015)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
#
""" Coherence plots
"""
from numpy import percentile
from .cliproduct import CliProduct
__author__ = 'Joseph Areeda'
class Coherencegram(CliProduct):
"""Derived class to implement the coherence-spectrogram"""
def get_action(self):
"""Return the string used as "action" on command line."""
return 'coherencegram'
def init_cli(self, parser):
"""Set up the argument list for this product"""
self.arg_chan2(parser)
self.arg_freq2(parser)
self.arg_ax_linx(parser)
self.arg_ax_ylf(parser)
self.arg_ax_intlin(parser)
self.arg_imag(parser)
self.arg_plot(parser)
return
def get_max_datasets(self):
"""Coherencegram only handles 1 set of 2 at a time"""
return 2
def get_min_datasets(self):
"""Coherence requires 2 datasets to calculate"""
return 2
def is_image(self):
"""This plot is image type"""
return True
def freq_is_y(self):
"""This plot puts frequency on the y-axis of the image"""
return True
def get_ylabel(self, args):
"""Text for y-axis label"""
return 'Frequency (Hz)'
def get_title(self):
"""Start of default super title, first channel is appended to it"""
return "Coherence spectrogram: "
def get_color_label(self):
return self.scale_text
def get_sup_title(self):
"""We want both channels in the title"""
sup = self.get_title() + self.timeseries[0].channel.name
sup += " vs. " + self.timeseries[1].channel.name
return sup
def gen_plot(self, arg_list):
"""Generate the plot from time series and arguments"""
self.is_freq_plot = True
secpfft = 0.5
if arg_list.secpfft:
secpfft = float(arg_list.secpfft)
ovlp_frac = 0.9
if arg_list.overlap:
ovlp_frac = float(arg_list.overlap)
self.secpfft = secpfft
self.overlap = ovlp_frac
ovlap_sec = secpfft*ovlp_frac
stride = int(self.dur/(self.width * 0.8))
stride = max(stride, secpfft+(1-ovlp_frac)*32)
stride = max(stride, secpfft*2)
coh = self.timeseries[0].coherence_spectrogram(
self.timeseries[1], stride, fftlength=secpfft, overlap=ovlap_sec)
norm = False
if arg_list.norm:
coh = coh.ratio('mean')
norm = True
# set default frequency limits
self.fmax = coh.band[1]
self.fmin = 1 / secpfft
# default time axis
self.xmin = self.timeseries[0].times.value.min()
self.xmax = self.timeseries[0].times.value.max()
# set intensity (color) limits
if arg_list.imin:
lo = float(arg_list.imin)
elif norm:
lo = 0.5
else:
lo = 0.01
if norm or arg_list.nopct:
imin = lo
else:
imin = percentile(coh, lo*100)
if arg_list.imax:
up = float(arg_list.imax)
elif norm:
up = 2
else:
up = 100
if norm or arg_list.nopct:
imax = up
else:
imax = percentile(coh, up)
# plot the thing
if norm:
self.plot = coh.plot(vmin=imin, vmax=imax)
self.scale_text = 'Normalized to mean'
elif arg_list.logcolors:
self.plot = coh.plot(norm='log', vmin=imin, vmax=imax)
self.scale_text = r'log_10 Coherence'
else:
self.plot = coh.plot(vmin=imin, vmax=imax)
self.scale_text = r'Coherence'
# pass the scaling to the annotater
self.imin = imin
self.imax = imax
return
| gpl-3.0 | -5,225,253,726,674,151,000 | 28.20915 | 77 | 0.586932 | false |
thoma5B/Django-Wiki | wiki/views/mixins.py | 1 | 1736 | from __future__ import unicode_literals
from __future__ import absolute_import
import logging
from django.views.generic.base import TemplateResponseMixin
from wiki.core.plugins import registry
from wiki.conf import settings
log = logging.getLogger(__name__)
class ArticleMixin(TemplateResponseMixin):
"""A mixin that receives an article object as a parameter (usually from a wiki
decorator) and puts this information as an instance attribute and in the
template context."""
def dispatch(self, request, article, *args, **kwargs):
self.urlpath = kwargs.pop('urlpath', None)
self.article = article
self.children_slice = []
if settings.SHOW_MAX_CHILDREN > 0:
try:
for child in self.article.get_children(
max_num=settings.SHOW_MAX_CHILDREN +
1,
articles__article__current_revision__deleted=False,
user_can_read=request.user):
self.children_slice.append(child)
except AttributeError as e:
log.error(
"Attribute error most likely caused by wrong MPTT version. Use 0.5.3+.\n\n" +
str(e))
raise
return super(ArticleMixin, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['urlpath'] = self.urlpath
kwargs['article'] = self.article
kwargs['article_tabs'] = registry.get_article_tabs()
kwargs['children_slice'] = self.children_slice[:]
# kwargs['children_slice_more'] = len(self.children_slice) > 20
kwargs['plugins'] = registry.get_plugins()
return kwargs
| gpl-3.0 | 2,542,029,970,468,408,300 | 35.93617 | 97 | 0.608871 | false |
tynn/numpy | numpy/lib/histograms.py | 4 | 34578 | """
Histogram-related functions
"""
from __future__ import division, absolute_import, print_function
import operator
import numpy as np
from numpy.compat.py3k import basestring
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
def _hist_bin_sqrt(x):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / np.sqrt(x.size)
def _hist_bin_sturges(x):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_doane(x):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
and the Sturges estimator if the FD bandwidth is 0.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x` and bad for data with limited
variance. The Sturges estimator is quite good for small (<1000) datasets
and is the default in the R language. This method gives good off the shelf
behaviour.
.. versionchanged:: 1.15.0
If there is limited variance the IQR can be 0, which results in the
FD bin width being 0 too. This is not a valid bin width, so
``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
If the IQR is 0, it's unlikely any variance based estimators will be of
use, so we revert to the sturges estimator, which only uses the size of the
dataset in its calculation.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
fd_bw = _hist_bin_fd(x)
sturges_bw = _hist_bin_sturges(x)
if fd_bw:
return min(fd_bw, sturges_bw)
else:
# limited variance, so we return a len dependent bw estimator
return sturges_bw
# Private dict initialized at module load time
_hist_bin_selectors = {'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
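# --- Editor's illustrative sketch; not part of the original NumPy source. ---
# Shows how a selector looked up in the dict above is turned into a concrete
# number of bins, mirroring the width -> count conversion performed later in
# _get_bin_edges. The helper name `_demo_bin_count` is hypothetical.
def _demo_bin_count(x, estimator='fd'):
    x = np.asarray(x)
    width = _hist_bin_selectors[estimator](x)
    if not width:
        # Estimators such as 'fd' can return 0 (e.g. zero IQR); fall back to 1 bin.
        return 1
    return int(np.ceil(x.ptp() / width))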
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = np.asarray(a)
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _get_outer_edges(a, range):
"""
Determine the outer bin edges to use, from either the data or the range
argument
"""
if range is not None:
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
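# Editor's note (illustrative, not in the original source): for constant data,
# e.g. a = np.array([3.0, 3.0]) with range=None, min == max, so the branch
# above pads the edges by 0.5 on each side and returns (2.5, 3.5), avoiding a
# zero-width range and the divide-by-zero it would otherwise cause downstream.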
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
Parameters
==========
a : ndarray
Ravelled data array
bins, range
Forwarded arguments from `histogram`.
weights : ndarray, optional
Ravelled weights array, or None
Returns
=======
bin_edges : ndarray
Array of bin edges
uniform_bins : (Number, Number, int):
        The lower bound, upper bound, and number of bins, used in the optimized
implementation of `histogram` that works on uniform bins.
"""
# parse the overloaded bins argument
n_equal_bins = None
bin_edges = None
if isinstance(bins, basestring):
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bin_name not in _hist_bin_selectors:
raise ValueError(
"{!r} is not a valid estimator for `bins`".format(bin_name))
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
first_edge, last_edge = _get_outer_edges(a, range)
# truncate the range if needed
if range is not None:
keep = (a >= first_edge)
keep &= (a <= last_edge)
if not np.logical_and.reduce(keep):
a = a[keep]
if a.size == 0:
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a)
if width:
n_equal_bins = int(np.ceil((last_edge - first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
n_equal_bins = 1
elif np.ndim(bins) == 0:
try:
n_equal_bins = operator.index(bins)
except TypeError:
raise TypeError(
'`bins` must be an integer, a string, or an array')
if n_equal_bins < 1:
raise ValueError('`bins` must be positive, when an integer')
first_edge, last_edge = _get_outer_edges(a, range)
elif np.ndim(bins) == 1:
bin_edges = np.asarray(bins)
if np.any(bin_edges[:-1] > bin_edges[1:]):
raise ValueError(
'`bins` must increase monotonically, when an array')
else:
raise ValueError('`bins` must be 1d, when an array')
if n_equal_bins is not None:
# gh-10322 means that type resolution rules are dependent on array
# shapes. To avoid this causing problems, we pick a type now and stick
# with it throughout.
bin_type = np.result_type(first_edge, last_edge, a)
if np.issubdtype(bin_type, np.integer):
bin_type = np.result_type(bin_type, float)
# bin edges must be computed
bin_edges = np.linspace(
first_edge, last_edge, n_equal_bins + 1,
endpoint=True, dtype=bin_type)
return bin_edges, (first_edge, last_edge, n_equal_bins)
else:
return bin_edges, None
def _search_sorted_inclusive(a, v):
"""
Like `searchsorted`, but where the last item in `v` is placed on the right.
In the context of a histogram, this makes the last bin edge inclusive
"""
return np.concatenate((
a.searchsorted(v[:-1], 'left'),
a.searchsorted(v[-1:], 'right')
))
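# Editor's worked example (not in the original source): with sorted data
# a = np.array([1, 2, 3, 4]) and edges v = np.array([1, 2, 4]), the 'left'
# search of v[:-1] gives [0, 1] and the 'right' search of v[-1:] gives [4],
# so the cumulative counts are [0, 1, 4] and np.diff(...) yields [1, 3]:
# the value 4 is counted in the last bin instead of falling outside it.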
def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
Function to calculate only the edges of the bins used by the `histogram` function.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
If `bins` is a string from the list below, `histogram_bin_edges` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
            Less robust estimator that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). This is currently not used by any of the bin estimators,
but may be in the future.
Returns
-------
bin_edges : array of dtype float
The edges to pass into `histogram`
See Also
--------
histogram
Notes
-----
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'Rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'Sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'Doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'Sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
>>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> np.histogram_bin_edges(arr, bins=2)
array([0. , 2.5, 5. ])
For consistency with histogram, an array of pre-computed bins is
passed through unmodified:
>>> np.histogram_bin_edges(arr, [1, 2])
array([1, 2])
This function allows one set of bins to be computed, and reused across
multiple histograms:
>>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
>>> shared_bins
array([0., 1., 2., 3., 4., 5.])
>>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
>>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
>>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
>>> hist_0; hist_1
array([1, 1, 0, 1, 0])
array([2, 0, 1, 1, 2])
Which gives more easily comparable results than using separate bins for
each histogram:
>>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
>>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
    >>> hist_0; hist_1
array([1, 1, 1])
array([2, 1, 1, 2])
>>> bins_0; bins_1
array([0., 1., 2., 3.])
array([0. , 1.25, 2.5 , 3.75, 5. ])
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, _ = _get_bin_edges(a, bins, range, weights)
return bin_edges
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
.. deprecated:: 1.6.0
This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
behavior. It will be removed in NumPy 2.0.0. Use the ``density``
keyword instead. If ``False``, the result will contain the
number of samples in each bin. If ``True``, the result is the
value of the probability *density* function at the bin,
normalized such that the *integral* over the range is 1. Note
that this latter behavior is known to be buggy with unequal bin
widths; use ``density`` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
# The fast path uses bincount, but that only works for certain types
# of weight
simple_weights = (
weights is None or
np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, complex)
)
if uniform_bins is not None and simple_weights:
# Fast algorithm for equal bins
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
first_edge, last_edge, n_equal_bins = uniform_bins
# Initialize empty histogram
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
norm = n_equal_bins / (last_edge - first_edge)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= first_edge)
keep &= (tmp_a <= last_edge)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
# This cast ensures no type promotions occur below, which gh-10322
            # makes unpredictable. Getting it wrong leads to precision errors
# like gh-8123.
tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
f_indices = (tmp_a - first_edge) * norm
indices = f_indices.astype(np.intp)
indices[indices == n_equal_bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = ((tmp_a >= bin_edges[indices + 1])
& (indices != n_equal_bins - 1))
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real,
minlength=n_equal_bins)
n.imag += np.bincount(indices, weights=tmp_w.imag,
minlength=n_equal_bins)
else:
n += np.bincount(indices, weights=tmp_w,
minlength=n_equal_bins).astype(ntype)
else:
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
for i in _range(0, len(a), BLOCK):
sa = np.sort(a[i:i+BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate((zero, sw.cumsum()))
bin_index = _search_sorted_inclusive(sa, bin_edges)
cum_n += cw[bin_index]
n = np.diff(cum_n)
# density overrides the normed keyword
if density is not None:
normed = False
if density:
db = np.array(np.diff(bin_edges), float)
return n/db/n.sum(), bin_edges
elif normed:
# deprecated, buggy behavior. Remove for NumPy 2.0.0
db = np.array(np.diff(bin_edges), float)
return n/(n*db).sum(), bin_edges
else:
return n, bin_edges
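# Editor's note (illustrative, not in the original source): the two
# normalizations above differ for unequal bins. With counts n = [1, 1] and
# bin widths db = [1, 3], `density` gives n/db/n.sum() = [0.5, 1/6], so each
# bin's value times its width equals its share of the samples, whereas the
# deprecated `normed` gives n/(n*db).sum() = [0.25, 0.25], assigning only a
# quarter of the probability mass to a bin that holds half the samples.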
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : (N, D) array, or (D, N) array_like
The data to be histogrammed.
Note the unusual interpretation of sample when an array_like:
* When an array, each row is a coordinate in a D-dimensional space -
          such as ``histogramdd(np.array([p1, p2, p3]))``.
        * When an array_like, each element is the list of values for a single
          coordinate - such as ``histogramdd((X, Y, Z))``.
The first form should be preferred.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of length D, each an optional (lower, upper) tuple giving
the outer bin edges to be used if the edges are not given explicitly in
`bins`.
An entry of None in the sequence results in the minimum and maximum
values being used for the corresponding dimension.
The default, None, is equivalent to passing a tuple of D None values.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = np.asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# normalize the range argument
if range is None:
range = (None,) * D
elif len(range) != D:
raise ValueError('range argument must have one entry per dimension')
# Create edge arrays
for i in _range(D):
if np.ndim(bins[i]) == 0:
if bins[i] < 1:
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
edges[i] = np.linspace(smin, smax, bins[i] + 1, dtype=edge_dt)
elif np.ndim(bins[i]) == 1:
edges[i] = np.asarray(bins[i], edge_dt)
# not just monotonic, due to the use of mindiff below
if np.any(edges[i][:-1] >= edges[i][1:]):
raise ValueError(
'`bins[{}]` must be strictly increasing, when an array'
.format(i))
else:
raise ValueError(
'`bins[{}]` must be a scalar or 1d array'.format(i))
nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
dedges[i] = np.diff(edges[i])
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = tuple(
np.digitize(sample[:, i], edges[i])
for i in _range(D)
)
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in _range(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-np.log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][on_edge & not_smaller_than_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
# This raises an error if the array is too large.
xy = np.ravel_multi_index(Ncount, nbin)
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
hist = np.bincount(xy, weights, minlength=nbin.prod())
# Shape into a proper matrix
hist = hist.reshape(nbin)
# This preserves the (bad) behavior observed in gh-7845, for now.
hist = hist.astype(float, casting='safe')
# Remove outliers (indices 0 and -1 for each dimension).
core = D*(slice(1, -1),)
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in _range(D):
shape = np.ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
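# Editor's note (illustrative, not in the original source): `nbin` above counts
# two extra "outlier" bins per dimension, so for bins=(5, 8, 4) the flattened
# histogram is accumulated over a (7, 10, 6) grid; the `core` slice then strips
# those outlier bins, returning the expected (5, 8, 4) array.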
| bsd-3-clause | -3,011,626,326,053,294,000 | 34.684211 | 93 | 0.596449 | false |
pszemus/grpc | tools/run_tests/run_interop_tests.py | 1 | 57575 | #!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run interop (cross-language) tests in parallel."""
from __future__ import print_function
import argparse
import atexit
import itertools
import json
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
import uuid
import six
import traceback
import python_utils.dockerjob as dockerjob
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
# It's ok to not import because this is only necessary to upload results to BQ.
try:
from python_utils.upload_test_results import upload_interop_results_to_bq
except ImportError as e:
print(e)
# Docker can leave the terminal with echo disabled, so restore it on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo']))
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
_DEFAULT_SERVER_PORT = 8080
_SKIP_CLIENT_COMPRESSION = [
'client_compressed_unary', 'client_compressed_streaming'
]
_SKIP_SERVER_COMPRESSION = [
'server_compressed_unary', 'server_compressed_streaming'
]
_SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
_SKIP_ADVANCED = [
'status_code_and_message', 'custom_metadata', 'unimplemented_method',
'unimplemented_service'
]
_SKIP_SPECIAL_STATUS_MESSAGE = ['special_status_message']
_GOOGLE_DEFAULT_CREDS_TEST_CASE = 'google_default_credentials'
_SKIP_GOOGLE_DEFAULT_CREDS = [
_GOOGLE_DEFAULT_CREDS_TEST_CASE,
]
_COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE = 'compute_engine_channel_credentials'
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS = [
_COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE,
]
_TEST_TIMEOUT = 3 * 60
# disable this test on core-based languages,
# see https://github.com/grpc/grpc/issues/9779
_SKIP_DATA_FRAME_PADDING = ['data_frame_padding']
# report suffix "sponge_log.xml" is important for reports to get picked up by internal CI
_DOCKER_BUILD_XML_REPORT = 'interop_docker_build/sponge_log.xml'
_TESTS_XML_REPORT = 'interop_test/sponge_log.xml'
class CXXLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.http2_cwd = None
self.safename = 'cxx'
def client_cmd(self, args):
return ['bins/opt/interop_client'] + args
def client_cmd_http2interop(self, args):
return ['bins/opt/http2_client'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['bins/opt/interop_server'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_DATA_FRAME_PADDING + \
_SKIP_SPECIAL_STATUS_MESSAGE + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'c++'
class CSharpLanguage:
def __init__(self):
self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
self.safename = str(self)
def client_cmd(self, args):
return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_SERVER_COMPRESSION + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'csharp'
class CSharpCoreCLRLanguage:
def __init__(self):
self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp2.1'
self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp2.1'
self.safename = str(self)
def client_cmd(self, args):
return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_SERVER_COMPRESSION + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'csharpcoreclr'
class AspNetCoreLanguage:
def __init__(self):
self.client_cwd = '../grpc-dotnet/testassets/InteropTestsClient/bin/Debug/netcoreapp3.0'
self.server_cwd = '../grpc-dotnet/testassets/InteropTestsWebsite/bin/Debug/netcoreapp3.0'
self.safename = str(self)
def cloud_to_prod_env(self):
return {}
def client_cmd(self, args):
return ['dotnet', 'exec', 'InteropTestsClient.dll'] + args
def server_cmd(self, args):
return ['dotnet', 'exec', 'InteropTestsWebsite.dll'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'aspnetcore'
class DartLanguage:
def __init__(self):
self.client_cwd = '../grpc-dart/interop'
self.server_cwd = '../grpc-dart/interop'
self.http2_cwd = '../grpc-dart/interop'
self.safename = str(self)
def client_cmd(self, args):
return ['dart', 'bin/client.dart'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['dart', 'bin/server.dart'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + \
_SKIP_SPECIAL_STATUS_MESSAGE + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION + _SKIP_SPECIAL_STATUS_MESSAGE
def __str__(self):
return 'dart'
class JavaLanguage:
def __init__(self):
self.client_cwd = '../grpc-java'
self.server_cwd = '../grpc-java'
self.http2_cwd = '../grpc-java'
self.safename = str(self)
def client_cmd(self, args):
return ['./run-test-client.sh'] + args
def client_cmd_http2interop(self, args):
return [
'./interop-testing/build/install/grpc-interop-testing/bin/http2-client'
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['./run-test-server.sh'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return []
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'java'
class JavaOkHttpClient:
def __init__(self):
self.client_cwd = '../grpc-java'
self.safename = 'java'
def client_cmd(self, args):
return ['./run-test-client.sh', '--use_okhttp=true'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_DATA_FRAME_PADDING + _SKIP_SPECIAL_STATUS_MESSAGE
def __str__(self):
return 'javaokhttp'
class GoLanguage:
def __init__(self):
# TODO: this relies on running inside docker
self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
self.safename = str(self)
def client_cmd(self, args):
return ['go', 'run', 'client.go'] + args
def client_cmd_http2interop(self, args):
return ['go', 'run', 'negative_http2_client.go'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['go', 'run', 'server.go'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'go'
class Http2Server:
"""Represents the HTTP/2 Interop Test server
This pretends to be a language in order to be built and run, but really it
isn't.
"""
def __init__(self):
self.server_cwd = None
self.safename = str(self)
def server_cmd(self, args):
return ['python test/http2_test/http2_test_server.py']
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _TEST_CASES + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_SPECIAL_STATUS_MESSAGE + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return _TEST_CASES
def __str__(self):
return 'http2'
class Http2Client:
"""Represents the HTTP/2 Interop Test
This pretends to be a language in order to be built and run, but really it
isn't.
"""
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _TEST_CASES + \
_SKIP_SPECIAL_STATUS_MESSAGE + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return _TEST_CASES
def __str__(self):
return 'http2'
class NodeLanguage:
def __init__(self):
self.client_cwd = '../grpc-node'
self.server_cwd = '../grpc-node'
self.safename = str(self)
def client_cmd(self, args):
return [
'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
'node', '--require', './test/fixtures/native_native',
'test/interop/interop_client.js'
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return [
'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
'node', '--require', './test/fixtures/native_native',
'test/interop/interop_server.js'
] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'node'
class NodePureJSLanguage:
def __init__(self):
self.client_cwd = '../grpc-node'
self.server_cwd = '../grpc-node'
self.safename = str(self)
def client_cmd(self, args):
return [
'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
'node', '--require', './test/fixtures/js_js',
'test/interop/interop_client.js'
] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'nodepurejs'
class PHPLanguage:
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['src/php/bin/interop_client.sh'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_SPECIAL_STATUS_MESSAGE + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'php'
class PHP7Language:
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['src/php/bin/interop_client.sh'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_SPECIAL_STATUS_MESSAGE + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'php7'
class ObjcLanguage:
def __init__(self):
self.client_cwd = 'src/objective-c/tests'
self.safename = str(self)
def client_cmd(self, args):
# from args, extract the server port and craft xcodebuild command out of it
for arg in args:
port = re.search('--server_port=(\d+)', arg)
if port:
portnum = port.group(1)
cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test' % portnum
return [cmdline]
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
# ObjC test runs all cases with the same command. It ignores the testcase
# cmdline argument. Here we return all but one test cases as unimplemented,
# and depend upon ObjC test's behavior that it runs all cases even when
# we tell it to run just one.
return _TEST_CASES[1:] + \
_SKIP_COMPRESSION + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_SPECIAL_STATUS_MESSAGE + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'objc'
class RubyLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return [
'tools/run_tests/interop/with_rvm.sh', 'ruby',
'src/ruby/pb/test/client.rb'
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return [
'tools/run_tests/interop/with_rvm.sh', 'ruby',
'src/ruby/pb/test/server.rb'
] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_SERVER_COMPRESSION + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_SPECIAL_STATUS_MESSAGE + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'ruby'
class PythonLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.http2_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return [
'py37_native/bin/python', 'src/python/grpcio_tests/setup.py',
'run_interop', '--client', '--args="{}"'.format(' '.join(args))
]
def client_cmd_http2interop(self, args):
return [
'py37_native/bin/python',
'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return [
'py37_native/bin/python', 'src/python/grpcio_tests/setup.py',
'run_interop', '--server', '--args="{}"'.format(' '.join(args))
]
def global_env(self):
return {
'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)
}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + \
_SKIP_DATA_FRAME_PADDING + \
_SKIP_GOOGLE_DEFAULT_CREDS + \
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'python'
_LANGUAGES = {
'c++': CXXLanguage(),
'csharp': CSharpLanguage(),
'csharpcoreclr': CSharpCoreCLRLanguage(),
'aspnetcore': AspNetCoreLanguage(),
'dart': DartLanguage(),
'go': GoLanguage(),
'java': JavaLanguage(),
'javaokhttp': JavaOkHttpClient(),
'node': NodeLanguage(),
'nodepurejs': NodePureJSLanguage(),
'php': PHPLanguage(),
'php7': PHP7Language(),
'objc': ObjcLanguage(),
'ruby': RubyLanguage(),
'python': PythonLanguage(),
}
# languages supported as cloud_to_cloud servers
_SERVERS = [
'c++', 'node', 'csharp', 'csharpcoreclr', 'aspnetcore', 'java', 'go',
'ruby', 'python', 'dart'
]
_TEST_CASES = [
'large_unary', 'empty_unary', 'ping_pong', 'empty_stream',
'client_streaming', 'server_streaming', 'cancel_after_begin',
'cancel_after_first_response', 'timeout_on_sleeping_server',
'custom_metadata', 'status_code_and_message', 'unimplemented_method',
'client_compressed_unary', 'server_compressed_unary',
'client_compressed_streaming', 'server_compressed_streaming',
'unimplemented_service', 'special_status_message'
]
_AUTH_TEST_CASES = [
'compute_engine_creds',
'jwt_token_creds',
'oauth2_auth_token',
'per_rpc_creds',
_GOOGLE_DEFAULT_CREDS_TEST_CASE,
_COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE,
]
_HTTP2_TEST_CASES = ['tls', 'framing']
_HTTP2_SERVER_TEST_CASES = [
'rst_after_header', 'rst_after_data', 'rst_during_data', 'goaway', 'ping',
'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test'
]
_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = {
'data_frame_padding': 'large_unary',
'no_df_padding_sanity_test': 'large_unary'
}
_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys(
)
_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = [
'java', 'go', 'python', 'c++'
]
_LANGUAGES_FOR_ALTS_TEST_CASES = ['java', 'go', 'c++']
_SERVERS_FOR_ALTS_TEST_CASES = ['java', 'go', 'c++']
_TRANSPORT_SECURITY_OPTIONS = ['tls', 'alts', 'insecure']
_CUSTOM_CREDENTIALS_TYPE_OPTIONS = [
'tls', 'google_default_credentials', 'compute_engine_channel_creds'
]
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
"""Wraps given cmdline array to create 'docker run' cmdline from it."""
docker_cmdline = ['docker', 'run', '-i', '--rm=true']
# turn environ into -e docker args
if environ:
for k, v in environ.items():
docker_cmdline += ['-e', '%s=%s' % (k, v)]
# set working directory
workdir = DOCKER_WORKDIR_ROOT
if cwd:
workdir = os.path.join(workdir, cwd)
docker_cmdline += ['-w', workdir]
docker_cmdline += docker_args + [image] + cmdline
return docker_cmdline
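# Editor's illustrative note (not part of the original script): for example,
#   docker_run_cmdline(['echo', 'hi'], image='img', cwd='src',
#                      environ={'FOO': '1'}, docker_args=['--net=host'])
# returns the list form of:
#   docker run -i --rm=true -e FOO=1 -w /var/local/git/grpc/src --net=host img echo hi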
def manual_cmdline(docker_cmdline, docker_image):
"""Returns docker cmdline adjusted for manual invocation."""
print_cmdline = []
for item in docker_cmdline:
if item.startswith('--name='):
continue
if item == docker_image:
item = "$docker_image"
item = item.replace('"', '\\"')
# add quotes when necessary
if any(character.isspace() for character in item):
item = "\"%s\"" % item
print_cmdline.append(item)
return ' '.join(print_cmdline)
def write_cmdlog_maybe(cmdlog, filename):
"""Returns docker cmdline adjusted for manual invocation."""
if cmdlog:
with open(filename, 'w') as logfile:
logfile.write('#!/bin/bash\n')
logfile.write('# DO NOT MODIFY\n')
logfile.write(
'# This file is generated by run_interop_tests.py/create_testcases.sh\n'
)
logfile.writelines("%s\n" % line for line in cmdlog)
print('Command log written to file %s' % filename)
def bash_cmdline(cmdline):
"""Creates bash -c cmdline from args list."""
    # Run the command through bash so that error messages are clearer if
    # executables are missing.
return ['bash', '-c', ' '.join(cmdline)]
def compute_engine_creds_required(language, test_case):
"""Returns True if given test requires access to compute engine creds."""
language = str(language)
if test_case == 'compute_engine_creds':
return True
if test_case == 'oauth2_auth_token' and language == 'c++':
# C++ oauth2 test uses GCE creds because C++ only supports JWT
return True
return False
def auth_options(language, test_case, google_default_creds_use_key_file,
service_account_key_file, default_service_account):
"""Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
language = str(language)
cmdargs = []
env = {}
oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
key_file_arg = '--service_account_key_file=%s' % service_account_key_file
default_account_arg = '--default_service_account=%s' % default_service_account
if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
if language in [
'csharp', 'csharpcoreclr', 'aspnetcore', 'node', 'php', 'php7',
'python', 'ruby', 'nodepurejs'
]:
env['GOOGLE_APPLICATION_CREDENTIALS'] = service_account_key_file
else:
cmdargs += [key_file_arg]
if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
cmdargs += [oauth_scope_arg]
if test_case == 'oauth2_auth_token' and language == 'c++':
# C++ oauth2 test uses GCE creds and thus needs to know the default account
cmdargs += [default_account_arg]
if test_case == 'compute_engine_creds':
cmdargs += [oauth_scope_arg, default_account_arg]
if test_case == _GOOGLE_DEFAULT_CREDS_TEST_CASE:
if google_default_creds_use_key_file:
env['GOOGLE_APPLICATION_CREDENTIALS'] = service_account_key_file
cmdargs += [default_account_arg]
if test_case == _COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE:
cmdargs += [default_account_arg]
return (cmdargs, env)
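# Editor's illustrative note (not part of the original script): for example,
# auth_options('java', 'oauth2_auth_token', False, '/path/key.json', 'acct')
# returns (['--service_account_key_file=/path/key.json',
#           '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'], {}),
# whereas for a language like 'python' the key file is passed through the
# GOOGLE_APPLICATION_CREDENTIALS environment variable instead of a flag.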
def _job_kill_handler(job):
if job._spec.container_name:
dockerjob.docker_kill(job._spec.container_name)
# When the job times out and we decide to kill it,
    # we need to wait a moment before restarting the job
# to prevent "container name already in use" error.
# TODO(jtattermusch): figure out a cleaner way to this.
time.sleep(2)
def cloud_to_prod_jobspec(language,
test_case,
server_host_nickname,
server_host,
google_default_creds_use_key_file,
docker_image=None,
auth=False,
manual_cmd_log=None,
service_account_key_file=None,
default_service_account=None,
transport_security='tls'):
"""Creates jobspec for cloud-to-prod interop test"""
container_name = None
cmdargs = [
'--server_host=%s' % server_host, '--server_port=443',
'--test_case=%s' % test_case
]
if transport_security == 'tls':
transport_security_options = ['--use_tls=true']
elif transport_security == 'google_default_credentials' and str(
language) in ['c++', 'go', 'java', 'javaokhttp']:
transport_security_options = [
'--custom_credentials_type=google_default_credentials'
]
elif transport_security == 'compute_engine_channel_creds' and str(
language) in ['go', 'java', 'javaokhttp']:
transport_security_options = [
'--custom_credentials_type=compute_engine_channel_creds'
]
else:
print(
'Invalid transport security option %s in cloud_to_prod_jobspec. Lang: %s'
            % (transport_security, str(language)))
sys.exit(1)
cmdargs = cmdargs + transport_security_options
environ = dict(language.cloud_to_prod_env(), **language.global_env())
if auth:
auth_cmdargs, auth_env = auth_options(
language, test_case, google_default_creds_use_key_file,
service_account_key_file, default_service_account)
cmdargs += auth_cmdargs
environ.update(auth_env)
cmdline = bash_cmdline(language.client_cmd(cmdargs))
cwd = language.client_cwd
if docker_image:
container_name = dockerjob.random_name(
'interop_client_%s' % language.safename)
cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
cwd=cwd,
environ=environ,
docker_args=['--net=host',
'--name=%s' % container_name])
if manual_cmd_log is not None:
if manual_cmd_log == []:
manual_cmd_log.append(
'echo "Testing ${docker_image:=%s}"' % docker_image)
manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
cwd = None
environ = None
suite_name = 'cloud_to_prod_auth' if auth else 'cloud_to_prod'
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname='%s:%s:%s:%s:%s' %
(suite_name, language, server_host_nickname, test_case,
transport_security),
timeout_seconds=_TEST_TIMEOUT,
flake_retries=4 if args.allow_flakes else 0,
timeout_retries=2 if args.allow_flakes else 0,
kill_handler=_job_kill_handler)
if docker_image:
test_job.container_name = container_name
return test_job
def cloud_to_cloud_jobspec(language,
test_case,
server_name,
server_host,
server_port,
docker_image=None,
transport_security='tls',
manual_cmd_log=None):
"""Creates jobspec for cloud-to-cloud interop test"""
interop_only_options = [
'--server_host_override=foo.test.google.fr',
'--use_test_ca=true',
]
if transport_security == 'tls':
interop_only_options += ['--use_tls=true']
elif transport_security == 'alts':
interop_only_options += ['--use_tls=false', '--use_alts=true']
elif transport_security == 'insecure':
interop_only_options += ['--use_tls=false']
else:
print('Invalid transport security option %s in cloud_to_cloud_jobspec.'
% transport_security)
sys.exit(1)
client_test_case = test_case
if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[
test_case]
if client_test_case in language.unimplemented_test_cases():
print('asking client %s to run unimplemented test case %s' %
(repr(language), client_test_case))
sys.exit(1)
common_options = [
'--test_case=%s' % client_test_case,
'--server_host=%s' % server_host,
'--server_port=%s' % server_port,
]
if test_case in _HTTP2_SERVER_TEST_CASES:
if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
client_options = interop_only_options + common_options
cmdline = bash_cmdline(language.client_cmd(client_options))
cwd = language.client_cwd
else:
cmdline = bash_cmdline(
language.client_cmd_http2interop(common_options))
cwd = language.http2_cwd
else:
cmdline = bash_cmdline(
language.client_cmd(common_options + interop_only_options))
cwd = language.client_cwd
environ = language.global_env()
if docker_image and language.safename != 'objc':
# we can't run client in docker for objc.
container_name = dockerjob.random_name(
'interop_client_%s' % language.safename)
cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
environ=environ,
cwd=cwd,
docker_args=['--net=host',
'--name=%s' % container_name])
if manual_cmd_log is not None:
if manual_cmd_log == []:
manual_cmd_log.append(
'echo "Testing ${docker_image:=%s}"' % docker_image)
manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
cwd = None
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname='cloud_to_cloud:%s:%s_server:%s:%s' %
(language, server_name, test_case, transport_security),
timeout_seconds=_TEST_TIMEOUT,
flake_retries=4 if args.allow_flakes else 0,
timeout_retries=2 if args.allow_flakes else 0,
kill_handler=_job_kill_handler)
if docker_image:
test_job.container_name = container_name
return test_job
def server_jobspec(language,
docker_image,
transport_security='tls',
manual_cmd_log=None):
"""Create jobspec for running a server"""
container_name = dockerjob.random_name(
'interop_server_%s' % language.safename)
server_cmd = ['--port=%s' % _DEFAULT_SERVER_PORT]
if transport_security == 'tls':
server_cmd += ['--use_tls=true']
elif transport_security == 'alts':
server_cmd += ['--use_tls=false', '--use_alts=true']
elif transport_security == 'insecure':
server_cmd += ['--use_tls=false']
else:
print('Invalid transport security option %s in server_jobspec.' %
transport_security)
sys.exit(1)
cmdline = bash_cmdline(language.server_cmd(server_cmd))
environ = language.global_env()
docker_args = ['--name=%s' % container_name]
if language.safename == 'http2':
# we are running the http2 interop server. Open next N ports beginning
# with the server port. These ports are used for http2 interop test
# (one test case per port).
docker_args += list(
itertools.chain.from_iterable(
('-p', str(_DEFAULT_SERVER_PORT + i))
for i in range(len(_HTTP2_SERVER_TEST_CASES))))
# Enable docker's healthcheck mechanism.
# This runs a Python script inside the container every second. The script
# pings the http2 server to verify it is ready. The 'health-retries' flag
# specifies the number of consecutive failures before docker will report
# the container's status as 'unhealthy'. Prior to the first 'health_retries'
# failures or the first success, the status will be 'starting'. 'docker ps'
# or 'docker inspect' can be used to see the health of the container on the
# command line.
docker_args += [
'--health-cmd=python test/http2_test/http2_server_health_check.py '
'--server_host=%s --server_port=%d' % ('localhost',
_DEFAULT_SERVER_PORT),
'--health-interval=1s',
'--health-retries=5',
'--health-timeout=10s',
]
else:
docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
docker_cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
cwd=language.server_cwd,
environ=environ,
docker_args=docker_args)
if manual_cmd_log is not None:
if manual_cmd_log == []:
manual_cmd_log.append(
'echo "Testing ${docker_image:=%s}"' % docker_image)
manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
server_job = jobset.JobSpec(
cmdline=docker_cmdline,
environ=environ,
shortname='interop_server_%s' % language,
timeout_seconds=30 * 60)
server_job.container_name = container_name
return server_job
def build_interop_image_jobspec(language, tag=None):
"""Creates jobspec for building interop docker image for a language"""
if not tag:
tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
env = {
'INTEROP_IMAGE': tag,
'BASE_NAME': 'grpc_interop_%s' % language.safename
}
if not args.travis:
env['TTY_FLAG'] = '-t'
# This env variable is used to get around the github rate limit
# error when running the PHP `composer install` command
host_file = '%s/.composer/auth.json' % os.environ['HOME']
if language.safename == 'php' and os.path.exists(host_file):
env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
'-v %s:/root/.composer/auth.json:ro' % host_file
build_job = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
environ=env,
shortname='build_docker_%s' % (language),
timeout_seconds=30 * 60)
build_job.tag = tag
return build_job
def aggregate_http2_results(stdout):
match = re.search(r'\{"cases[^\]]*\]\}', stdout)
if not match:
return None
results = json.loads(match.group(0))
skipped = 0
passed = 0
failed = 0
failed_cases = []
for case in results['cases']:
if case.get('skipped', False):
skipped += 1
else:
if case.get('passed', False):
passed += 1
else:
failed += 1
failed_cases.append(case.get('name', "NONAME"))
return {
'passed': passed,
'failed': failed,
'skipped': skipped,
'failed_cases': ', '.join(failed_cases),
'percent': 1.0 * passed / (passed + failed)
}
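# Editor's illustrative note (not part of the original script): given stdout
# containing {"cases": [{"name": "goaway", "passed": true},
#                       {"name": "ping", "skipped": true},
#                       {"name": "rst_after_data", "passed": false}]},
# the function above returns {'passed': 1, 'failed': 1, 'skipped': 1,
# 'failed_cases': 'rst_after_data', 'percent': 0.5}.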
# A dictionary of prod servers to test against.
# See go/grpc-interop-tests (internal-only) for details.
prod_servers = {
'default': 'grpc-test.sandbox.googleapis.com',
'gateway_v4': 'grpc-test4.sandbox.googleapis.com',
}
argp = argparse.ArgumentParser(description='Run interop tests.')
argp.add_argument(
'-l',
'--language',
choices=['all'] + sorted(_LANGUAGES),
nargs='+',
default=['all'],
help='Clients to run. Objc client can be only run on OSX.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
'--cloud_to_prod',
default=False,
action='store_const',
const=True,
help='Run cloud_to_prod tests.')
argp.add_argument(
'--cloud_to_prod_auth',
default=False,
action='store_const',
const=True,
help='Run cloud_to_prod_auth tests.')
argp.add_argument(
'--google_default_creds_use_key_file',
default=False,
action='store_const',
const=True,
help=('Whether or not we should use a key file for the '
'google_default_credentials test case, e.g. by '
'setting env var GOOGLE_APPLICATION_CREDENTIALS.'))
argp.add_argument(
'--prod_servers',
choices=prod_servers.keys(),
default=['default'],
nargs='+',
help=('The servers to run cloud_to_prod and '
'cloud_to_prod_auth tests against.'))
argp.add_argument(
'-s',
'--server',
choices=['all'] + sorted(_SERVERS),
nargs='+',
help='Run cloud_to_cloud servers in a separate docker ' +
'image. Servers can only be started automatically if ' +
'--use_docker option is enabled.',
default=[])
argp.add_argument(
'--override_server',
action='append',
type=lambda kv: kv.split('='),
help=
'Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
default=[])
# TODO(jtattermusch): the default service_account_key_file only works when --use_docker is used.
argp.add_argument(
'--service_account_key_file',
type=str,
help='The service account key file to use for some auth interop tests.',
default='/root/service_account/grpc-testing-ebe7c1ac7381.json')
argp.add_argument(
'--default_service_account',
type=str,
help='Default GCE service account email to use for some auth interop tests.',
default='[email protected]')
argp.add_argument(
'-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
'-v', '--verbose', default=False, action='store_const', const=True)
argp.add_argument(
'--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the interop tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument(
'--allow_flakes',
default=False,
action='store_const',
const=True,
help=
'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
'--manual_run',
default=False,
action='store_const',
const=True,
help='Prepare things for running interop tests manually. ' +
'Preserve docker images after building them and skip '
'actually running the tests. Only print commands to run by ' + 'hand.')
argp.add_argument(
'--http2_interop',
default=False,
action='store_const',
const=True,
help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
argp.add_argument(
'--http2_server_interop',
default=False,
action='store_const',
const=True,
help=
    'Enable HTTP/2 server edge case testing. (Includes positive and negative tests)'
)
argp.add_argument(
'--transport_security',
choices=_TRANSPORT_SECURITY_OPTIONS,
default='tls',
type=str,
nargs='?',
const=True,
help='Which transport security mechanism to use.')
argp.add_argument(
'--custom_credentials_type',
choices=_CUSTOM_CREDENTIALS_TYPE_OPTIONS,
default=_CUSTOM_CREDENTIALS_TYPE_OPTIONS,
nargs='+',
help=
'Credential types to test in the cloud_to_prod setup. Default is to test with all creds types possible.'
)
argp.add_argument(
'--skip_compute_engine_creds',
default=False,
action='store_const',
const=True,
help='Skip auth tests requiring access to compute engine credentials.')
argp.add_argument(
'--internal_ci',
default=False,
action='store_const',
const=True,
help=(
'(Deprecated, has no effect) Put reports into subdirectories to improve '
'presentation of results by Internal CI.'))
argp.add_argument(
'--bq_result_table',
default='',
type=str,
nargs='?',
help='Upload test results to a specified BQ table.')
args = argp.parse_args()
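# Expand the 'all' shorthand in --server into the full server list; explicit names pass through unchanged.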
servers = set(
s
for s in itertools.chain.from_iterable(
_SERVERS if x == 'all' else [x] for x in args.server))
# ALTS servers are only available for certain languages.
if args.transport_security == 'alts':
servers = servers.intersection(_SERVERS_FOR_ALTS_TEST_CASES)
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run interop tests under docker.')
print('')
print(
'IMPORTANT: The changes you are testing need to be locally committed'
)
print(
'because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
if args.manual_run and not args.use_docker:
print('--manual_run is only supported with --use_docker option enabled.')
sys.exit(1)
if not args.use_docker and servers:
print(
'Running interop servers is only supported with --use_docker option enabled.'
)
sys.exit(1)
# we want to include everything but objc in 'all'
# because objc won't run on non-mac platforms
all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc'])
languages = set(_LANGUAGES[l]
for l in itertools.chain.from_iterable(
all_but_objc if x == 'all' else [x] for x in args.language))
# ALTS interop clients are only available for certain languages.
if args.transport_security == 'alts':
alts_languages = set(_LANGUAGES[l] for l in _LANGUAGES_FOR_ALTS_TEST_CASES)
languages = languages.intersection(alts_languages)
languages_http2_clients_for_http2_server_interop = set()
if args.http2_server_interop:
languages_http2_clients_for_http2_server_interop = set(
_LANGUAGES[l]
for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
if 'all' in args.language or l in args.language)
http2Interop = Http2Client() if args.http2_interop else None
http2InteropServer = Http2Server() if args.http2_server_interop else None
docker_images = {}
if args.use_docker:
# languages for which to build docker images
languages_to_build = set(
_LANGUAGES[k]
for k in set([str(l) for l in languages] + [s for s in servers]))
languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
if args.http2_interop:
languages_to_build.add(http2Interop)
if args.http2_server_interop:
languages_to_build.add(http2InteropServer)
build_jobs = []
for l in languages_to_build:
if str(l) == 'objc':
# we don't need to build a docker image for objc
continue
job = build_interop_image_jobspec(l)
docker_images[str(l)] = job.tag
build_jobs.append(job)
if build_jobs:
jobset.message(
'START', 'Building interop docker images.', do_newline=True)
if args.verbose:
print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
num_failures, build_resultset = jobset.run(
build_jobs, newline_on_success=True, maxjobs=args.jobs)
report_utils.render_junit_xml_report(build_resultset,
_DOCKER_BUILD_XML_REPORT)
if num_failures == 0:
jobset.message(
'SUCCESS',
'All docker images built successfully.',
do_newline=True)
else:
jobset.message(
'FAILED',
'Failed to build interop docker images.',
do_newline=True)
for image in six.itervalues(docker_images):
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
server_manual_cmd_log = [] if args.manual_run else None
client_manual_cmd_log = [] if args.manual_run else None
# Start interop servers.
server_jobs = {}
server_addresses = {}
try:
for s in servers:
lang = str(s)
spec = server_jobspec(
_LANGUAGES[lang],
docker_images.get(lang),
args.transport_security,
manual_cmd_log=server_manual_cmd_log)
if not args.manual_run:
job = dockerjob.DockerJob(spec)
server_jobs[lang] = job
server_addresses[lang] = ('localhost',
job.mapped_port(_DEFAULT_SERVER_PORT))
else:
# don't run the server, set server port to a placeholder value
server_addresses[lang] = ('localhost', '${SERVER_PORT}')
http2_server_job = None
if args.http2_server_interop:
# launch a HTTP2 server emulator that creates edge cases
lang = str(http2InteropServer)
spec = server_jobspec(
http2InteropServer,
docker_images.get(lang),
manual_cmd_log=server_manual_cmd_log)
if not args.manual_run:
http2_server_job = dockerjob.DockerJob(spec)
server_jobs[lang] = http2_server_job
else:
# don't run the server, set server port to a placeholder value
server_addresses[lang] = ('localhost', '${SERVER_PORT}')
jobs = []
if args.cloud_to_prod:
if args.transport_security not in ['tls']:
print('TLS is always enabled for cloud_to_prod scenarios.')
for server_host_nickname in args.prod_servers:
for language in languages:
for test_case in _TEST_CASES:
if not test_case in language.unimplemented_test_cases():
if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION + _SKIP_SPECIAL_STATUS_MESSAGE:
for transport_security in args.custom_credentials_type:
# google_default_credentials not yet supported by all languages
if transport_security == 'google_default_credentials' and str(
language) not in [
'c++', 'go', 'java', 'javaokhttp'
]:
continue
# compute_engine_channel_creds not yet supported by all languages
if transport_security == 'compute_engine_channel_creds' and str(
language) not in [
'go', 'java', 'javaokhttp'
]:
continue
test_job = cloud_to_prod_jobspec(
language,
test_case,
server_host_nickname,
prod_servers[server_host_nickname],
google_default_creds_use_key_file=args.
google_default_creds_use_key_file,
docker_image=docker_images.get(
str(language)),
manual_cmd_log=client_manual_cmd_log,
service_account_key_file=args.
service_account_key_file,
default_service_account=args.
default_service_account,
transport_security=transport_security)
jobs.append(test_job)
if args.http2_interop:
for test_case in _HTTP2_TEST_CASES:
test_job = cloud_to_prod_jobspec(
http2Interop,
test_case,
server_host_nickname,
prod_servers[server_host_nickname],
google_default_creds_use_key_file=args.
google_default_creds_use_key_file,
docker_image=docker_images.get(str(http2Interop)),
manual_cmd_log=client_manual_cmd_log,
service_account_key_file=args.service_account_key_file,
default_service_account=args.default_service_account,
transport_security=args.transport_security)
jobs.append(test_job)
if args.cloud_to_prod_auth:
if args.transport_security not in ['tls']:
print('TLS is always enabled for cloud_to_prod scenarios.')
for server_host_nickname in args.prod_servers:
for language in languages:
for test_case in _AUTH_TEST_CASES:
if (not args.skip_compute_engine_creds or
not compute_engine_creds_required(
language, test_case)):
if not test_case in language.unimplemented_test_cases():
if test_case == _GOOGLE_DEFAULT_CREDS_TEST_CASE:
transport_security = 'google_default_credentials'
elif test_case == _COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE:
transport_security = 'compute_engine_channel_creds'
else:
transport_security = 'tls'
if transport_security not in args.custom_credentials_type:
continue
test_job = cloud_to_prod_jobspec(
language,
test_case,
server_host_nickname,
prod_servers[server_host_nickname],
google_default_creds_use_key_file=args.
google_default_creds_use_key_file,
docker_image=docker_images.get(str(language)),
auth=True,
manual_cmd_log=client_manual_cmd_log,
service_account_key_file=args.
service_account_key_file,
default_service_account=args.
default_service_account,
transport_security=transport_security)
jobs.append(test_job)
for server in args.override_server:
server_name = server[0]
(server_host, server_port) = server[1].split(':')
server_addresses[server_name] = (server_host, server_port)
for server_name, server_address in server_addresses.items():
(server_host, server_port) = server_address
server_language = _LANGUAGES.get(server_name, None)
skip_server = [] # test cases unimplemented by server
if server_language:
skip_server = server_language.unimplemented_test_cases_server()
for language in languages:
for test_case in _TEST_CASES:
if not test_case in language.unimplemented_test_cases():
if not test_case in skip_server:
test_job = cloud_to_cloud_jobspec(
language,
test_case,
server_name,
server_host,
server_port,
docker_image=docker_images.get(str(language)),
transport_security=args.transport_security,
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
if args.http2_interop:
for test_case in _HTTP2_TEST_CASES:
if server_name == "go":
# TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
continue
test_job = cloud_to_cloud_jobspec(
http2Interop,
test_case,
server_name,
server_host,
server_port,
docker_image=docker_images.get(str(http2Interop)),
transport_security=args.transport_security,
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
if args.http2_server_interop:
if not args.manual_run:
http2_server_job.wait_for_healthy(timeout_seconds=600)
for language in languages_http2_clients_for_http2_server_interop:
for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(
_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
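                # Each http2 server test case gets its own port, offset from the default
                # server port by the test case's index in sorted order.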
offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
server_port = _DEFAULT_SERVER_PORT + offset
if not args.manual_run:
server_port = http2_server_job.mapped_port(server_port)
test_job = cloud_to_cloud_jobspec(
language,
test_case,
str(http2InteropServer),
'localhost',
server_port,
docker_image=docker_images.get(str(language)),
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
for language in languages:
# HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
# HTTP_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
# than specialized http2 clients, reusing existing test implementations.
            # For example, in the "data_frame_padding" test, use the language's gRPC
            # interop clients and make them think that they're running the "large_unary"
# test case. This avoids implementing a new test case in each language.
for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
if test_case not in language.unimplemented_test_cases():
offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
server_port = _DEFAULT_SERVER_PORT + offset
if not args.manual_run:
server_port = http2_server_job.mapped_port(server_port)
if args.transport_security != 'insecure':
print(
('Creating grpc client to http2 server test case '
'with insecure connection, even though '
'args.transport_security is not insecure. Http2 '
'test server only supports insecure connections.'))
test_job = cloud_to_cloud_jobspec(
language,
test_case,
str(http2InteropServer),
'localhost',
server_port,
docker_image=docker_images.get(str(language)),
transport_security='insecure',
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
if not jobs:
print('No jobs to run.')
for image in six.itervalues(docker_images):
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
if args.manual_run:
        print('All tests will be skipped since the --manual_run option is active.')
if args.verbose:
print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
num_failures, resultset = jobset.run(
jobs,
newline_on_success=True,
maxjobs=args.jobs,
skip_jobs=args.manual_run)
if args.bq_result_table and resultset:
upload_interop_results_to_bq(resultset, args.bq_result_table)
if num_failures:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
else:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
report_utils.render_junit_xml_report(resultset, _TESTS_XML_REPORT)
for name, job in resultset.items():
if "http2" in name:
job[0].http2results = aggregate_http2_results(job[0].message)
http2_server_test_cases = (_HTTP2_SERVER_TEST_CASES
if args.http2_server_interop else [])
if num_failures:
sys.exit(1)
else:
sys.exit(0)
finally:
# Check if servers are still running.
for server, job in server_jobs.items():
if not job.is_running():
print('Server "%s" has exited prematurely.' % server)
dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
for image in six.itervalues(docker_images):
if not args.manual_run:
print('Removing docker image %s' % image)
dockerjob.remove_image(image)
else:
print('Preserving docker image: %s' % image)
| apache-2.0 | 2,339,893,549,345,489,400 | 33.455416 | 187 | 0.583656 | false |
johnkit/vtk-dev | Web/Python/vtk/web/wamp.py | 1 | 9811 | r"""wamp is a module that provide classes that extend any
WAMP related class for the purpose of vtkWeb.
"""
import inspect, types, string, random, logging, six, json
from threading import Timer
from twisted.python import log
from twisted.internet import reactor
from twisted.internet import defer
from twisted.internet.defer import Deferred, returnValue
from autobahn import wamp
from autobahn import util
from autobahn.wamp import types
from autobahn.wamp import auth
from autobahn.wamp import register as exportRpc
from autobahn.twisted.wamp import ApplicationSession, RouterSession
from autobahn.twisted.websocket import WampWebSocketServerFactory
from autobahn.twisted.websocket import WampWebSocketServerProtocol
try:
from vtkWebCore import vtkWebApplication
except:
from vtkWebCorePython import vtkWebApplication
# =============================================================================
salt = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
application = None
# =============================================================================
#
# Base class for vtkWeb WampServerProtocol
#
# =============================================================================
class ServerProtocol(ApplicationSession):
"""
Defines the core server protocol for vtkWeb. Adds support to
marshall/unmarshall RPC callbacks that involve ServerManager proxies as
arguments or return values.
Applications typically don't use this class directly, since it doesn't
register any RPC callbacks that are required for basic web-applications with
interactive visualizations. For that, use vtkWebServerProtocol.
"""
def __init__(self, config):
ApplicationSession.__init__(self, config)
self.vtkWebProtocols = []
self.authdb = None
self.secret = None
self.Application = self.initApplication()
self.initialize()
def setAuthDB(self, db):
self.authdb = db
if self.secret:
self.authdb.updateKey('vtkweb', self.secret)
def initialize(self):
"""
        Let the subclass define what it needs to do to properly initialize
        itself.
"""
pass
def initApplication(self):
"""
        Let the subclass optionally initialize a custom application in lieu
of the default vtkWebApplication.
"""
global application
if not application:
application = vtkWebApplication()
return application
def onJoin(self, details):
ApplicationSession.onJoin(self, details)
self.register(self)
for protocol in self.vtkWebProtocols:
self.register(protocol)
def setApplication(self, application):
self.Application = application
def registerVtkWebProtocol(self, protocol):
protocol.setApplication(self.Application)
self.vtkWebProtocols.append(protocol)
def getVtkWebProtocols(self):
return self.vtkWebProtocols
def updateSecret(self, newSecret):
self.secret = newSecret
if self.authdb:
self.authdb.updateKey('vtkweb', self.secret)
@exportRpc("application.exit")
def exit(self):
"""RPC callback to exit"""
reactor.stop()
@exportRpc("application.exit.later")
def exitLater(self, secondsLater=60):
"""RPC callback to exit after a short delay"""
reactor.callLater(secondsLater, reactor.stop)
# =============================================================================
#
# Base class for vtkWeb WampServerFactory
#
# =============================================================================
class TimeoutWampWebSocketServerFactory(WampWebSocketServerFactory):
"""
TimeoutWampWebSocketServerFactory is WampWebSocketServerFactory subclass
that adds support to close the web-server after a timeout when the last
connected client drops.
Currently, the protocol must call connectionMade() and connectionLost() methods
to notify the factory that the connection was started/closed.
If the connection count drops to zero, then the reap timer
is started which will end the process if no other connections are made in
the timeout interval.
"""
def __init__(self, factory, *args, **kwargs):
self._connection_count = 0
self._timeout = kwargs['timeout']
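        # Start the "reaper" timer right away: if no client connects before the timeout
        # expires, the reactor (and with it the server process) is stopped.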
self._reaper = reactor.callLater(self._timeout, lambda: reactor.stop())
del kwargs['timeout']
WampWebSocketServerFactory.__init__(self, factory, *args, **kwargs)
WampWebSocketServerFactory.protocol = TimeoutWampWebSocketServerProtocol
def connectionMade(self):
if self._reaper:
log.msg("Client has reconnected, cancelling reaper", logLevel=logging.DEBUG)
self._reaper.cancel()
self._reaper = None
self._connection_count += 1
log.msg("on_connect: connection count = %s" % self._connection_count, logLevel=logging.DEBUG)
def connectionLost(self, reason):
if self._connection_count > 0:
self._connection_count -= 1
log.msg("connection_lost: connection count = %s" % self._connection_count, logLevel=logging.DEBUG)
if self._connection_count == 0 and not self._reaper:
log.msg("Starting timer, process will terminate in: %ssec" % self._timeout, logLevel=logging.DEBUG)
self._reaper = reactor.callLater(self._timeout, lambda: reactor.stop())
# =============================================================================
class TimeoutWampWebSocketServerProtocol(WampWebSocketServerProtocol):
def connectionMade(self):
WampWebSocketServerProtocol.connectionMade(self)
self.factory.connectionMade()
def connectionLost(self, reason):
WampWebSocketServerProtocol.connectionLost(self, reason)
self.factory.connectionLost(reason)
# =============================================================================
class AuthDb:
"""
An in-memory-only user database of a single user.
"""
AUTHEXTRA = {'salt': 'salt123', 'keylen': 32, 'iterations': 1000}
def __init__(self):
self._creds = {'vtkweb': auth.derive_key("vtkweb-secret", self.AUTHEXTRA['salt'])}
def get(self, authid):
## we return a deferred to simulate an asynchronous lookup
return defer.succeed(self._creds.get(authid, None))
def updateKey(self, id, newKey):
self._creds[id] = auth.derive_key(newKey, self.AUTHEXTRA['salt'])
# =============================================================================
class PendingAuth:
"""
Used for tracking pending authentications.
"""
def __init__(self, key, session, authid, authrole, authmethod, authprovider):
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
self.authprovider = authprovider
self.session = session
self.timestamp = util.utcnow()
self.nonce = util.newid()
challenge_obj = {
'authid': self.authid,
'authrole': self.authrole,
'authmethod': self.authmethod,
'authprovider': self.authprovider,
'session': self.session,
'nonce': self.nonce,
'timestamp': self.timestamp
}
self.challenge = json.dumps(challenge_obj)
self.signature = auth.compute_wcs(key, self.challenge)
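        # Pre-compute the expected WAMP-CRA signature for this challenge so that
        # onAuthenticate() only has to compare it with the signature the client returns.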
# =============================================================================
class CustomWampCraRouterSession(RouterSession):
"""
A custom router session that authenticates via WAMP-CRA.
"""
def __init__(self, routerFactory):
"""
Constructor.
"""
RouterSession.__init__(self, routerFactory)
@defer.inlineCallbacks
def onHello(self, realm, details):
"""
Callback fired when client wants to attach session.
"""
self._pending_auth = None
if details.authmethods:
for authmethod in details.authmethods:
if authmethod == u"wampcra":
authdb = self.factory.authdb
## lookup user in user DB
key = yield authdb.get(details.authid)
## if user found ..
if key:
## setup pending auth
self._pending_auth = PendingAuth(key, details.pending_session,
details.authid, "user", authmethod, "authdb")
## send challenge to client
extra = { 'challenge': self._pending_auth.challenge }
## when using salted passwords, provide the client with
## the salt and then PBKDF2 parameters used
extra['salt'] = authdb.AUTHEXTRA['salt']
extra['iterations'] = 1000
extra['keylen'] = 32
defer.returnValue(types.Challenge('wampcra', extra))
## deny client
defer.returnValue(types.Deny())
def onAuthenticate(self, signature, extra):
"""
Callback fired when a client responds to an authentication challenge.
"""
## if there is a pending auth, and the signature provided by client matches ..
if self._pending_auth and signature == self._pending_auth.signature:
## accept the client
return types.Accept(authid = self._pending_auth.authid,
authrole = self._pending_auth.authrole,
authmethod = self._pending_auth.authmethod,
authprovider = self._pending_auth.authprovider)
## deny client
return types.Deny()
| bsd-3-clause | -8,795,263,232,398,575,000 | 33.545775 | 111 | 0.59525 | false |
bcgov/gwells | app/backend/wells/migrations/0081_update_well_disinfect_values.py | 1 | 1097 | from django.db import migrations
# This can be deleted when doing the next squash of migrations because it's a one-time update
def migrate_well_disinfected(apps, schema_editor):
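    # Map the legacy boolean well_disinfected field onto the new coded
    # well_disinfected_status values: True -> Disinfected, False -> Not Disinfected,
    # NULL -> Unknown.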
well = apps.get_model('wells', 'well')
code = apps.get_model('wells', 'welldisinfectedcode')
disinfected = code.objects.filter(well_disinfected_code='Disinfected').first()
not_disinfected = code.objects.filter(well_disinfected_code='Not Disinfected').first()
unknown = code.objects.filter(well_disinfected_code='Unknown').first()
well.objects.filter(well_disinfected=True).update(
well_disinfected_status=disinfected)
well.objects.filter(well_disinfected=False).update(
well_disinfected_status=not_disinfected)
well.objects.filter(well_disinfected__isnull=True).update(
well_disinfected_status=unknown)
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('wells', '0080_add_well_disinfect_status'),
]
operations = [
migrations.RunPython(migrate_well_disinfected, reverse),
]
| apache-2.0 | -6,403,787,883,633,071,000 | 30.342857 | 90 | 0.714676 | false |
JacobSheehy/pressureNETAnalysis | readings/models.py | 1 | 2519 | from django.db import models
from readings import choices as readings_choices
class DateLocationMeasurementModel(models.Model):
"""
Abstract base class for PressureNET measurements.
"""
date = models.DateTimeField(auto_now_add=True, null=True, blank=True)
user_id = models.CharField(max_length=255, db_index=True)
latitude = models.FloatField(db_index=True)
longitude = models.FloatField(db_index=True)
altitude = models.FloatField(default=0.0)
daterecorded = models.BigIntegerField(db_index=True)
tzoffset = models.BigIntegerField()
client_key = models.CharField(max_length=255)
sharing = models.CharField(max_length=255, choices=readings_choices.SHARING_CHOICES)
provider = models.CharField(max_length=255, default='')
class Meta:
abstract = True
class Reading(DateLocationMeasurementModel):
"""
Barometer reading from pressureNET
"""
reading = models.FloatField()
reading_accuracy = models.FloatField()
observation_type = models.CharField(max_length=255, default='')
observation_unit = models.CharField(max_length=255, default='')
location_accuracy = models.FloatField()
class Meta:
verbose_name = 'reading'
verbose_name_plural = 'readings'
def __unicode__(self):
return '%s: %s' % (self.user_id, self.reading)
class Condition(DateLocationMeasurementModel):
"""
Condition reading from pressureNET
"""
accuracy = models.FloatField()
general_condition = models.CharField(max_length=255)
windy = models.CharField(max_length=255)
fog_thickness = models.CharField(max_length=255)
precipitation_type = models.CharField(max_length=255)
precipitation_amount = models.FloatField()
precipitation_unit = models.CharField(max_length=255)
thunderstorm_intensity = models.CharField(max_length=255)
user_comment = models.CharField(max_length=255)
class Meta:
verbose_name = 'condition'
verbose_name_plural = 'conditions'
def __unicode__(self):
return '%s: %s' % (self.user_id, self.general_condition)
class ReadingSync(models.Model):
"""
Reading synchronization from Tomcat Server
"""
date = models.DateTimeField(auto_now_add=True)
readings = models.IntegerField()
processing_time = models.FloatField()
class Meta:
verbose_name = 'reading sync'
verbose_name_plural = 'reading syncs'
def __unicode__(self):
return '%s: %s' % (self.date, self.readings)
| gpl-3.0 | 2,577,059,662,120,520,000 | 31.294872 | 88 | 0.688765 | false |
jtylers/tools | decrypt.py | 1 | 10290 | #!/usr/bin/python2.7
import re
import sys
from string import maketrans
max_word_size = 25
offset = 65
solutions = []
maps = []
filename = ""
cypher = ""
known_letters = ""
mapped_to = ""
precise = False
verbose = False
thorough = False
color = False
blue = ""
green = ""
red = ""
white = ""
# Return letters in the string that aren't part of the input_letters
def unused_letters(input_letters,input_string):
unused_letters = ""
for x in input_string:
found = False
for y in input_letters:
if x == y:
found = True
if not found:
input_letters += x
unused_letters += x
return unused_letters
# Handle command line arguments
# Python's for loop doesn't work like a c++ for loop so I did it this way.
a = 1
while a < len(sys.argv):
if sys.argv[a] == "help" or sys.argv[a] == "-?" or sys.argv[a] == "-h" or sys.argv[a] == "--help":
print "usage: ./decrypt.py [-d dictionary.file] [-m <mapped>:<letters>] [cryptogram] [-v|--verbose] [-t|--thorough] [-c|--color] [-p|--precise]"
print "example: ./decrypt.py -d dictionary.txt -m QWJFLD:ABCDEF KCGPBWK ZKFDMBX ZUFXUHAGDM XKCX"
exit()
# True or false command line options
elif sys.argv[a] == "-c" or sys.argv[a] == "--color":
color = True
blue = "\033[1;36;40m"
green = "\033[1;32;40m"
red = "\033[1;31;40m"
white = "\033[1;37;40m"
elif sys.argv[a] == "-p" or sys.argv[a] == "--precise":
precise = True
elif sys.argv[a] == "-t" or sys.argv[a] == "--thorough":
thorough = True
elif sys.argv[a] == "-v" or sys.argv[a] == "--verbose":
verbose = True
# Command line arguments with parameters
elif sys.argv[a] == "-m":
a+=1
if a >= len(sys.argv) or sys.argv[a][0] == "-" or ':' not in sys.argv[a]:
print "-m takes an argument in the format ABCD:EFGH"
exit()
mapped_to = unused_letters("",re.sub(':.+$','',sys.argv[a].upper()))
known_letters = unused_letters("",re.sub('.+:','',sys.argv[a].upper()))
if not len(mapped_to) == len(known_letters):
print ("both sides of \"" + known_letters + ":" + mapped_to + "\" must be the same length")
exit()
elif sys.argv[a] == "-d":
a+=1
if a >= len(sys.argv) or sys.argv[a][0] == "-":
print "-d takes an argument!"
exit()
filename = sys.argv[a]
# Whatever is left over is part of the cypher as long as it doesn't start with a hyphen
elif not sys.argv[a][0] == "-":
cypher += (sys.argv[a].upper() + " ")
    # Anything with a hyphen that hasn't been taken care of is illegal
else:
print sys.argv[a] + " is not a recognized option"
exit()
a+=1
# Import the dictionary, ask for a filename if none was given
if filename == "":
filename = raw_input("Path to the dictionary file: [/usr/share/dict/words] ")
if filename == "":
filename = "/usr/share/dict/words"
words = set()
with open(filename) as f: words = set(f.readlines())
# Get the cypher from user if there wasn't one on the command line
if cypher == "":
cypher = raw_input("Enter the cypher, then press ENTER:\n").upper()
# See if a word matches what I have so far
def match(word,key,crypto_letters,translation):
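    # A candidate dictionary 'word' is consistent with the partial mapping when every
    # letter of 'word' that already appears in 'translation' maps back to the same
    # cypher letter (from 'crypto_letters') that 'key' has at that position.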
k = 0
for w in word:
c = 0
for t in translation:
if w == t: # if a letter in the word is in the translation
# It has to match the same spot in the key
if not key[k] == crypto_letters[c]:
#print "False because " + t + " = " + crypto_letters[c] + " not " + key[k]
return False
c += 1
k += 1
return True
# Recursive function
def decrypt(crypto_letters,translation,hashmap):
# Print out the text being worked on
#untested_letters = unused_letters("qwertyuiopasdfghjklzxcvbnm",crypto_letters)
#output_trans = maketrans(crypto_letters+untested_letters,translation+"*"*len(untested_letters))
output_trans = maketrans(crypto_letters,translation)
sys.stdout.flush()
solution = cypher.translate(output_trans)
sys.stdout.write(solution+"\r")
sys.stdout.flush()
# If a key has a letter not in the crypto_letters string, then call decrypt on it
a=0
for key in hashmap:
a+=1
unused = unused_letters(crypto_letters,key)
# Base case: Once all the letters in all the keys have been used
if not unused == "":
b=0
for word in hashmap[key]:
b+=1
sys.stdout.flush()
#sys.stdout.write(solution + " " + str(a+1) + "/" + str(len(hashmap)) + " " + str(b+1) + "/"+ str(len(hashmap[key])) + " \r")
new_trans = unused_letters(translation,word)
if not new_trans == "":
# If the word has any letters in the translation, they should be in the right spot
if len(new_trans) == len(unused) and match(word,key,crypto_letters,translation):
                        # If the recursive call comes back empty, this word leads to no complete solution; skip it and keep looping
possible = decrypt(crypto_letters + unused,translation + new_trans,hashmap)
if not possible == "":
return possible
# If none of the words work out then return an empty string
return ""
return crypto_letters + ":" + translation
# Make it possible to loop through all the calculations again
def full_decryption(cypher_text):
original_word_list = map(lambda x:re.sub('[^A-Z]+',"",x), cypher_text.split())
original_lists = [[] for i in range(max_word_size)]
hashmap = {}
# Add words to the dictionary based on their length
for word in original_word_list:
original_lists[len(word.strip())].append(word.strip().upper())
hashmap[word.strip().upper()] = set()
dictionary = [[] for i in range(max_word_size)]
for word in words:
new_word = re.sub('[^A-Z]+',"",word.strip().upper())
dictionary[len(new_word)].append(new_word)
# Add all matching words to the hash map
word_length = 0
for lists in (original_lists):
if (lists):
for x_word in lists:
for y_word in (dictionary[word_length]):
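                    # Reduce both words to a canonical letter-repetition pattern (each
                    # position's letter is mapped to a placeholder symbol); a dictionary
                    # word is kept as a candidate only when its pattern matches the
                    # cypher word's pattern.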
x_trans = x_word
y_trans = y_word
for i in range(0,word_length):
x_tab = maketrans(str(x_trans[i]),str(chr(33+i)))
y_tab = maketrans(str(y_trans[i]),str(chr(33+i)))
x_trans = x_trans.translate(x_tab)
y_trans = y_trans.translate(y_tab)
# When a dictionary word has letters in the same places as a cypher
if x_trans == y_trans:
hashmap[x_word].add(y_word)
word_length += 1
# Initialize Recursion
a = 0
for key in hashmap:
a+=1
#print "\n" + key + ":\n"+ str(hashmap[key])
b = 0
for word in hashmap[key]:
b+=1
answer = ""
full_key = unused_letters("",mapped_to + key)
full_word = unused_letters("",known_letters + word)
if len(full_word) == len(full_key):
if full_key+full_word not in maps:
#print full_key + ":" + full_word
maps.append(full_key+full_word)
answer = decrypt(full_key,full_word,hashmap)
# Turn answer into translate table
mixed = re.sub(':.+$','',answer)
translated = re.sub('.+:','',answer)
if (len(mixed) == len(translated)) and not answer == "":
translate_table = maketrans(mixed,translated)
solution = cypher_text.translate(translate_table)
if (solution not in solutions) and not answer == full_key+":"+full_word:
valid_solution = True
color_solution = ""
# Double check that the solution is indeed a correct one, with "color" show it, with "precise" eliminate it
if precise or color:
for c in range (0,len(original_word_list)):
solution_words = solution.split()
if solution_words[c] not in hashmap[original_word_list[c]]:
color_solution += red + solution_words[c] + " "
else:
color_solution += green + solution_words[c] + " "
if precise:
valid_solution = False
if color:
solution = color_solution + white
# print "Translate table -> " + answer
if valid_solution:
solutions.append(solution)
if verbose:
print (" "*len(cypher))
sys.stdout.flush()
#print key + ":" + word
print blue + answer + white
#print unused_letters(mapped_to,mixed)
print(solution + " " + blue + str(a+1) + "/" + str(len(hashmap)) + " " + str(b+1) + "/"+ str(len(hashmap[key]))+ white)
else:
print(solution)
# Run the program once
full_decryption(cypher)
# Run the program on each solution until there are no more possibilities
if thorough:
solutions_size = len(solutions)
new_solutions = 1
while not solutions_size == new_solutions:
for solution in solutions:
full_decryption(solution)
new_solutions = len(solutions)
# Give advice if there were no solutions
if len(solutions) == 0:
    print red + "No solutions were found!\n" + white + "Try adding the --thorough flag, removing the --precise flag, simplifying your -m map, or adding/removing words from the cryptogram"
else:
# Remove the last line of jibberish
sys.stdout.flush()
sys.stdout.write(" "*len(cypher))
| gpl-2.0 | 3,257,954,420,681,244,000 | 40.659919 | 182 | 0.538581 | false |
NervanaSystems/coach | rl_coach/agents/cil_agent.py | 1 | 3783 | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Union
from rl_coach.agents.imitation_agent import ImitationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import RegressionHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AgentParameters, MiddlewareScheme, NetworkParameters, AlgorithmParameters
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.memories.non_episodic.balanced_experience_replay import BalancedExperienceReplayParameters
class CILAlgorithmParameters(AlgorithmParameters):
"""
:param state_key_with_the_class_index: (str)
The key of the state dictionary which corresponds to the value that will be used to control the class index.
"""
def __init__(self):
super().__init__()
self.state_key_with_the_class_index = 'high_level_command'
class CILNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
self.middleware_parameters = FCMiddlewareParameters(scheme=MiddlewareScheme.Medium)
self.heads_parameters = [RegressionHeadParameters()]
self.optimizer_type = 'Adam'
self.batch_size = 32
self.replace_mse_with_huber_loss = False
self.create_target_network = False
class CILAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=CILAlgorithmParameters(),
exploration=EGreedyParameters(),
memory=BalancedExperienceReplayParameters(),
networks={"main": CILNetworkParameters()})
@property
def path(self):
return 'rl_coach.agents.cil_agent:CILAgent'
# Conditional Imitation Learning Agent: https://arxiv.org/abs/1710.02410
class CILAgent(ImitationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.current_high_level_control = 0
def choose_action(self, curr_state):
self.current_high_level_control = curr_state[self.ap.algorithm.state_key_with_the_class_index]
return super().choose_action(curr_state)
def extract_action_values(self, prediction):
return prediction[self.current_high_level_control].squeeze()
def learn_from_batch(self, batch):
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
target_values = self.networks['main'].online_network.predict({**batch.states(network_keys)})
branch_to_update = batch.states([self.ap.algorithm.state_key_with_the_class_index])[self.ap.algorithm.state_key_with_the_class_index]
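        # Conditional imitation learning update: only the regression branch selected by
        # each sample's high-level command is pushed toward the demonstrated action; the
        # other branches keep the network's own predictions as targets, so they
        # contribute (almost) no error.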
for idx, branch in enumerate(branch_to_update):
target_values[branch][idx] = batch.actions()[idx]
result = self.networks['main'].train_and_sync_networks({**batch.states(network_keys)}, target_values)
total_loss, losses, unclipped_grads = result[:3]
return total_loss, losses, unclipped_grads
| apache-2.0 | -6,056,350,723,993,225,000 | 42.482759 | 141 | 0.716363 | false |
LCAS/spqrel_tools | slu4p/speech_to_text/audio_recorder.py | 1 | 3431 | import pyaudio
import wave
import time
class AudioRecorder(object):
'''A recorder class for recording audio to a WAV file.
Records in mono by default.
'''
def __init__(self, channels=1, rate=44100, frames_per_buffer=1024):
self.channels = channels
self.rate = rate
self.frames_per_buffer = frames_per_buffer
self.recordingFile = None
def open(self, fname, mode='wb'):
return RecordingFile(fname, mode, self.channels, self.rate,
self.frames_per_buffer)
def startMicrophonesRecording(self, file_name):
self.recordingFile = self.open(file_name, 'wb')
self.recordingFile.start_recording()
def stopMicrophonesRecording(self):
try:
self.recordingFile.stop_recording()
except Exception as e:
print 'Already stopped'
class RecordingFile(object):
def __init__(self, fname, mode, channels,
rate, frames_per_buffer):
self.fname = fname
self.mode = mode
self.channels = channels
self.rate = rate
self.frames_per_buffer = frames_per_buffer
self._pa = pyaudio.PyAudio()
self.wavefile = self._prepare_file(self.fname, self.mode)
self._stream = None
def __enter__(self):
return self
def __exit__(self, exception, value, traceback):
self.close()
def record(self, duration):
# Use a stream with no callback function in blocking mode
self._stream = self._pa.open(format=pyaudio.paInt16,
channels=self.channels,
rate=self.rate,
input=True,
frames_per_buffer=self.frames_per_buffer)
for _ in range(int(self.rate / self.frames_per_buffer * duration)):
audio = self._stream.read(self.frames_per_buffer)
self.wavefile.writeframes(audio)
return None
def start_recording(self):
# Use a stream with a callback in non-blocking mode
self._stream = self._pa.open(format=pyaudio.paInt16,
channels=self.channels,
rate=self.rate,
input=True,
frames_per_buffer=self.frames_per_buffer,
stream_callback=self.get_callback())
self._stream.start_stream()
return self
def stop_recording(self):
self._stream.stop_stream()
return self
def get_callback(self):
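        # PyAudio calls this callback from its own audio thread for each captured buffer;
        # returning (data, paContinue) keeps the non-blocking stream running while the
        # frames are appended to the wave file.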
def callback(in_data, frame_count, time_info, status):
self.wavefile.writeframes(in_data)
return in_data, pyaudio.paContinue
return callback
def close(self):
self._stream.close()
self._pa.terminate()
self.wavefile.close()
def _prepare_file(self, fname, mode='wb'):
wavefile = wave.open(fname, mode)
wavefile.setnchannels(self.channels)
wavefile.setsampwidth(self._pa.get_sample_size(pyaudio.paInt16))
wavefile.setframerate(self.rate)
return wavefile
def main():
rec = AudioRecorder(channels=2)
rec.startMicrophonesRecording('nonblocking.wav')
time.sleep(5.0)
rec.stopMicrophonesRecording()
if __name__ == "__main__":
main()
| mit | 837,996,950,919,042,400 | 32.637255 | 81 | 0.567182 | false |
bfagundez/apex_paperboy | test/project_operation_tests.py | 1 | 7585 | #TO RUN: joey2 project_operation_tests.py
import sys
import os
import unittest
import shutil
sys.path.append('../')
import lib.config as config
import lib.mm_util as util
import test_helper as helper
from lib.mm_connection import MavensMatePluginConnection
from lib.mm_client import MavensMateClient
class TestProjectOperations(unittest.TestCase):
RunInitialDelete = True
# FYI: overriding this constructor is apparently not recommended, so we should find a better way to init test data
def __init__(self, *args, **kwargs):
super(TestProjectOperations, self).__init__(*args, **kwargs)
self.project_name = 'MavensMateUnitTestProject'
self.username = '[email protected]'
self.password = 'force'
self.org_type = 'developer'
def setUp(self):
config.connection = MavensMatePluginConnection(client='Sublime Text')
if os.path.exists(config.connection.workspace+"/MavensMateUnitTestProject"):
shutil.rmtree(config.connection.workspace+"/MavensMateUnitTestProject")
temp_client = MavensMateClient(credentials={"username":self.username, "password":self.password})
if self.RunInitialDelete:
helper.delete_metadata(
temp_client,
{
'ApexClass' : ['apex_class_from_unit_test_123'],
'ApexTrigger' : ['apex_trigger_from_unit_test_123'],
'ApexPage' : ['apex_page_from_unit_test_123'],
'ApexComponent' : ['apex_component_from_unit_test_123']
}
)
self.__class__.RunInitialDelete = False
def test_index_project(self):
config.connection.new_project
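        # NOTE: this only references config.connection.new_project without calling it,
        # so the test is currently a placeholder that always passes.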
def test_clean_project(self):
config.connection.new_project(params={
"project_name" : self.project_name,
"username" : self.username,
"password" : self.password,
"org_type" : self.org_type,
"package" : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
},action='new')
config.connection.project.clean()
self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"))
self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/config"))
self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src"))
self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/classes"))
self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/components"))
self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/objects"))
self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/pages"))
self.assertTrue(os.path.isfile(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/package.xml"))
def test_create_new_apex_class(self):
config.connection.new_project(params={
"project_name" : self.project_name,
"username" : self.username,
"password" : self.password,
"org_type" : self.org_type,
"package" : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
},action='new')
deploy_result = config.connection.project.new_metadata(
api_name = 'apex_class_from_unit_test_123',
apex_class_type = 'default',
metadata_type = 'ApexClass'
)
print deploy_result
self.assertTrue(deploy_result.success == True)
helper.delete_metadata(config.connection.project.sfdc_client, {'ApexClass':['apex_class_from_unit_test_123']})
def test_compile_project(self):
config.connection.new_project(params={
"project_name" : self.project_name,
"username" : self.username,
"password" : self.password,
"org_type" : self.org_type,
"package" : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
},action='new')
deploy_result = config.connection.project.compile()
print deploy_result
self.assertTrue(deploy_result.success == True)
def test_create_new_apex_trigger(self):
config.connection.new_project(params={
"project_name" : self.project_name,
"username" : self.username,
"password" : self.password,
"org_type" : self.org_type,
"package" : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
},action='new')
deploy_result = config.connection.project.new_metadata(
api_name = 'apex_trigger_from_unit_test_123',
metadata_type = 'ApexTrigger',
apex_trigger_object_api_name = 'Account'
)
print deploy_result
self.assertTrue(deploy_result.success == True)
helper.delete_metadata(config.connection.project.sfdc_client, {'ApexTrigger':['apex_trigger_from_unit_test_123']})
def test_create_new_apex_page(self):
config.connection.new_project(params={
"project_name" : self.project_name,
"username" : self.username,
"password" : self.password,
"org_type" : self.org_type,
"package" : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
},action='new')
deploy_result = config.connection.project.new_metadata(
api_name = 'apex_page_from_unit_test_123',
metadata_type = 'ApexPage'
)
print deploy_result
self.assertTrue(deploy_result.success == True)
helper.delete_metadata(config.connection.project.sfdc_client, {'ApexPage':['apex_page_from_unit_test_123']})
def test_create_new_apex_component(self):
config.connection.new_project(params={
"project_name" : self.project_name,
"username" : self.username,
"password" : self.password,
"org_type" : self.org_type,
"package" : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
},action='new')
deploy_result = config.connection.project.new_metadata(
api_name = 'apex_component_from_unit_test_123',
metadata_type = 'ApexComponent'
)
print deploy_result
self.assertTrue(deploy_result.success == True)
helper.delete_metadata(config.connection.project.sfdc_client, {'ApexComponent':['apex_component_from_unit_test_123']})
def do_test_assumptions(self):
pass
def tearDown(self):
try:
pass
#shutil.rmtree(config.connection.workspace+"/MavensMateUnitTestProject")
except:
pass
if __name__ == '__main__':
unittest.main() | mit | 6,917,280,531,064,079,000 | 48.25974 | 157 | 0.607515 | false |
kerwinxu/barcodeManager | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/mslink.py | 1 | 10946 | """SCons.Tool.mslink
Tool-specific initialization for the Microsoft linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mslink.py 5023 2010/06/14 22:05:46 scons"
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvc
import SCons.Tool.msvs
import SCons.Util
from MSCommon import msvc_setup_env_once, msvc_exists
def pdbGenerator(env, target, source, for_signature):
try:
return ['/PDB:%s' % target[0].attributes.pdb, '/DEBUG']
except (AttributeError, IndexError):
return None
def _dllTargets(target, source, env, for_signature, paramtp):
listCmd = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
if dll: listCmd.append("/out:%s"%dll.get_string(for_signature))
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: listCmd.append("/implib:%s"%implib.get_string(for_signature))
return listCmd
def _dllSources(target, source, env, for_signature, paramtp):
listCmd = []
deffile = env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX")
for src in source:
# Check explicitly for a non-None deffile so that the __cmp__
# method of the base SCons.Util.Proxy class used for some Node
# proxies doesn't try to use a non-existent __dict__ attribute.
if deffile and src == deffile:
# Treat this source as a .def file.
listCmd.append("/def:%s" % src.get_string(for_signature))
else:
# Just treat it as a generic source file.
listCmd.append(src)
return listCmd
def windowsShlinkTargets(target, source, env, for_signature):
return _dllTargets(target, source, env, for_signature, 'SHLIB')
def windowsShlinkSources(target, source, env, for_signature):
return _dllSources(target, source, env, for_signature, 'SHLIB')
def _windowsLdmodTargets(target, source, env, for_signature):
"""Get targets for loadable modules."""
return _dllTargets(target, source, env, for_signature, 'LDMODULE')
def _windowsLdmodSources(target, source, env, for_signature):
"""Get sources for loadable modules."""
return _dllSources(target, source, env, for_signature, 'LDMODULE')
def _dllEmitter(target, source, env, paramtp):
"""Common implementation of dll emitter."""
SCons.Tool.msvc.validate_vars(env)
extratargets = []
extrasources = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError('A shared library should have exactly one target with the suffix: %s' % env.subst('$%sSUFFIX' % paramtp))
insert_def = env.subst("$WINDOWS_INSERT_DEF")
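    # When WINDOWS_INSERT_DEF is enabled and no .def file is already listed as a source,
    # synthesize a matching module-definition (.def) file name next to the DLL target.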
if not insert_def in ['', '0', 0] and \
not env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"):
# append a def file to the list of sources
extrasources.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"))
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
# MSVC 8 automatically generates .manifest files that must be installed
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSSHLIBMANIFESTPREFIX", "WINDOWSSHLIBMANIFESTSUFFIX"))
if 'PDB' in env and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
if not no_import_lib and \
not env.FindIxes(target, "LIBPREFIX", "LIBSUFFIX"):
# Append an import library to the list of targets.
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"LIBPREFIX", "LIBSUFFIX"))
        # and an .exp file is created if there are exports from a DLL
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSEXPPREFIX", "WINDOWSEXPSUFFIX"))
return (target+extratargets, source+extrasources)
def windowsLibEmitter(target, source, env):
return _dllEmitter(target, source, env, 'SHLIB')
def ldmodEmitter(target, source, env):
"""Emitter for loadable modules.
Loadable modules are identical to shared libraries on Windows, but building
them is subject to different parameters (LDMODULE*).
"""
return _dllEmitter(target, source, env, 'LDMODULE')
def prog_emitter(target, source, env):
SCons.Tool.msvc.validate_vars(env)
extratargets = []
exe = env.FindIxes(target, "PROGPREFIX", "PROGSUFFIX")
if not exe:
raise SCons.Errors.UserError("An executable should have exactly one target with the suffix: %s" % env.subst("$PROGSUFFIX"))
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
# MSVC 8 automatically generates .manifest files that have to be installed
extratargets.append(
env.ReplaceIxes(exe,
"PROGPREFIX", "PROGSUFFIX",
"WINDOWSPROGMANIFESTPREFIX", "WINDOWSPROGMANIFESTSUFFIX"))
if 'PDB' in env and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
return (target+extratargets,source)
def RegServerFunc(target, source, env):
if 'register' in env and env['register']:
ret = regServerAction([target[0]], [source[0]], env)
if ret:
raise SCons.Errors.UserError("Unable to register %s" % target[0])
else:
            print "Registered %s successfully" % target[0]
return ret
return 0
regServerAction = SCons.Action.Action("$REGSVRCOM", "$REGSVRCOMSTR")
regServerCheck = SCons.Action.Action(RegServerFunc, None)
shlibLinkAction = SCons.Action.Action('${TEMPFILE("$SHLINK $SHLINKFLAGS $_SHLINK_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_SHLINK_SOURCES")}')
compositeShLinkAction = shlibLinkAction + regServerCheck
ldmodLinkAction = SCons.Action.Action('${TEMPFILE("$LDMODULE $LDMODULEFLAGS $_LDMODULE_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_LDMODULE_SOURCES")}')
compositeLdmodAction = ldmodLinkAction + regServerCheck
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS /dll')
env['_SHLINK_TARGETS'] = windowsShlinkTargets
env['_SHLINK_SOURCES'] = windowsShlinkSources
env['SHLINKCOM'] = compositeShLinkAction
env.Append(SHLIBEMITTER = [windowsLibEmitter])
env['LINK'] = 'link'
env['LINKFLAGS'] = SCons.Util.CLVar('/nologo')
env['_PDB'] = pdbGenerator
env['LINKCOM'] = '${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows $_LIBDIRFLAGS $_LIBFLAGS $_PDB $SOURCES.windows")}'
env.Append(PROGEMITTER = [prog_emitter])
env['LIBDIRPREFIX']='/LIBPATH:'
env['LIBDIRSUFFIX']=''
env['LIBLINKPREFIX']=''
env['LIBLINKSUFFIX']='$LIBSUFFIX'
env['WIN32DEFPREFIX'] = ''
env['WIN32DEFSUFFIX'] = '.def'
env['WIN32_INSERT_DEF'] = 0
env['WINDOWSDEFPREFIX'] = '${WIN32DEFPREFIX}'
env['WINDOWSDEFSUFFIX'] = '${WIN32DEFSUFFIX}'
env['WINDOWS_INSERT_DEF'] = '${WIN32_INSERT_DEF}'
env['WIN32EXPPREFIX'] = ''
env['WIN32EXPSUFFIX'] = '.exp'
env['WINDOWSEXPPREFIX'] = '${WIN32EXPPREFIX}'
env['WINDOWSEXPSUFFIX'] = '${WIN32EXPSUFFIX}'
env['WINDOWSSHLIBMANIFESTPREFIX'] = ''
env['WINDOWSSHLIBMANIFESTSUFFIX'] = '${SHLIBSUFFIX}.manifest'
env['WINDOWSPROGMANIFESTPREFIX'] = ''
env['WINDOWSPROGMANIFESTSUFFIX'] = '${PROGSUFFIX}.manifest'
env['REGSVRACTION'] = regServerCheck
env['REGSVR'] = os.path.join(SCons.Platform.win32.get_system_root(),'System32','regsvr32')
env['REGSVRFLAGS'] = '/s '
env['REGSVRCOM'] = '$REGSVR $REGSVRFLAGS ${TARGET.windows}'
# Set-up ms tools paths
msvc_setup_env_once(env)
# Loadable modules are on Windows the same as shared libraries, but they
# are subject to different build parameters (LDMODULE* variables).
# Therefore LDMODULE* variables correspond as much as possible to
# SHLINK*/SHLIB* ones.
SCons.Tool.createLoadableModuleBuilder(env)
env['LDMODULE'] = '$SHLINK'
env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
env['_LDMODULE_TARGETS'] = _windowsLdmodTargets
env['_LDMODULE_SOURCES'] = _windowsLdmodSources
env['LDMODULEEMITTER'] = [ldmodEmitter]
env['LDMODULECOM'] = compositeLdmodAction
def exists(env):
return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-2-clause | 6,870,829,008,940,624,000 | 39.150376 | 149 | 0.650466 | false |
mbatchkarov/dc_evaluation | eval/pipeline/thesauri.py | 1 | 1144 | from random import sample
from discoutils.thesaurus_loader import Thesaurus
class DummyThesaurus(Thesaurus):
"""
    A thesaurus-like object which returns "b/N" as the only neighbour of every possible entry
"""
name = 'Constant'
def __init__(self):
pass
def get_nearest_neighbours(self, feature):
return [('b/N', 1.0)]
def get_vector(self):
pass
def to_shelf(self, *args, **kwargs):
pass
def __len__(self):
return 9999999
def __contains__(self, feature):
return True
class RandomThesaurus(DummyThesaurus):
"""
A thesaurus-like object which returns a single random neighbour for every possible entry. That neighbour
is chosen from the vocabulary that is passed in (as a dict {feature:index} )
"""
name = 'Random'
def __init__(self, vocab=None, k=1):
self.vocab = vocab
self.k = k
def get_nearest_neighbours(self, item):
if not self.vocab:
            raise ValueError('You need to provide a set of values to choose from first.')
return [(str(foo), 1.) for foo in sample(self.vocab, self.k)]
| bsd-3-clause | 8,440,284,303,191,987,000 | 25 | 108 | 0.625874 | false |
aaiijmrtt/MUSICALQA | code/query.py | 1 | 2090 | import sys
import music, language
_debug_ = False
replacements = [('full note', 'semibreve'), ('whole note', 'semibreve'), ('half note', 'minim'), ('quarter note', 'crotchet'), ('eighth note', 'quaver'), ('sixteenth note', 'semiquaver'), ('32nd note', 'demisemiquaver'), ('sharp', '#'), ('flat', 'b')]
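# Note-length names mapped to their duration in quarter-note (crotchet) units.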
translatetimes = {'semibreve': 4, 'minim': 2, 'crotchet': 1, 'quaver': 0.5, 'semiquaver': 0.25, 'demisemiquaver': 0.125}
prematch = lambda thing: thing.lower() if type(thing) == str else thing
match = lambda expectation, reality: not expectation or prematch(expectation) == prematch(reality)
def preprocess(line):
for replacement in replacements:
line = line.replace(*replacement)
return line
def lookup(query, context):
time, note, octave, meter, clef, instrument, initialposition, answerlist = None, None, None, None, None, None, 1, list()
if query[2]: time = translatetimes[query[2][1]]
if query[3]:
note = query[3][0]
if query[3][1]: note += query[3][1][0]
if query[3][2]: octave = int(query[3][2][0])
if query[8]:
initialposition = int(query[8][1]) - 1
for i in xrange(len(context)):
context[i] = context[i][int(query[8][1]) - 1: int(query[8][3])]
if query[9]: meter = ''.join(query[9][1: 4])
if query[10]: clef = query[10][1]
if query[11]: instrument = query[11][1]
if _debug_: print time, note, octave, meter, clef, instrument, initialposition
for part in context:
position = initialposition
for bar in part:
for descriptor in bar:
if match(time, descriptor[3]) and match(note, descriptor[0]) and match(meter, descriptor[6]) and match(clef, descriptor[7]) and match(instrument, descriptor[4]):
answerlist.append([descriptor[6], 1 / descriptor[3], position, descriptor[2] / descriptor[3] + 1])
position += 1
return answerlist
if __name__ == '__main__':
if len(sys.argv) == 3: query, context = sys.argv[1], sys.argv[2]
else: query, context = 'quarter note G in bars 2-2 in 4/4 time in the bass', 'data/f01.xml'
query = preprocess(query)
query = language.parse(query)
context = music.parse(context)
print lookup(query, context)
| mit | -4,313,926,338,157,113,000 | 45.444444 | 251 | 0.669856 | false |
fbradyirl/home-assistant | homeassistant/components/history/__init__.py | 1 | 15549 | """Provide pre-made queries on top of the recorder component."""
from collections import defaultdict
from datetime import timedelta
from itertools import groupby
import logging
import time
import voluptuous as vol
from homeassistant.const import (
HTTP_BAD_REQUEST,
CONF_DOMAINS,
CONF_ENTITIES,
CONF_EXCLUDE,
CONF_INCLUDE,
)
import homeassistant.util.dt as dt_util
from homeassistant.components import recorder, script
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import ATTR_HIDDEN
from homeassistant.components.recorder.util import session_scope, execute
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "history"
CONF_ORDER = "use_include_order"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: recorder.FILTER_SCHEMA.extend(
{vol.Optional(CONF_ORDER, default=False): cv.boolean}
)
},
extra=vol.ALLOW_EXTRA,
)
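# Domains whose every state is significant, not only actual state changes.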
SIGNIFICANT_DOMAINS = ("thermostat", "climate", "water_heater")
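# Domains that are always excluded from history results.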
IGNORE_DOMAINS = ("zone", "scene")
def get_significant_states(
hass,
start_time,
end_time=None,
entity_ids=None,
filters=None,
include_start_time_state=True,
):
"""
    Return state changes during the UTC period start_time - end_time.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs).
"""
timer_start = time.perf_counter()
from homeassistant.components.recorder.models import States
with session_scope(hass=hass) as session:
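        # Select states that actually changed (last_changed == last_updated),
        # plus every state from SIGNIFICANT_DOMAINS, newer than start_time.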
query = session.query(States).filter(
(
States.domain.in_(SIGNIFICANT_DOMAINS)
| (States.last_changed == States.last_updated)
)
& (States.last_updated > start_time)
)
if filters:
query = filters.apply(query, entity_ids)
if end_time is not None:
query = query.filter(States.last_updated < end_time)
query = query.order_by(States.last_updated)
states = (
state
for state in execute(query)
if (_is_significant(state) and not state.attributes.get(ATTR_HIDDEN, False))
)
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug("get_significant_states took %fs", elapsed)
return states_to_json(
hass, states, start_time, entity_ids, filters, include_start_time_state
)
def state_changes_during_period(hass, start_time, end_time=None, entity_id=None):
"""Return states changes during UTC period start_time - end_time."""
from homeassistant.components.recorder.models import States
with session_scope(hass=hass) as session:
query = session.query(States).filter(
(States.last_changed == States.last_updated)
& (States.last_updated > start_time)
)
if end_time is not None:
query = query.filter(States.last_updated < end_time)
if entity_id is not None:
query = query.filter_by(entity_id=entity_id.lower())
entity_ids = [entity_id] if entity_id is not None else None
states = execute(query.order_by(States.last_updated))
return states_to_json(hass, states, start_time, entity_ids)
def get_last_state_changes(hass, number_of_states, entity_id):
"""Return the last number_of_states."""
from homeassistant.components.recorder.models import States
start_time = dt_util.utcnow()
with session_scope(hass=hass) as session:
query = session.query(States).filter(
(States.last_changed == States.last_updated)
)
if entity_id is not None:
query = query.filter_by(entity_id=entity_id.lower())
entity_ids = [entity_id] if entity_id is not None else None
states = execute(
query.order_by(States.last_updated.desc()).limit(number_of_states)
)
return states_to_json(
hass, reversed(states), start_time, entity_ids, include_start_time_state=False
)
def get_states(hass, utc_point_in_time, entity_ids=None, run=None, filters=None):
"""Return the states at a specific point in time."""
from homeassistant.components.recorder.models import States
if run is None:
run = recorder.run_information(hass, utc_point_in_time)
# History did not run before utc_point_in_time
if run is None:
return []
from sqlalchemy import and_, func
with session_scope(hass=hass) as session:
if entity_ids and len(entity_ids) == 1:
# Use an entirely different (and extremely fast) query if we only
# have a single entity id
most_recent_state_ids = (
session.query(States.state_id.label("max_state_id"))
.filter(
(States.last_updated < utc_point_in_time)
& (States.entity_id.in_(entity_ids))
)
.order_by(States.last_updated.desc())
)
most_recent_state_ids = most_recent_state_ids.limit(1)
else:
# We have more than one entity to look at (most commonly we want
# all entities,) so we need to do a search on all states since the
# last recorder run started.
most_recent_states_by_date = session.query(
States.entity_id.label("max_entity_id"),
func.max(States.last_updated).label("max_last_updated"),
).filter(
(States.last_updated >= run.start)
& (States.last_updated < utc_point_in_time)
)
if entity_ids:
most_recent_states_by_date.filter(States.entity_id.in_(entity_ids))
most_recent_states_by_date = most_recent_states_by_date.group_by(
States.entity_id
)
most_recent_states_by_date = most_recent_states_by_date.subquery()
most_recent_state_ids = session.query(
func.max(States.state_id).label("max_state_id")
).join(
most_recent_states_by_date,
and_(
States.entity_id == most_recent_states_by_date.c.max_entity_id,
States.last_updated
== most_recent_states_by_date.c.max_last_updated,
),
)
most_recent_state_ids = most_recent_state_ids.group_by(States.entity_id)
most_recent_state_ids = most_recent_state_ids.subquery()
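        # Join the full state rows against the ids found above, skipping
        # domains that are never part of history results.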
query = (
session.query(States)
.join(
most_recent_state_ids,
States.state_id == most_recent_state_ids.c.max_state_id,
)
.filter((~States.domain.in_(IGNORE_DOMAINS)))
)
if filters:
query = filters.apply(query, entity_ids)
return [
state
for state in execute(query)
if not state.attributes.get(ATTR_HIDDEN, False)
]
def states_to_json(
hass, states, start_time, entity_ids, filters=None, include_start_time_state=True
):
"""Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly.
"""
result = defaultdict(list)
# Set all entity IDs to empty lists in result set to maintain the order
if entity_ids is not None:
for ent_id in entity_ids:
result[ent_id] = []
# Get the states at the start time
timer_start = time.perf_counter()
if include_start_time_state:
for state in get_states(hass, start_time, entity_ids, filters=filters):
state.last_changed = start_time
state.last_updated = start_time
result[state.entity_id].append(state)
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug("getting %d first datapoints took %fs", len(result), elapsed)
# Append all changes to it
for ent_id, group in groupby(states, lambda state: state.entity_id):
result[ent_id].extend(group)
# Filter out the empty lists if some states had 0 results.
return {key: val for key, val in result.items() if val}
def get_state(hass, utc_point_in_time, entity_id, run=None):
"""Return a state at a specific point in time."""
states = list(get_states(hass, utc_point_in_time, (entity_id,), run))
return states[0] if states else None
async def async_setup(hass, config):
"""Set up the history hooks."""
filters = Filters()
conf = config.get(DOMAIN, {})
exclude = conf.get(CONF_EXCLUDE)
if exclude:
filters.excluded_entities = exclude.get(CONF_ENTITIES, [])
filters.excluded_domains = exclude.get(CONF_DOMAINS, [])
include = conf.get(CONF_INCLUDE)
if include:
filters.included_entities = include.get(CONF_ENTITIES, [])
filters.included_domains = include.get(CONF_DOMAINS, [])
use_include_order = conf.get(CONF_ORDER)
hass.http.register_view(HistoryPeriodView(filters, use_include_order))
hass.components.frontend.async_register_built_in_panel(
"history", "history", "hass:poll-box"
)
return True
class HistoryPeriodView(HomeAssistantView):
"""Handle history period requests."""
url = "/api/history/period"
name = "api:history:view-period"
extra_urls = ["/api/history/period/{datetime}"]
def __init__(self, filters, use_include_order):
"""Initialize the history period view."""
self.filters = filters
self.use_include_order = use_include_order
async def get(self, request, datetime=None):
"""Return history over a period of time."""
timer_start = time.perf_counter()
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message("Invalid datetime", HTTP_BAD_REQUEST)
now = dt_util.utcnow()
one_day = timedelta(days=1)
if datetime:
start_time = dt_util.as_utc(datetime)
else:
start_time = now - one_day
if start_time > now:
return self.json([])
end_time = request.query.get("end_time")
if end_time:
end_time = dt_util.parse_datetime(end_time)
if end_time:
end_time = dt_util.as_utc(end_time)
else:
return self.json_message("Invalid end_time", HTTP_BAD_REQUEST)
else:
end_time = start_time + one_day
entity_ids = request.query.get("filter_entity_id")
if entity_ids:
entity_ids = entity_ids.lower().split(",")
include_start_time_state = "skip_initial_state" not in request.query
hass = request.app["hass"]
result = await hass.async_add_job(
get_significant_states,
hass,
start_time,
end_time,
entity_ids,
self.filters,
include_start_time_state,
)
result = list(result.values())
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug("Extracted %d states in %fs", sum(map(len, result)), elapsed)
# Optionally reorder the result to respect the ordering given
# by any entities explicitly included in the configuration.
if self.use_include_order:
sorted_result = []
for order_entity in self.filters.included_entities:
for state_list in result:
if state_list[0].entity_id == order_entity:
sorted_result.append(state_list)
result.remove(state_list)
break
sorted_result.extend(result)
result = sorted_result
return await hass.async_add_job(self.json, result)
class Filters:
"""Container for the configured include and exclude filters."""
def __init__(self):
"""Initialise the include and exclude filters."""
self.excluded_entities = []
self.excluded_domains = []
self.included_entities = []
self.included_domains = []
def apply(self, query, entity_ids=None):
"""Apply the include/exclude filter on domains and entities on query.
Following rules apply:
* only the include section is configured - just query the specified
entities or domains.
* only the exclude section is configured - filter the specified
entities and domains from all the entities in the system.
* if include and exclude is defined - select the entities specified in
the include and filter out the ones from the exclude list.
"""
from homeassistant.components.recorder.models import States
# specific entities requested - do not in/exclude anything
if entity_ids is not None:
return query.filter(States.entity_id.in_(entity_ids))
query = query.filter(~States.domain.in_(IGNORE_DOMAINS))
filter_query = None
# filter if only excluded domain is configured
if self.excluded_domains and not self.included_domains:
filter_query = ~States.domain.in_(self.excluded_domains)
if self.included_entities:
filter_query &= States.entity_id.in_(self.included_entities)
# filter if only included domain is configured
elif not self.excluded_domains and self.included_domains:
filter_query = States.domain.in_(self.included_domains)
if self.included_entities:
filter_query |= States.entity_id.in_(self.included_entities)
# filter if included and excluded domain is configured
elif self.excluded_domains and self.included_domains:
filter_query = ~States.domain.in_(self.excluded_domains)
if self.included_entities:
filter_query &= States.domain.in_(
self.included_domains
) | States.entity_id.in_(self.included_entities)
else:
filter_query &= States.domain.in_(
self.included_domains
) & ~States.domain.in_(self.excluded_domains)
# no domain filter just included entities
elif (
not self.excluded_domains
and not self.included_domains
and self.included_entities
):
filter_query = States.entity_id.in_(self.included_entities)
if filter_query is not None:
query = query.filter(filter_query)
# finally apply excluded entities filter if configured
if self.excluded_entities:
query = query.filter(~States.entity_id.in_(self.excluded_entities))
return query
def _is_significant(state):
"""Test if state is significant for history charts.
Will only test for things that are not filtered out in SQL.
"""
# scripts that are not cancellable will never change state
return state.domain != "script" or state.attributes.get(script.ATTR_CAN_CANCEL)
| apache-2.0 | -3,292,990,274,373,517,300 | 34.419134 | 88 | 0.614509 | false |
mediafactory/tryton_core_daemon | trytond/ir/sequence.py | 1 | 12626 | #This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from string import Template
import time
from ..model import ModelView, ModelSQL, fields
from ..tools import datetime_strftime
from ..pyson import Eval, And
from ..transaction import Transaction
from ..pool import Pool
from ..config import CONFIG
from ..backend import TableHandler
sql_sequence = CONFIG.options['db_type'] == 'postgresql'
class SequenceType(ModelSQL, ModelView):
"Sequence type"
_name = 'ir.sequence.type'
_description = __doc__
name = fields.Char('Sequence Name', required=True, translate=True)
code = fields.Char('Sequence Code', required=True)
SequenceType()
class Sequence(ModelSQL, ModelView):
"Sequence"
_name = 'ir.sequence'
_description = __doc__
_strict = False
name = fields.Char('Sequence Name', required=True, translate=True)
code = fields.Selection('code_get', 'Sequence Code', required=True,
states={
'readonly': Eval('context', {}).contains('code'),
})
active = fields.Boolean('Active')
prefix = fields.Char('Prefix')
suffix = fields.Char('Suffix')
type = fields.Selection([
('incremental', 'Incremental'),
('decimal timestamp', 'Decimal Timestamp'),
('hexadecimal timestamp', 'Hexadecimal Timestamp'),
], 'Type')
number_next_internal = fields.Integer('Next Number',
states={
'invisible': ~Eval('type').in_(['incremental']),
'required': And(Eval('type').in_(['incremental']),
not sql_sequence),
}, depends=['type'])
number_next = fields.Function(number_next_internal, 'get_number_next',
'set_number_next')
number_increment = fields.Integer('Increment Number',
states={
'invisible': ~Eval('type').in_(['incremental']),
'required': Eval('type').in_(['incremental']),
}, depends=['type'])
padding = fields.Integer('Number padding',
states={
'invisible': ~Eval('type').in_(['incremental']),
'required': Eval('type').in_(['incremental']),
}, depends=['type'])
timestamp_rounding = fields.Float('Timestamp Rounding', required=True,
states={
'invisible': ~Eval('type').in_(
['decimal timestamp', 'hexadecimal timestamp']),
}, depends=['type'])
timestamp_offset = fields.Float('Timestamp Offset', required=True,
states={
'invisible': ~Eval('type').in_(
['decimal timestamp', 'hexadecimal timestamp']),
}, depends=['type'])
last_timestamp = fields.Integer('Last Timestamp',
states={
'invisible': ~Eval('type').in_(
['decimal timestamp', 'hexadecimal timestamp']),
'required': Eval('type').in_(
['decimal timestamp', 'hexadecimal timestamp']),
}, depends=['type'])
def __init__(self):
super(Sequence, self).__init__()
self._constraints += [
('check_prefix_suffix', 'invalid_prefix_suffix'),
('check_last_timestamp', 'future_last_timestamp'),
]
self._sql_constraints += [
('check_timestamp_rounding', 'CHECK(timestamp_rounding > 0)',
'Timestamp rounding should be greater than 0'),
]
self._error_messages.update({
'missing': 'Missing sequence!',
'invalid_prefix_suffix': 'Invalid prefix/suffix!',
'future_last_timestamp': 'Last Timestamp could not be in future!',
})
def init(self, module_name):
cursor = Transaction().cursor
table = TableHandler(cursor, self, module_name)
# Migration from 2.0 rename number_next into number_next_internal
table.column_rename('number_next', 'number_next_internal')
super(Sequence, self).init(module_name)
# Migration from 2.0 create sql_sequence
if sql_sequence and not self._strict:
sequence_ids = self.search([])
for sequence in self.browse(sequence_ids):
if sequence.type != 'incremental':
continue
if not TableHandler.sequence_exist(cursor,
self._sql_sequence_name(sequence)):
self.create_sql_sequence(sequence,
sequence.number_next_internal)
def default_active(self):
return True
def default_type(self):
return 'incremental'
def default_number_increment(self):
return 1
def default_number_next(self):
return 1
def default_padding(self):
return 0
def default_timestamp_rounding(self):
return 1.0
def default_timestamp_offset(self):
return 946681200.0 # Offset for 2000-01-01
def default_last_timestamp(self):
return 0.0
def default_code(self):
return Transaction().context.get('code')
def get_number_next(self, ids, name):
cursor = Transaction().cursor
result = {}
for sequence in self.browse(ids):
sql_name = self._sql_sequence_name(sequence)
if sql_sequence and not self._strict:
cursor.execute('SELECT '
'CASE WHEN NOT is_called THEN last_value '
'ELSE last_value + increment_by '
'END FROM "%s"' % sql_name)
value, = cursor.fetchone()
else:
value = sequence.number_next_internal
result[sequence.id] = value
return result
def set_number_next(self, ids, name, value):
super(Sequence, self).write(ids, {
'number_next_internal': value,
})
def create(self, values):
sequence_id = super(Sequence, self).create(values)
if sql_sequence and not self._strict:
sequence = self.browse(sequence_id)
self.update_sql_sequence(sequence, values.get('number_next',
self.default_number_next()))
return sequence_id
def write(self, ids, values):
result = super(Sequence, self).write(ids, values)
if sql_sequence and not self._strict:
ids = [ids] if isinstance(ids, (int, long)) else ids
sequences = self.browse(ids)
for sequence in sequences:
self.update_sql_sequence(sequence, values.get('number_next'))
return result
def delete(self, ids):
if sql_sequence and not self._strict:
ids = [ids] if isinstance(ids, (int, long)) else ids
sequences = self.browse(ids)
for sequence in sequences:
self.delete_sql_sequence(sequence)
return super(Sequence, self).delete(ids)
def code_get(self):
pool = Pool()
sequence_type_obj = pool.get('ir.sequence.type')
sequence_type_ids = sequence_type_obj.search([])
sequence_types = sequence_type_obj.browse(sequence_type_ids)
return [(x.code, x.name) for x in sequence_types]
def check_prefix_suffix(self, ids):
"Check prefix and suffix"
for sequence in self.browse(ids):
try:
self._process(sequence.prefix)
self._process(sequence.suffix)
except Exception:
return False
return True
def check_last_timestamp(self, ids):
"Check last_timestamp"
for sequence in self.browse(ids):
next_timestamp = self._timestamp(sequence)
if sequence.last_timestamp > next_timestamp:
return False
return True
def _sql_sequence_name(self, sequence):
'Return SQL sequence name'
return '%s_%s' % (self._table, sequence.id)
def create_sql_sequence(self, sequence, number_next=None):
'Create the SQL sequence'
cursor = Transaction().cursor
if sequence.type != 'incremental':
return
if number_next is None:
number_next = sequence.number_next
cursor.execute('CREATE SEQUENCE "' + self._sql_sequence_name(sequence)
+ '" INCREMENT BY %s START WITH %s', (sequence.number_increment,
number_next))
def update_sql_sequence(self, sequence, number_next=None):
'Update the SQL sequence'
cursor = Transaction().cursor
exist = TableHandler.sequence_exist(cursor,
self._sql_sequence_name(sequence))
if sequence.type != 'incremental':
if exist:
self.delete_sql_sequence(sequence)
return
if not exist:
self.create_sql_sequence(sequence, number_next)
return
if number_next is None:
number_next = sequence.number_next
cursor.execute('ALTER SEQUENCE "' + self._sql_sequence_name(sequence)
+ '" INCREMENT BY %s RESTART WITH %s', (sequence.number_increment,
number_next))
def delete_sql_sequence(self, sequence):
'Delete the SQL sequence'
cursor = Transaction().cursor
if sequence.type != 'incremental':
return
cursor.execute('DROP SEQUENCE "%s"'
% self._sql_sequence_name(sequence))
def _process(self, string, date=None):
pool = Pool()
date_obj = pool.get('ir.date')
if not date:
date = date_obj.today()
year = datetime_strftime(date, '%Y')
month = datetime_strftime(date, '%m')
day = datetime_strftime(date, '%d')
return Template(string or '').substitute(
year=year,
month=month,
day=day,
)
def _timestamp(self, sequence):
return int((time.time() - sequence.timestamp_offset)
/ sequence.timestamp_rounding)
def _get_sequence(self, sequence):
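        'Return the next value of the sequence as a string'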
if sequence.type == 'incremental':
if sql_sequence and not self._strict:
cursor = Transaction().cursor
cursor.execute('SELECT nextval(\'"%s"\')'
% self._sql_sequence_name(sequence))
number_next, = cursor.fetchone()
else:
#Pre-fetch number_next
number_next = sequence.number_next_internal
with Transaction().set_user(0):
self.write(sequence.id, {
'number_next_internal': (number_next
+ sequence.number_increment),
})
return '%%0%sd' % sequence.padding % number_next
elif sequence.type in ('decimal timestamp', 'hexadecimal timestamp'):
timestamp = sequence.last_timestamp
while timestamp == sequence.last_timestamp:
timestamp = self._timestamp(sequence)
with Transaction().set_user(0):
self.write(sequence.id, {
'last_timestamp': timestamp,
})
if sequence.type == 'decimal timestamp':
return '%d' % timestamp
else:
return hex(timestamp)[2:].upper()
return ''
def get_id(self, domain):
'''
Return sequence value for the domain
:param domain: a domain or a sequence id
:return: the sequence value
'''
if isinstance(domain, (int, long)):
domain = [('id', '=', domain)]
# bypass rules on sequences
with Transaction().set_context(user=False):
with Transaction().set_user(0):
sequence_ids = self.search(domain, limit=1)
date = Transaction().context.get('date')
if sequence_ids:
with Transaction().set_user(0):
sequence = self.browse(sequence_ids[0])
return '%s%s%s' % (
self._process(sequence.prefix, date=date),
self._get_sequence(sequence),
self._process(sequence.suffix, date=date),
)
self.raise_user_error('missing')
def get(self, code):
return self.get_id([('code', '=', code)])
Sequence()
class SequenceStrict(Sequence):
"Sequence Strict"
_name = 'ir.sequence.strict'
_description = __doc__
_strict = True
def get_id(self, clause):
Transaction().cursor.lock(self._table)
return super(SequenceStrict, self).get_id(clause)
SequenceStrict()
| gpl-3.0 | -67,326,091,095,597,624 | 35.386167 | 78 | 0.562649 | false |
jplusplus/statscraper | tests/test-scb.py | 1 | 1343 | """Test SCB/PXWeb scraper."""
from statscraper.scrapers import SCB
from statscraper.exceptions import InvalidData
import pytest
def test_get_data():
"""We should be able to access a dataset by path."""
scraper = SCB()
scraper.move_to("HE").move_to("HE0110").move_to("HE0110F").move_to("Tab1DispInkN")
data = scraper.fetch({
"ContentsCode": ("item", "000002VY"),
"InkomstTyp": ("item", "FastInkIn"),
}, by="municipality")
assert "Region" in data.dataset.dimensions
assert "InkomstTyp" in data.dataset.dimensions
df = data.pandas
assert "value" in df.columns
assert "Region" in df.columns
assert "InkomstTyp" in df.columns
def test_values():
"""Make sure values are numerical."""
scraper = SCB()
scraper.move_to("HE").move_to("HE0110").move_to("HE0110F").move_to("Tab1DispInkN")
data = scraper.fetch({
"ContentsCode": ("item", "000002VY"),
"InkomstTyp": ("item", "FastInkIn"),
}, by="municipality")
    float(data[0].value)
def test_invalid_query():
"""We should raise an error on invalid queries."""
scraper = SCB()
scraper.move_to("HE").move_to("HE0110").move_to("HE0110F").move_to("Tab1DispInkN")
with pytest.raises(InvalidData):
scraper.fetch({
"foo": ("bar", "buzz"),
}, by="municipality")
| mit | 1,736,330,334,571,077,600 | 30.232558 | 86 | 0.634401 | false |
boonedox/AutobahnPython | autobahn/autobahn/__init__.py | 1 | 1165 | ###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from _version import __version__
version = __version__ # backward compat.
import util
import useragent
import flashpolicy
import httpstatus
import utf8validator
import xormasker
import compress
import websocket
## disable import, since it leads to reactor import
## https://twistedmatrix.com/trac/ticket/6849
#import resource
import prefixmap
import wamp
| apache-2.0 | 6,950,222,436,859,204,000 | 30.361111 | 79 | 0.627468 | false |
miptliot/edx-platform | openedx/features/course_experience/plugins.py | 2 | 2362 | """
Platform plugins to support the course experience.
This includes any locally defined CourseTools.
"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from course_tools import CourseTool
from courseware.courses import get_course_by_id
from views.course_reviews import CourseReviewsModuleFragmentView
from views.course_updates import CourseUpdatesFragmentView
from . import SHOW_REVIEWS_TOOL_FLAG, UNIFIED_COURSE_TAB_FLAG
class CourseUpdatesTool(CourseTool):
"""
The course updates tool.
"""
@classmethod
def title(cls):
"""
Returns the title of this tool.
"""
return _('Updates')
@classmethod
def icon_classes(cls):
"""
Returns icon classes needed to represent this tool.
"""
return 'fa fa-newspaper-o'
@classmethod
def is_enabled(cls, request, course_key):
"""
Returns True if this tool is enabled for the specified course key.
"""
course = get_course_by_id(course_key)
has_updates = CourseUpdatesFragmentView.has_updates(request, course)
return UNIFIED_COURSE_TAB_FLAG.is_enabled(course_key) and has_updates
@classmethod
def url(cls, course_key):
"""
Returns the URL for this tool for the specified course key.
"""
return reverse('openedx.course_experience.course_updates', args=[course_key])
class CourseReviewsTool(CourseTool):
"""
The course reviews tool.
"""
@classmethod
def title(cls):
"""
Returns the title of this tool.
"""
return _('Reviews')
@classmethod
def icon_classes(cls):
"""
Returns icon classes needed to represent this tool.
"""
return 'fa fa-star'
@classmethod
def is_enabled(cls, request, course_key):
"""
Returns True if this tool is enabled for the specified course key.
"""
reviews_configured = CourseReviewsModuleFragmentView.is_configured()
return SHOW_REVIEWS_TOOL_FLAG.is_enabled(course_key) and reviews_configured
@classmethod
def url(cls, course_key):
"""
Returns the URL for this tool for the specified course key.
"""
return reverse('openedx.course_experience.course_reviews', args=[course_key])
| agpl-3.0 | -1,128,624,673,592,773,600 | 27.457831 | 85 | 0.648603 | false |
caseywstark/colab | colab/apps/threadedcomments/urls.py | 1 | 2392 | from django.conf.urls.defaults import patterns, url
from threadedcomments.models import FreeThreadedComment, ThreadedComment
from threadedcomments import views
from voting.views import vote_on_object
free = {'model' : FreeThreadedComment}
urlpatterns = patterns('',
### Comments ###
url(r'^comment/(?P<content_type>\d+)/(?P<object_id>\d+)/$', views.comment, name="tc_comment"),
url(r'^comment/(?P<content_type>\d+)/(?P<object_id>\d+)/(?P<parent_id>\d+)/$', views.comment, name="tc_comment_parent"),
url(r'^comment/(?P<object_id>\d+)/delete/$', views.comment_delete, name="tc_comment_delete"),
url(r'^comment/(?P<edit_id>\d+)/edit/$', views.comment, name="tc_comment_edit"),
### Comments (AJAX) ###
url(r'^comment/(?P<content_type>\d+)/(?P<object_id>\d+)/(?P<ajax>json|xml)/$', views.comment, name="tc_comment_ajax"),
url(r'^comment/(?P<content_type>\d+)/(?P<object_id>\d+)/(?P<parent_id>\d+)/(?P<ajax>json|xml)/$', views.comment, name="tc_comment_parent_ajax"),
url(r'^comment/(?P<edit_id>\d+)/edit/(?P<ajax>json|xml)/$', views.comment, name="tc_comment_edit_ajax"),
### Free Comments ###
url(r'^freecomment/(?P<content_type>\d+)/(?P<object_id>\d+)/$', views.free_comment, name="tc_free_comment"),
url(r'^freecomment/(?P<content_type>\d+)/(?P<object_id>\d+)/(?P<parent_id>\d+)/$', views.free_comment, name="tc_free_comment_parent"),
url(r'^freecomment/(?P<object_id>\d+)/delete/$', views.comment_delete, free, name="tc_free_comment_delete"),
url(r'^freecomment/(?P<edit_id>\d+)/edit/$', views.free_comment, name="tc_free_comment_edit"),
### Free Comments (AJAX) ###
url(r'^freecomment/(?P<content_type>\d+)/(?P<object_id>\d+)/(?P<ajax>json|xml)/$', views.free_comment, name="tc_free_comment_ajax"),
url(r'^freecomment/(?P<content_type>\d+)/(?P<object_id>\d+)/(?P<parent_id>\d+)/(?P<ajax>json|xml)/$', views.free_comment, name="tc_free_comment_parent_ajax"),
url(r'^freecomment/(?P<edit_id>\d+)/edit/(?P<ajax>json|xml)/$', views.free_comment, name="tc_free_comment_edit_ajax"),
### Custom shit ###
# comment voting
url(r'^voting/(?P<object_id>\d+)/(?P<direction>up|down|clear)vote/$',
vote_on_object, dict(model=ThreadedComment, template_object_name='comment',
allow_xmlhttprequest=True, confirm_vote=False), name="comment_vote"),
)
| mit | -4,319,326,364,418,396,000 | 62.648649 | 162 | 0.628344 | false |
ktbs/ktbs | lib/ktbs/methods/abstract.py | 1 | 8730 | # This file is part of KTBS <http://liris.cnrs.fr/sbt-dev/ktbs>
# Copyright (C) 2011-2012 Pierre-Antoine Champin <[email protected]> /
# Universite de Lyon <http://www.universite-lyon.fr>
#
# KTBS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KTBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with KTBS. If not, see <http://www.gnu.org/licenses/>.
"""I define an abstract implementation for mono-source transformation methods.
Methods based on this class will always have exactly one source.
It handles
TODO: document how to use this
"""
import traceback
from json import dumps as json_dumps, loads as json_loads
import logging
from rdflib import Literal, URIRef
from rdfrest.util import Diagnosis
from .interface import IMethod
from ..engine.resource import METADATA
from ..namespace import KTBS
LOG = logging.getLogger(__name__)
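# Monotonicity levels of a recomputation, from least to most favourable.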
NOT_MON = 0
LOGIC_MON = 1
PSEUDO_MON = 2
STRICT_MON = 3
class AbstractMonosourceMethod(IMethod):
"""An abstract implementation of a mono-source transformation method.
"""
# the following attributes must be overridden by subclasses
uri = None # a rdflib.URIRef identifying this transformation method
# the following attributes may be overridden by subclasses
parameter_types = {} # a dict enumerating all the possible parameters and their expected type
required_parameters = () # an iterable of parameter names
    target_model = None # override (with URIRef) if you want to force the computed trace's model
    target_origin = None # override (with URIRef) if you want to force the computed trace's origin
# the following methods may be overridden by subclasses
def compute_model(self, computed_trace, params, diag):
"""Compute the model of the computed trace.
The default implementation works as follows:
* if there is a parameter 'model', it will be returned;
* else, if the 'target_model' attribute of the method is not None, it will be returned;
* else, the model of the source trace will be returned.
TODO document parameters
"""
model = params.get("model")
if model is not None:
return model
elif self.target_model is not None:
return self.target_model
else:
return computed_trace.source_traces[0].model_uri
def compute_origin(self, computed_trace, params, diag):
"""Compute the origin of the computed trace.
The default implementation works as follows:
        * if there is a parameter 'origin', it will be returned;
* else, if the 'target_origin' attribute of the method is not None, it will be returned;
* else, the origin of the source trace will be returned.
TODO document parameters
"""
origin = params.get("origin")
if origin is not None:
return Literal(origin)
elif self.target_origin is not None:
return self.target_origin
else:
source = computed_trace.source_traces[0]
return source.state.value(source.uri, KTBS.hasOrigin)
    def init_state(self, computed_trace, params, cstate, diag):
"""Return the initial structure of the computation state.
The computation state is a JSON-compatible dict,
that will hold information across several computation steps.
TODO document parameters
"""
pass
# the following methods must be overridden by subclasses
def do_compute_obsels(self, computed_trace, cstate, monotonicity, diag):
"""Computes the obsels of the computed trace.
TODO document parameters
"""
        raise NotImplementedError
# the following methods should not be changed by subclasses,
    # they constitute the common implementation of IMethod by all subclasses
def compute_trace_description(self, computed_trace):
"""I implement :meth:`.interface.IMethod.compute_trace_description`.
"""
diag = Diagnosis("compute_trace_description for <{}>".format(self.uri))
cstate = {
'errors': None,
'log_mon_tag': None,
'pse_mon_tag': None,
'str_mon_tag': None,
'custom': {},
}
params = self._prepare_params(computed_trace, diag)
if len(computed_trace.source_traces) != 1:
diag.append("Method <{}> expects exactly one source".format(self.uri))
params = None
if params is not None:
model = self.compute_model(computed_trace, params, diag)
origin = self.compute_origin(computed_trace, params, diag)
with computed_trace.edit(_trust=True) as editable:
editable.add((computed_trace.uri, KTBS.hasModel, model))
editable.add((computed_trace.uri, KTBS.hasOrigin, origin))
self.init_state(computed_trace, params, cstate['custom'], diag)
if not diag:
cstate["errors"] = list(diag)
computed_trace.metadata.set((computed_trace.uri,
METADATA.computation_state,
Literal(json_dumps(cstate))
))
return diag
def compute_obsels(self, computed_trace, from_scratch=False):
"""I implement :meth:`.interface.IMethod.compute_obsels`.
"""
diag = Diagnosis("compute_obsels for <{}>".format(self.uri))
cstate = json_loads(
computed_trace.metadata.value(computed_trace.uri,
METADATA.computation_state))
if from_scratch:
cstate["log_mon_tag"] = None
cstate["pse_mon_tag"] = None
cstate["str_mon_tag"] = None
errors = cstate.get("errors")
if errors:
for i in errors:
diag.append(i)
return diag
source_obsels = computed_trace.source_traces[0].obsel_collection
from logging import getLogger; LOG = getLogger()
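        # compare the monotonicity tags saved at the previous run with the
        # source's current ones to decide how much work can be reused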
if cstate["str_mon_tag"] == source_obsels.str_mon_tag:
monotonicity = STRICT_MON
elif cstate["pse_mon_tag"] == source_obsels.pse_mon_tag:
monotonicity = PSEUDO_MON
elif cstate["log_mon_tag"] == source_obsels.log_mon_tag:
monotonicity = LOGIC_MON
else:
monotonicity = NOT_MON
self.do_compute_obsels(computed_trace, cstate['custom'], monotonicity, diag)
cstate["log_mon_tag"] = source_obsels.log_mon_tag
cstate["pse_mon_tag"] = source_obsels.pse_mon_tag
cstate["str_mon_tag"] = source_obsels.str_mon_tag
computed_trace.metadata.set((computed_trace.uri,
METADATA.computation_state,
Literal(json_dumps(cstate))
))
return diag
def _prepare_params(self, computed_trace, diag):
"""I check and prepare the parameters passed to the method.
I return a dict of useful parameters converted to the expected datatype.
If this can not be done, I return None.
I also populate `diag` with error/warning messages.
"""
params = computed_trace.parameters_as_dict
critical = False
for key, val in params.items():
datatype = self.parameter_types.get(key)
if datatype is None:
diag.append("WARN: Parameter %s is not used by <%s>"
% (key, self.uri))
else:
try:
params[key] = datatype(val)
except Exception as e:
LOG.info(traceback.format_exc())
diag.append("Parameter %s has illegal value: %s.\n"
" Reason: %s"
% (key, val, e.message))
critical = True
for key in self.required_parameters:
if key not in params:
diag.append("Parameter '%s' is required" % key)
critical = True
if critical:
return None
else:
return params
| lgpl-3.0 | -3,967,442,504,605,612,500 | 36.956522 | 97 | 0.606186 | false |
onyedikilo/tacotron | train.py | 1 | 3723 | # -*- coding: utf-8 -*-
#/usr/bin/python2
'''
By kyubyong park. [email protected].
https://www.github.com/kyubyong/tacotron
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
import librosa
import os
from tqdm import tqdm
from hyperparams import Hyperparams as hp
from prepro import *
from networks import encode, decode1, decode2
from modules import *
from data_load import get_batch
from utils import shift_by_one
from prepro import load_vocab
class Graph:
def __init__(self, is_training=True):
self.graph = tf.Graph()
with self.graph.as_default():
if is_training:
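                # x: text inputs, y: reduced mel-spectrogram targets,
                # z: linear-spectrogram targets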
self.x, self.y, self.z, self.num_batch = get_batch()
self.decoder_inputs = shift_by_one(self.y)
else: # Evaluation
self.x = tf.placeholder(tf.int32, shape=(None, None))
self.decoder_inputs = tf.placeholder(tf.float32, shape=(None, None, hp.n_mels*hp.r))
# Encoder
self.memory = encode(self.x, is_training=is_training) # (N, T, E)
# Decoder
self.outputs1 = decode1(self.decoder_inputs, self.memory, is_training=is_training) # (N, T', hp.n_mels*hp.r)
self.outputs2 = decode2(self.outputs1, is_training=is_training) # (N, T', (1+hp.n_fft//2)*hp.r)
if is_training:
# Loss
if hp.loss_type=="l1": # L1 loss
self.loss1 = tf.abs(self.outputs1 - self.y)
self.loss2 = tf.abs(self.outputs2 - self.z)
else: # L2 loss
self.loss1 = tf.squared_difference(self.outputs1, self.y)
self.loss2 = tf.squared_difference(self.outputs2, self.z)
# Target masking
if hp.target_zeros_masking:
self.loss1 *= tf.to_float(tf.not_equal(self.y, 0.))
self.loss2 *= tf.to_float(tf.not_equal(self.z, 0.))
self.mean_loss1 = tf.reduce_mean(self.loss1)
self.mean_loss2 = tf.reduce_mean(self.loss2)
self.mean_loss = self.mean_loss1 + self.mean_loss2
# Training Scheme
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.optimizer = tf.train.AdamOptimizer(learning_rate=hp.lr)
self.train_op = self.optimizer.minimize(self.mean_loss, global_step=self.global_step)
# Summmary
tf.summary.scalar('mean_loss1', self.mean_loss1)
tf.summary.scalar('mean_loss2', self.mean_loss2)
tf.summary.scalar('mean_loss', self.mean_loss)
self.merged = tf.summary.merge_all()
def main():
g = Graph(); print("Training Graph loaded")
with g.graph.as_default():
# Load vocabulary
char2idx, idx2char = load_vocab()
# Training
sv = tf.train.Supervisor(logdir=hp.logdir,
save_model_secs=0)
with sv.managed_session() as sess:
for epoch in range(1, hp.num_epochs+1):
if sv.should_stop(): break
for step in tqdm(range(g.num_batch), total=g.num_batch, ncols=70, leave=False, unit='b'):
sess.run(g.train_op)
# Write checkpoint files at every epoch
gs = sess.run(g.global_step)
sv.saver.save(sess, hp.logdir + '/model_epoch_%02d_gs_%d' % (epoch, gs))
if __name__ == '__main__':
main()
print("Done")
| apache-2.0 | -1,837,786,440,896,341,800 | 38.606383 | 120 | 0.53747 | false |
tensorflow/agents | tf_agents/experimental/distributed/examples/sac/sac_collect.py | 1 | 5592 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Sample collection Job using a variable container for policy updates.
See README for launch instructions.
"""
import os
from typing import Callable, Text
from absl import app
from absl import flags
from absl import logging
import gin
import reverb
import tensorflow.compat.v2 as tf
from tf_agents.environments import py_environment
from tf_agents.environments import suite_mujoco
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.metrics import py_metrics
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import random_py_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
from tf_agents.system import system_multiprocessing as multiprocessing
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train.utils import train_utils
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('replay_buffer_server_address', None,
'Replay buffer server address.')
flags.DEFINE_string('variable_container_server_address', None,
'Variable container server address.')
flags.DEFINE_integer(
'task', 0, 'Identifier of the collect task. Must be unique in a job.')
flags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')
flags.DEFINE_multi_string('gin_bindings', None, 'Gin binding parameters.')
FLAGS = flags.FLAGS
@gin.configurable
def collect(summary_dir: Text,
environment_name: Text,
collect_policy: py_tf_eager_policy.PyTFEagerPolicyBase,
replay_buffer_server_address: Text,
variable_container_server_address: Text,
suite_load_fn: Callable[
[Text], py_environment.PyEnvironment] = suite_mujoco.load,
initial_collect_steps: int = 10000,
max_train_steps: int = 2000000) -> None:
"""Collects experience using a policy updated after every episode."""
# Create the environment. For now support only single environment collection.
collect_env = suite_load_fn(environment_name)
# Create the variable container.
train_step = train_utils.create_train_step()
variables = {
reverb_variable_container.POLICY_KEY: collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step
}
variable_container = reverb_variable_container.ReverbVariableContainer(
variable_container_server_address,
table_names=[reverb_variable_container.DEFAULT_TABLE])
variable_container.update(variables)
# Create the replay buffer observer.
rb_observer = reverb_utils.ReverbAddTrajectoryObserver(
reverb.Client(replay_buffer_server_address),
table_name=reverb_replay_buffer.DEFAULT_TABLE,
sequence_length=2,
stride_length=1)
random_policy = random_py_policy.RandomPyPolicy(collect_env.time_step_spec(),
collect_env.action_spec())
initial_collect_actor = actor.Actor(
collect_env,
random_policy,
train_step,
steps_per_run=initial_collect_steps,
observers=[rb_observer])
logging.info('Doing initial collect.')
initial_collect_actor.run()
env_step_metric = py_metrics.EnvironmentSteps()
collect_actor = actor.Actor(
collect_env,
collect_policy,
train_step,
steps_per_run=1,
metrics=actor.collect_metrics(10),
summary_dir=summary_dir,
observers=[rb_observer, env_step_metric])
# Run the experience collection loop.
while train_step.numpy() < max_train_steps:
logging.info('Collecting with policy at step: %d', train_step.numpy())
collect_actor.run()
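    # Refresh the local policy variables and train step from the variable container.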
variable_container.update(variables)
def main(_):
logging.set_verbosity(logging.INFO)
tf.enable_v2_behavior()
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)
# Wait for the collect policy to become available, then load it.
collect_policy_dir = os.path.join(FLAGS.root_dir,
learner.POLICY_SAVED_MODEL_DIR,
learner.COLLECT_POLICY_SAVED_MODEL_DIR)
collect_policy = train_utils.wait_for_policy(
collect_policy_dir, load_specs_from_pbtxt=True)
# Prepare summary directory.
summary_dir = os.path.join(FLAGS.root_dir, learner.TRAIN_DIR, str(FLAGS.task))
# Perform collection.
collect(
summary_dir=summary_dir,
environment_name=gin.REQUIRED,
collect_policy=collect_policy,
replay_buffer_server_address=FLAGS.replay_buffer_server_address,
variable_container_server_address=FLAGS.variable_container_server_address)
if __name__ == '__main__':
flags.mark_flags_as_required([
'root_dir', 'replay_buffer_server_address',
'variable_container_server_address'
])
multiprocessing.handle_main(lambda _: app.run(main))
| apache-2.0 | 3,883,808,191,007,981,000 | 36.28 | 80 | 0.711731 | false |
petrjasek/superdesk-server | apps/packages/resource.py | 1 | 1554 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.resource import Resource
from apps.content import metadata_schema
from apps.archive.common import item_url
from apps.archive.archive import SOURCE as ARCHIVE
from apps.archive import ArchiveVersionsResource
class PackageVersionsResource(ArchiveVersionsResource):
"""
    Resource class for versions of packages
"""
datasource = {
'source': ARCHIVE + '_versions',
'filter': {'type': 'composite'}
}
class PackageResource(Resource):
'''
Package schema
'''
datasource = {
'source': ARCHIVE,
'default_sort': [('_updated', -1)],
'filter': {'type': 'composite'},
'elastic_filter': {'term': {'archive.type': 'composite'}} # eve-elastic specific filter
}
item_url = item_url
item_methods = ['GET', 'PATCH']
schema = {}
schema.update(metadata_schema)
schema.update({
'type': {
'type': 'string',
'readonly': True,
'default': 'composite'
},
'groups': {
'type': 'list',
'minlength': 1
},
'profile': {
'type': 'string'
}
})
versioning = True
privileges = {'POST': 'archive', 'PATCH': 'archive'}
| agpl-3.0 | -7,215,252,584,971,752,000 | 24.47541 | 96 | 0.596525 | false |
parinporecha/backend_gtgonline | GTG/tests/test_networkmanager.py | 1 | 1441 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2012 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
""" Tests for Network Manager """
import unittest
from GTG.tools.networkmanager import is_connection_up
class TestNetworkManager(unittest.TestCase):
""" Test network manager tool code """
def test_is_connection_up_dont_throw_exception(self):
""" is_connection_up() returns a boolean value and
        doesn't throw any exception """
self.assertIn(is_connection_up(), [True, False])
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
| gpl-3.0 | -7,845,067,461,941,154,000 | 37.945946 | 79 | 0.649549 | false |
chrislit/abydos | tests/distance/test_distance_clement.py | 1 | 5010 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_clement.
This module contains unit tests for abydos.distance.Clement
"""
import unittest
from abydos.distance import Clement
class ClementTestCases(unittest.TestCase):
"""Test Clement functions.
abydos.distance.Clement
"""
cmp = Clement()
cmp_no_d = Clement(alphabet=0)
def test_clement_sim(self):
"""Test abydos.distance.Clement.sim."""
# Base cases
self.assertEqual(self.cmp.sim('', ''), 1.0)
self.assertEqual(self.cmp.sim('a', ''), 0.0025510204081632404)
self.assertEqual(self.cmp.sim('', 'a'), 0.0)
self.assertEqual(self.cmp.sim('abc', ''), 0.005102040816326481)
self.assertEqual(self.cmp.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp.sim('abcd', 'efgh'), 0.006336616803332366)
self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.5037970201)
self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.5037970201)
self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.5037970201)
self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.5037970201)
self.assertAlmostEqual(
self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.6414112246
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.sim('', ''), 1.0)
self.assertEqual(self.cmp_no_d.sim('a', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'a'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(
self.cmp_no_d.sim('Nigel', 'Niall'), 0.1666666667
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Niall', 'Nigel'), 0.1666666667
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Colin', 'Coiln'), 0.1666666667
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Coiln', 'Colin'), 0.1666666667
)
self.assertAlmostEqual(
self.cmp_no_d.sim('ATCAACGAGT', 'AACGATTAG'), 0.1363636364
)
def test_clement_dist(self):
"""Test abydos.distance.Clement.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 0.0)
self.assertEqual(self.cmp.dist('a', ''), 0.9974489795918368)
self.assertEqual(self.cmp.dist('', 'a'), 1.0)
self.assertEqual(self.cmp.dist('abc', ''), 0.9948979591836735)
self.assertEqual(self.cmp.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp.dist('abcd', 'efgh'), 0.9936633831966676)
self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.4962029799)
self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.4962029799)
self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.4962029799)
self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.4962029799)
self.assertAlmostEqual(
self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.3585887754
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.dist('', ''), 0.0)
self.assertEqual(self.cmp_no_d.dist('a', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'a'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(
self.cmp_no_d.dist('Nigel', 'Niall'), 0.8333333333
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Niall', 'Nigel'), 0.8333333333
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Colin', 'Coiln'), 0.8333333333
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Coiln', 'Colin'), 0.8333333333
)
self.assertAlmostEqual(
self.cmp_no_d.dist('ATCAACGAGT', 'AACGATTAG'), 0.8636363636
)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 2,676,999,604,259,894,000 | 38.761905 | 77 | 0.61497 | false |
Johnzero/OE7 | openerp/addons/mail/mail_thread.py | 1 | 71162 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil
import email
import logging
import pytz
import re
import time
import xmlrpclib
from email.message import Message
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.addons.mail.mail_message import decode
from openerp.osv import fields, osv, orm
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
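# Decode and join all values of the given email header into a single string.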
def decode_header(message, header, separator=' '):
return separator.join(map(decode, filter(None, message.get_all(header, []))))
class mail_thread(osv.AbstractModel):
''' mail_thread model is meant to be inherited by any model that needs to
act as a discussion topic on which messages can be attached. Public
methods are prefixed with ``message_`` in order to avoid name
collisions with methods of the models that will inherit from this class.
``mail.thread`` defines fields used to handle and display the
communication history. ``mail.thread`` also manages followers of
inheriting classes. All features and expected behavior are managed
        by mail.thread. Widgets have been designed for the 7.0 and following
versions of OpenERP.
Inheriting classes are not required to implement any method, as the
default implementation will work for any model. However it is common
to override at least the ``message_new`` and ``message_update``
methods (calling ``super``) to add model-specific behavior at
creation and update of a thread when processing incoming emails.
Options:
- _mail_flat_thread: if set to True, all messages without parent_id
are automatically attached to the first message posted on the
            resource. If set to False, the display of Chatter is done using
threads, and no parent_id is automatically set.
'''
_name = 'mail.thread'
_description = 'Email Thread'
_mail_flat_thread = True
# Automatic logging system if mail installed
# _track = {
# 'field': {
# 'module.subtype_xml': lambda self, cr, uid, obj, context=None: obj[state] == done,
# 'module.subtype_xml2': lambda self, cr, uid, obj, context=None: obj[state] != done,
# },
# 'field2': {
# ...
# },
# }
# where
# :param string field: field name
# :param module.subtype_xml: xml_id of a mail.message.subtype (i.e. mail.mt_comment)
# :param obj: is a browse_record
# :param function lambda: returns whether the tracking should record using this subtype
_track = {}
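    # A purely illustrative _track definition (the 'state' field, the 'done'
    # value and the 'module.mt_task_done' subtype are made-up names, not part
    # of this module):
    #
    # _track = {
    #     'state': {
    #         'module.mt_task_done':
    #             lambda self, cr, uid, obj, context=None: obj['state'] == 'done',
    #     },
    # }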
def _get_message_data(self, cr, uid, ids, name, args, context=None):
""" Computes:
- message_unread: has uid unread message for the document
- message_summary: html snippet summarizing the Chatter for kanban views """
res = dict((id, dict(message_unread=False, message_unread_count=0, message_summary=' ')) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
# search for unread messages, directly in SQL to improve performances
cr.execute(""" SELECT m.res_id FROM mail_message m
RIGHT JOIN mail_notification n
ON (n.message_id = m.id AND n.partner_id = %s AND (n.read = False or n.read IS NULL))
WHERE m.model = %s AND m.res_id in %s""",
(user_pid, self._name, tuple(ids),))
for result in cr.fetchall():
res[result[0]]['message_unread'] = True
res[result[0]]['message_unread_count'] += 1
for id in ids:
if res[id]['message_unread_count']:
title = res[id]['message_unread_count'] > 1 and _("You have %d unread messages") % res[id]['message_unread_count'] or _("You have one unread message")
res[id]['message_summary'] = "<span class='oe_kanban_mail_new' title='%s'><span class='oe_e'>9</span> %d %s</span>" % (title, res[id].pop('message_unread_count'), _("New"))
return res
def _get_subscription_data(self, cr, uid, ids, name, args, context=None):
""" Computes:
- message_subtype_data: data about document subtypes: which are
available, which are followed if any """
res = dict((id, dict(message_subtype_data='')) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
# find current model subtypes, add them to a dictionary
subtype_obj = self.pool.get('mail.message.subtype')
subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
subtype_dict = dict((subtype.name, dict(default=subtype.default, followed=False, id=subtype.id)) for subtype in subtype_obj.browse(cr, uid, subtype_ids, context=context))
for id in ids:
res[id]['message_subtype_data'] = subtype_dict.copy()
# find the document followers, update the data
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, uid, [
('partner_id', '=', user_pid),
('res_id', 'in', ids),
('res_model', '=', self._name),
], context=context)
for fol in fol_obj.browse(cr, uid, fol_ids, context=context):
thread_subtype_dict = res[fol.res_id]['message_subtype_data']
for subtype in fol.subtype_ids:
thread_subtype_dict[subtype.name]['followed'] = True
res[fol.res_id]['message_subtype_data'] = thread_subtype_dict
return res
def _search_message_unread(self, cr, uid, obj=None, name=None, domain=None, context=None):
return [('message_ids.to_read', '=', True)]
def _get_followers(self, cr, uid, ids, name, arg, context=None):
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)])
res = dict((id, dict(message_follower_ids=[], message_is_follower=False)) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids):
res[fol.res_id]['message_follower_ids'].append(fol.partner_id.id)
if fol.partner_id.id == user_pid:
res[fol.res_id]['message_is_follower'] = True
return res
def _set_followers(self, cr, uid, id, name, value, arg, context=None):
if not value:
return
partner_obj = self.pool.get('res.partner')
fol_obj = self.pool.get('mail.followers')
# read the old set of followers, and determine the new set of followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', '=', id)])
old = set(fol.partner_id.id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids))
new = set(old)
for command in value or []:
if isinstance(command, (int, long)):
new.add(command)
elif command[0] == 0:
new.add(partner_obj.create(cr, uid, command[2], context=context))
elif command[0] == 1:
partner_obj.write(cr, uid, [command[1]], command[2], context=context)
new.add(command[1])
elif command[0] == 2:
partner_obj.unlink(cr, uid, [command[1]], context=context)
new.discard(command[1])
elif command[0] == 3:
new.discard(command[1])
elif command[0] == 4:
new.add(command[1])
elif command[0] == 5:
new.clear()
elif command[0] == 6:
new = set(command[2])
# remove partners that are no longer followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID,
[('res_model', '=', self._name), ('res_id', '=', id), ('partner_id', 'not in', list(new))])
fol_obj.unlink(cr, SUPERUSER_ID, fol_ids)
# add new followers
for partner_id in new - old:
fol_obj.create(cr, SUPERUSER_ID, {'res_model': self._name, 'res_id': id, 'partner_id': partner_id})
def _search_followers(self, cr, uid, obj, name, args, context):
fol_obj = self.pool.get('mail.followers')
res = []
for field, operator, value in args:
assert field == name
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('partner_id', operator, value)])
res_ids = [fol.res_id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids)]
res.append(('id', 'in', res_ids))
return res
_columns = {
'message_is_follower': fields.function(_get_followers,
            type='boolean', string='Is a Follower', multi='_get_followers'),
'message_follower_ids': fields.function(_get_followers, fnct_inv=_set_followers,
fnct_search=_search_followers, type='many2many',
obj='res.partner', string='Followers', multi='_get_followers'),
'message_ids': fields.one2many('mail.message', 'res_id',
domain=lambda self: [('model', '=', self._name)],
auto_join=True,
string='Messages',
help="Messages and communication history"),
'message_unread': fields.function(_get_message_data,
fnct_search=_search_message_unread, multi="_get_message_data",
type='boolean', string='Unread Messages',
help="If checked new messages require your attention."),
'message_summary': fields.function(_get_message_data, method=True,
type='text', string='Summary', multi="_get_message_data",
help="Holds the Chatter summary (number of messages, ...). "\
"This summary is directly in html format in order to "\
"be inserted in kanban views."),
}
#------------------------------------------------------
# CRUD overrides for automatic subscription and logging
#------------------------------------------------------
def create(self, cr, uid, values, context=None):
""" Chatter override :
- subscribe uid
- subscribe followers of parent
- log a creation message
"""
if context is None:
context = {}
thread_id = super(mail_thread, self).create(cr, uid, values, context=context)
# subscribe uid unless asked not to
if not context.get('mail_create_nosubscribe'):
self.message_subscribe_users(cr, uid, [thread_id], [uid], context=context)
self.message_auto_subscribe(cr, uid, [thread_id], values.keys(), context=context)
# automatic logging unless asked not to (mainly for various testing purpose)
if not context.get('mail_create_nolog'):
self.message_post(cr, uid, thread_id, body=_('Document created'), context=context)
return thread_id
def write(self, cr, uid, ids, values, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
# Track initial values of tracked fields
tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=context)
if tracked_fields:
initial = self.read(cr, uid, ids, tracked_fields.keys(), context=context)
initial_values = dict((item['id'], item) for item in initial)
# Perform write, update followers
result = super(mail_thread, self).write(cr, uid, ids, values, context=context)
self.message_auto_subscribe(cr, uid, ids, values.keys(), context=context)
# Perform the tracking
if tracked_fields:
self.message_track(cr, uid, ids, tracked_fields, initial_values, context=context)
return result
def unlink(self, cr, uid, ids, context=None):
""" Override unlink to delete messages and followers. This cannot be
cascaded, because link is done through (res_model, res_id). """
msg_obj = self.pool.get('mail.message')
fol_obj = self.pool.get('mail.followers')
# delete messages and notifications
msg_ids = msg_obj.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)], context=context)
msg_obj.unlink(cr, uid, msg_ids, context=context)
# delete
res = super(mail_thread, self).unlink(cr, uid, ids, context=context)
# delete followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)], context=context)
fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default['message_ids'] = []
default['message_follower_ids'] = []
return super(mail_thread, self).copy(cr, uid, id, default=default, context=context)
#------------------------------------------------------
# Automatically log tracked fields
#------------------------------------------------------
def _get_tracked_fields(self, cr, uid, updated_fields, context=None):
""" Return a structure of tracked fields for the current model.
:param list updated_fields: modified field names
:return list: a list of (field_name, column_info obj), containing
always tracked fields and modified on_change fields
"""
lst = []
for name, column_info in self._all_columns.items():
visibility = getattr(column_info.column, 'track_visibility', False)
if visibility == 'always' or (visibility == 'onchange' and name in updated_fields) or name in self._track:
lst.append(name)
if not lst:
return lst
return self.fields_get(cr, uid, lst, context=context)
def message_track(self, cr, uid, ids, tracked_fields, initial_values, context=None):
def convert_for_display(value, col_info):
if not value and col_info['type'] == 'boolean':
return 'False'
if not value:
return ''
if col_info['type'] == 'many2one':
return value[1]
if col_info['type'] == 'selection':
return dict(col_info['selection'])[value]
return value
def format_message(message_description, tracked_values):
message = ''
if message_description:
message = '<span>%s</span>' % message_description
for name, change in tracked_values.items():
message += '<div> • <b>%s</b>: ' % change.get('col_info')
if change.get('old_value'):
message += '%s → ' % change.get('old_value')
message += '%s</div>' % change.get('new_value')
return message
if not tracked_fields:
return True
for record in self.read(cr, uid, ids, tracked_fields.keys(), context=context):
initial = initial_values[record['id']]
changes = []
tracked_values = {}
# generate tracked_values data structure: {'col_name': {col_info, new_value, old_value}}
for col_name, col_info in tracked_fields.items():
if record[col_name] == initial[col_name] and getattr(self._all_columns[col_name].column, 'track_visibility', None) == 'always':
tracked_values[col_name] = dict(col_info=col_info['string'],
new_value=convert_for_display(record[col_name], col_info))
elif record[col_name] != initial[col_name]:
if getattr(self._all_columns[col_name].column, 'track_visibility', None) in ['always', 'onchange']:
tracked_values[col_name] = dict(col_info=col_info['string'],
old_value=convert_for_display(initial[col_name], col_info),
new_value=convert_for_display(record[col_name], col_info))
if col_name in tracked_fields:
changes.append(col_name)
if not changes:
continue
# find subtypes and post messages or log if no subtype found
subtypes = []
for field, track_info in self._track.items():
if field not in changes:
continue
for subtype, method in track_info.items():
if method(self, cr, uid, record, context):
subtypes.append(subtype)
posted = False
for subtype in subtypes:
try:
subtype_rec = self.pool.get('ir.model.data').get_object(cr, uid, subtype.split('.')[0], subtype.split('.')[1], context=context)
except ValueError, e:
_logger.debug('subtype %s not found, giving error "%s"' % (subtype, e))
continue
message = format_message(subtype_rec.description if subtype_rec.description else subtype_rec.name, tracked_values)
self.message_post(cr, uid, record['id'], body=message, subtype=subtype, context=context)
posted = True
if not posted:
message = format_message('', tracked_values)
self.message_post(cr, uid, record['id'], body=message, context=context)
return True
#------------------------------------------------------
# mail.message wrappers and tools
#------------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
if self._needaction:
return [('message_unread', '=', True)]
return []
def _garbage_collect_attachments(self, cr, uid, context=None):
""" Garbage collect lost mail attachments. Those are attachments
- linked to res_model 'mail.compose.message', the composer wizard
- with res_id 0, because they were created outside of an existing
wizard (typically user input through Chatter or reports
created on-the-fly by the templates)
- unused since at least one day (create_date and write_date)
"""
limit_date = datetime.datetime.utcnow() - datetime.timedelta(days=1)
limit_date_str = datetime.datetime.strftime(limit_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ir_attachment_obj = self.pool.get('ir.attachment')
attach_ids = ir_attachment_obj.search(cr, uid, [
('res_model', '=', 'mail.compose.message'),
('res_id', '=', 0),
('create_date', '<', limit_date_str),
('write_date', '<', limit_date_str),
], context=context)
ir_attachment_obj.unlink(cr, uid, attach_ids, context=context)
return True
#------------------------------------------------------
# Email specific
#------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
if not self._inherits.get('mail.alias'):
return [False for id in ids]
return ["%s@%s" % (record['alias_name'], record['alias_domain'])
if record.get('alias_domain') and record.get('alias_name')
else False
for record in self.read(cr, SUPERUSER_ID, ids, ['alias_name', 'alias_domain'], context=context)]
#------------------------------------------------------
# Mail gateway
#------------------------------------------------------
def message_capable_models(self, cr, uid, context=None):
""" Used by the plugin addon, based for plugin_outlook and others. """
ret_dict = {}
for model_name in self.pool.obj_list():
model = self.pool.get(model_name)
if 'mail.thread' in getattr(model, '_inherit', []):
ret_dict[model_name] = model._description
return ret_dict
def _message_find_partners(self, cr, uid, message, header_fields=['From'], context=None):
""" Find partners related to some header fields of the message.
TDE TODO: merge me with other partner finding methods in 8.0 """
partner_obj = self.pool.get('res.partner')
partner_ids = []
s = ', '.join([decode(message.get(h)) for h in header_fields if message.get(h)])
for email_address in tools.email_split(s):
related_partners = partner_obj.search(cr, uid, [('email', 'ilike', email_address), ('user_ids', '!=', False)], limit=1, context=context)
if not related_partners:
related_partners = partner_obj.search(cr, uid, [('email', 'ilike', email_address)], limit=1, context=context)
partner_ids += related_partners
return partner_ids
def _message_find_user_id(self, cr, uid, message, context=None):
""" TDE TODO: check and maybe merge me with other user finding methods in 8.0 """
from_local_part = tools.email_split(decode(message.get('From')))[0]
        # FP Note: canonicalization required, the minimum: .lower()
user_ids = self.pool.get('res.users').search(cr, uid, ['|',
('login', '=', from_local_part),
('email', '=', from_local_part)], context=context)
return user_ids[0] if user_ids else uid
def message_route(self, cr, uid, message, model=None, thread_id=None,
custom_values=None, context=None):
"""Attempt to figure out the correct target model, thread_id,
custom_values and user_id to use for an incoming message.
Multiple values may be returned, if a message had multiple
recipients matching existing mail.aliases, for example.
The following heuristics are used, in this order:
1. If the message replies to an existing thread_id, and
properly contains the thread model in the 'In-Reply-To'
header, use this model/thread_id pair, and ignore
custom_value (not needed as no creation will take place)
2. Look for a mail.alias entry matching the message
recipient, and use the corresponding model, thread_id,
custom_values and user_id.
3. Fallback to the ``model``, ``thread_id`` and ``custom_values``
provided.
4. If all the above fails, raise an exception.
:param string message: an email.message instance
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:type dict custom_values: optional dictionary of default field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. Only used if the message
does not reply to an existing thread and does not match any mail alias.
:return: list of [model, thread_id, custom_values, user_id]
"""
assert isinstance(message, Message), 'message must be an email.message.Message at this point'
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
references = decode_header(message, 'References')
in_reply_to = decode_header(message, 'In-Reply-To')
# 1. Verify if this is a reply to an existing thread
thread_references = references or in_reply_to
ref_match = thread_references and tools.reference_re.search(thread_references)
if ref_match:
thread_id = int(ref_match.group(1))
model = ref_match.group(2) or model
model_pool = self.pool.get(model)
if thread_id and model and model_pool and model_pool.exists(cr, uid, thread_id) \
and hasattr(model_pool, 'message_update'):
_logger.info('Routing mail from %s to %s with Message-Id %s: direct reply to model: %s, thread_id: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
return [(model, thread_id, custom_values, uid)]
# Verify whether this is a reply to a private message
if in_reply_to:
message_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', '=', in_reply_to)], limit=1, context=context)
if message_ids:
message = self.pool.get('mail.message').browse(cr, uid, message_ids[0], context=context)
_logger.info('Routing mail from %s to %s with Message-Id %s: direct reply to a private message: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, message.id, custom_values, uid)
return [(message.model, message.res_id, custom_values, uid)]
# 2. Look for a matching mail.alias entry
# Delivered-To is a safe bet in most modern MTAs, but we have to fallback on To + Cc values
# for all the odd MTAs out there, as there is no standard header for the envelope's `rcpt_to` value.
rcpt_tos = \
','.join([decode_header(message, 'Delivered-To'),
decode_header(message, 'To'),
decode_header(message, 'Cc'),
decode_header(message, 'Resent-To'),
decode_header(message, 'Resent-Cc')])
local_parts = [e.split('@')[0] for e in tools.email_split(rcpt_tos)]
if local_parts:
mail_alias = self.pool.get('mail.alias')
alias_ids = mail_alias.search(cr, uid, [('alias_name', 'in', local_parts)])
if alias_ids:
routes = []
for alias in mail_alias.browse(cr, uid, alias_ids, context=context):
user_id = alias.alias_user_id.id
if not user_id:
# TDE note: this could cause crashes, because no clue that the user
                        # that sent the email has the right to create or modify a new document
# Fallback on user_id = uid
# Note: recognized partners will be added as followers anyway
# user_id = self._message_find_user_id(cr, uid, message, context=context)
user_id = uid
_logger.info('No matching user_id for the alias %s', alias.alias_name)
routes.append((alias.alias_model_id.model, alias.alias_force_thread_id, \
eval(alias.alias_defaults), user_id))
_logger.info('Routing mail from %s to %s with Message-Id %s: direct alias match: %r',
email_from, email_to, message_id, routes)
return routes
# 3. Fallback to the provided parameters, if they work
model_pool = self.pool.get(model)
if not thread_id:
# Legacy: fallback to matching [ID] in the Subject
match = tools.res_re.search(decode_header(message, 'Subject'))
thread_id = match and match.group(1)
# Convert into int (bug spotted in 7.0 because of str)
try:
thread_id = int(thread_id)
except:
thread_id = False
assert thread_id and hasattr(model_pool, 'message_update') or hasattr(model_pool, 'message_new'), \
"No possible route found for incoming message from %s to %s (Message-Id %s:)." \
"Create an appropriate mail.alias or force the destination model." % (email_from, email_to, message_id)
if thread_id and not model_pool.exists(cr, uid, thread_id):
_logger.warning('Received mail reply to missing document %s! Ignoring and creating new document instead for Message-Id %s',
thread_id, message_id)
thread_id = None
_logger.info('Routing mail from %s to %s with Message-Id %s: fallback to model:%s, thread_id:%s, custom_values:%s, uid:%s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
return [(model, thread_id, custom_values, uid)]
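    # Illustration only, with made-up values: a reply to an existing crm.lead
    # with id 42 would yield routes like [('crm.lead', 42, {}, uid)], while a
    # message matching several aliases yields one (model, thread_id,
    # custom_values, user_id) tuple per matching alias.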
def message_process(self, cr, uid, model, message, custom_values=None,
save_original=False, strip_attachments=False,
thread_id=None, context=None):
""" Process an incoming RFC2822 email message, relying on
``mail.message.parse()`` for the parsing operation,
and ``message_route()`` to figure out the target model.
Once the target model is known, its ``message_new`` method
is called with the new message (if the thread record did not exist)
or its ``message_update`` method (if it did).
There is a special case where the target model is False: a reply
to a private message. In this case, we skip the message_new /
message_update step, to just post a new message using mail_thread
message_post.
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:param message: source of the RFC2822 message
:type message: string or xmlrpclib.Binary
:type dict custom_values: optional dictionary of field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param bool save_original: whether to keep a copy of the original
email source attached to the message after it is imported.
:param bool strip_attachments: whether to strip all attachments
before processing the message, in order to save some space.
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. When provided, this
overrides the automatic detection based on the message
headers.
"""
if context is None:
context = {}
# extract message bytes - we are forced to pass the message as binary because
# we don't know its encoding until we parse its headers and hence can't
# convert it to utf-8 for transport between the mailgate script and here.
if isinstance(message, xmlrpclib.Binary):
message = str(message.data)
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
if isinstance(message, unicode):
message = message.encode('utf-8')
msg_txt = email.message_from_string(message)
# parse the message, verify we are not in a loop by checking message_id is not duplicated
msg = self.message_parse(cr, uid, msg_txt, save_original=save_original, context=context)
if strip_attachments:
msg.pop('attachments', None)
if msg.get('message_id'): # should always be True as message_parse generate one if missing
existing_msg_ids = self.pool.get('mail.message').search(cr, SUPERUSER_ID, [
('message_id', '=', msg.get('message_id')),
], context=context)
if existing_msg_ids:
_logger.info('Ignored mail from %s to %s with Message-Id %s:: found duplicated Message-Id during processing',
msg.get('from'), msg.get('to'), msg.get('message_id'))
return False
# find possible routes for the message
routes = self.message_route(cr, uid, msg_txt, model,
thread_id, custom_values,
context=context)
# postpone setting msg.partner_ids after message_post, to avoid double notifications
partner_ids = msg.pop('partner_ids', [])
thread_id = False
for model, thread_id, custom_values, user_id in routes:
if self._name == 'mail.thread':
context.update({'thread_model': model})
if model:
model_pool = self.pool.get(model)
assert thread_id and hasattr(model_pool, 'message_update') or hasattr(model_pool, 'message_new'), \
"Undeliverable mail with Message-Id %s, model %s does not accept incoming emails" % \
(msg['message_id'], model)
                # disable subscriptions during message_new/update to avoid having the system user running the
# email gateway become a follower of all inbound messages
nosub_ctx = dict(context, mail_create_nosubscribe=True)
if thread_id and hasattr(model_pool, 'message_update'):
model_pool.message_update(cr, user_id, [thread_id], msg, context=nosub_ctx)
else:
nosub_ctx = dict(nosub_ctx, mail_create_nolog=True)
thread_id = model_pool.message_new(cr, user_id, msg, custom_values, context=nosub_ctx)
else:
assert thread_id == 0, "Posting a message without model should be with a null res_id, to create a private message."
model_pool = self.pool.get('mail.thread')
new_msg_id = model_pool.message_post(cr, uid, [thread_id], context=context, subtype='mail.mt_comment', **msg)
if partner_ids:
# postponed after message_post, because this is an external message and we don't want to create
# duplicate emails due to notifications
self.pool.get('mail.message').write(cr, uid, [new_msg_id], {'partner_ids': partner_ids}, context=context)
return thread_id
def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
"""Called by ``message_process`` when a new message is received
for a given thread model, if the message did not belong to
an existing thread.
The default behavior is to create a new record of the corresponding
model (based on some very basic info extracted from the message).
Additional behavior may be implemented by overriding this method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse`` for details.
:param dict custom_values: optional dictionary of additional
field values to pass to create()
when creating the new thread record.
Be careful, these values may override
any other values coming from the message.
:param dict context: if a ``thread_model`` value is present
in the context, its value will be used
to determine the model of the record
to create (instead of the current model).
:rtype: int
:return: the id of the newly created thread object
"""
if context is None:
context = {}
data = {}
if isinstance(custom_values, dict):
data = custom_values.copy()
model = context.get('thread_model') or self._name
model_pool = self.pool.get(model)
fields = model_pool.fields_get(cr, uid, context=context)
if 'name' in fields and not data.get('name'):
data['name'] = msg_dict.get('subject', '')
res_id = model_pool.create(cr, uid, data, context=context)
return res_id
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
"""Called by ``message_process`` when a new message is received
for an existing thread. The default behavior is to update the record
with update_vals taken from the incoming email.
Additional behavior may be implemented by overriding this
method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse()`` for details.
:param dict update_vals: a dict containing values to update records
given their ids; if the dict is None or is
void, no write operation is performed.
"""
if update_vals:
self.write(cr, uid, ids, update_vals, context=context)
return True
def _message_extract_payload(self, message, save_original=False):
"""Extract body as HTML and attachments from the mail message"""
attachments = []
body = u''
if save_original:
attachments.append(('original_email.eml', message.as_string()))
if not message.is_multipart() or 'text/' in message.get('content-type', ''):
encoding = message.get_content_charset()
body = message.get_payload(decode=True)
body = tools.ustr(body, encoding, errors='replace')
if message.get_content_type() == 'text/plain':
# text/plain -> <pre/>
body = tools.append_content_to_html(u'', body, preserve=True)
else:
alternative = (message.get_content_type() == 'multipart/alternative')
for part in message.walk():
if part.get_content_maintype() == 'multipart':
continue # skip container
filename = part.get_filename() # None if normal part
encoding = part.get_content_charset() # None if attachment
# 1) Explicit Attachments -> attachments
if filename or part.get('content-disposition', '').strip().startswith('attachment'):
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
continue
# 2) text/plain -> <pre/>
if part.get_content_type() == 'text/plain' and (not alternative or not body):
body = tools.append_content_to_html(body, tools.ustr(part.get_payload(decode=True),
encoding, errors='replace'), preserve=True)
# 3) text/html -> raw
elif part.get_content_type() == 'text/html':
html = tools.ustr(part.get_payload(decode=True), encoding, errors='replace')
if alternative:
body = html
else:
body = tools.append_content_to_html(body, html, plaintext=False)
# 4) Anything else -> attachment
else:
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
return body, attachments
def message_parse(self, cr, uid, message, save_original=False, context=None):
"""Parses a string or email.message.Message representing an
RFC-2822 email, and returns a generic dict holding the
message details.
:param message: the message to parse
:type message: email.message.Message | string | unicode
:param bool save_original: whether the returned dict
should include an ``original`` attachment containing
the source of the message
:rtype: dict
:return: A dict with the following structure, where each
field may not be present if missing in original
message::
{ 'message_id': msg_id,
'subject': subject,
'from': from,
'to': to,
'cc': cc,
'body': unified_body,
'attachments': [('file1', 'bytes'),
('file2', 'bytes')}
}
"""
msg_dict = {
'type': 'email',
'author_id': False,
}
if not isinstance(message, Message):
if isinstance(message, unicode):
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
message = message.encode('utf-8')
message = email.message_from_string(message)
message_id = message['message-id']
if not message_id:
            # Very unusual situation, but we should be fault-tolerant here
message_id = "<%s@localhost>" % time.time()
_logger.debug('Parsing Message without message-id, generating a random one: %s', message_id)
msg_dict['message_id'] = message_id
if message.get('Subject'):
msg_dict['subject'] = decode(message.get('Subject'))
# Envelope fields not stored in mail.message but made available for message_new()
msg_dict['from'] = decode(message.get('from'))
msg_dict['to'] = decode(message.get('to'))
msg_dict['cc'] = decode(message.get('cc'))
if message.get('From'):
author_ids = self._message_find_partners(cr, uid, message, ['From'], context=context)
if author_ids:
msg_dict['author_id'] = author_ids[0]
msg_dict['email_from'] = decode(message.get('from'))
partner_ids = self._message_find_partners(cr, uid, message, ['To', 'Cc'], context=context)
msg_dict['partner_ids'] = [(4, partner_id) for partner_id in partner_ids]
if message.get('Date'):
try:
date_hdr = decode(message.get('Date'))
parsed_date = dateutil.parser.parse(date_hdr, fuzzy=True)
if parsed_date.utcoffset() is None:
# naive datetime, so we arbitrarily decide to make it
# UTC, there's no better choice. Should not happen,
# as RFC2822 requires timezone offset in Date headers.
stored_date = parsed_date.replace(tzinfo=pytz.utc)
else:
stored_date = parsed_date.astimezone(tz=pytz.utc)
except Exception:
_logger.warning('Failed to parse Date header %r in incoming mail '
'with message-id %r, assuming current date/time.',
message.get('Date'), message_id)
stored_date = datetime.datetime.now()
msg_dict['date'] = stored_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
if message.get('In-Reply-To'):
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', '=', decode(message['In-Reply-To']))])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
if message.get('References') and 'parent_id' not in msg_dict:
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', 'in',
[x.strip() for x in decode(message['References']).split()])])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
msg_dict['body'], msg_dict['attachments'] = self._message_extract_payload(message, save_original=save_original)
return msg_dict
#------------------------------------------------------
# Note specific
#------------------------------------------------------
def log(self, cr, uid, id, message, secondary=False, context=None):
_logger.warning("log() is deprecated. As this module inherit from "\
"mail.thread, the message will be managed by this "\
"module instead of by the res.log mechanism. Please "\
"use mail_thread.message_post() instead of the "\
"now deprecated res.log.")
self.message_post(cr, uid, [id], message, context=context)
def _message_add_suggested_recipient(self, cr, uid, result, obj, partner=None, email=None, reason='', context=None):
""" Called by message_get_suggested_recipients, to add a suggested
            recipient in the result dictionary. The form is:
partner_id, partner_name<partner_email> or partner_name, reason """
if email and not partner:
# get partner info from email
partner_info = self.message_get_partner_info_from_emails(cr, uid, [email], context=context, res_id=obj.id)[0]
if partner_info.get('partner_id'):
partner = self.pool.get('res.partner').browse(cr, SUPERUSER_ID, [partner_info.get('partner_id')], context=context)[0]
if email and email in [val[1] for val in result[obj.id]]: # already existing email -> skip
return result
if partner and partner in obj.message_follower_ids: # recipient already in the followers -> skip
return result
if partner and partner in [val[0] for val in result[obj.id]]: # already existing partner ID -> skip
return result
if partner and partner.email: # complete profile: id, name <email>
result[obj.id].append((partner.id, '%s<%s>' % (partner.name, partner.email), reason))
elif partner: # incomplete profile: id, name
result[obj.id].append((partner.id, '%s' % (partner.name), reason))
else: # unknown partner, we are probably managing an email address
result[obj.id].append((False, email, reason))
return result
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
""" Returns suggested recipients for ids. Those are a list of
tuple (partner_id, partner_name, reason), to be managed by Chatter. """
result = dict.fromkeys(ids, list())
if self._all_columns.get('user_id'):
for obj in self.browse(cr, SUPERUSER_ID, ids, context=context): # SUPERUSER because of a read on res.users that would crash otherwise
if not obj.user_id or not obj.user_id.partner_id:
continue
self._message_add_suggested_recipient(cr, uid, result, obj, partner=obj.user_id.partner_id, reason=self._all_columns['user_id'].column.string, context=context)
return result
def message_get_partner_info_from_emails(self, cr, uid, emails, link_mail=False, context=None, res_id=None):
""" Wrapper with weird order parameter because of 7.0 fix.
TDE TODO: remove me in 8.0 """
return self.message_find_partner_from_emails(cr, uid, res_id, emails, link_mail=link_mail, context=context)
def message_find_partner_from_emails(self, cr, uid, id, emails, link_mail=False, context=None):
""" Convert a list of emails into a list partner_ids and a list
            new_partner_ids. The return value is non-conventional because
it is meant to be used by the mail widget.
:return dict: partner_ids and new_partner_ids
TDE TODO: merge me with other partner finding methods in 8.0 """
mail_message_obj = self.pool.get('mail.message')
partner_obj = self.pool.get('res.partner')
result = list()
if id and self._name != 'mail.thread':
obj = self.browse(cr, SUPERUSER_ID, id, context=context)
else:
obj = None
for email in emails:
partner_info = {'full_name': email, 'partner_id': False}
m = re.search(r"((.+?)\s*<)?([^<>]+@[^<>]+)>?", email, re.IGNORECASE | re.DOTALL)
if not m:
continue
email_address = m.group(3)
# first try: check in document's followers
if obj:
for follower in obj.message_follower_ids:
if follower.email == email_address:
partner_info['partner_id'] = follower.id
# second try: check in partners
if not partner_info.get('partner_id'):
ids = partner_obj.search(cr, SUPERUSER_ID, [('email', 'ilike', email_address), ('user_ids', '!=', False)], limit=1, context=context)
if not ids:
ids = partner_obj.search(cr, SUPERUSER_ID, [('email', 'ilike', email_address)], limit=1, context=context)
if ids:
partner_info['partner_id'] = ids[0]
result.append(partner_info)
# link mail with this from mail to the new partner id
if link_mail and partner_info['partner_id']:
message_ids = mail_message_obj.search(cr, SUPERUSER_ID, [
'|',
('email_from', '=', email),
('email_from', 'ilike', '<%s>' % email),
('author_id', '=', False)
], context=context)
if message_ids:
mail_message_obj.write(cr, SUPERUSER_ID, message_ids, {'author_id': partner_info['partner_id']}, context=context)
return result
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification',
subtype=None, parent_id=False, attachments=None, context=None,
content_subtype='html', **kwargs):
""" Post a new message in an existing thread, returning the new
mail.message ID.
:param int thread_id: thread ID to post into, or list with one ID;
if False/0, mail.message model will also be set as False
:param str body: body of the message, usually raw HTML that will
be sanitized
:param str type: see mail_message.type field
            :param str content_subtype: if plaintext: convert body into html
:param int parent_id: handle reply to a previous message by adding the
parent partners to the message in case of private discussion
:param tuple(str,str) attachments or list id: list of attachment tuples in the form
``(name,content)``, where content is NOT base64 encoded
Extra keyword arguments will be used as default column values for the
new mail.message record. Special cases:
- attachment_ids: supposed not attached to any document; attach them
to the related document. Should only be set by Chatter.
:return int: ID of newly created mail.message
"""
if context is None:
context = {}
if attachments is None:
attachments = {}
mail_message = self.pool.get('mail.message')
ir_attachment = self.pool.get('ir.attachment')
assert (not thread_id) or \
isinstance(thread_id, (int, long)) or \
(isinstance(thread_id, (list, tuple)) and len(thread_id) == 1), \
"Invalid thread_id; should be 0, False, an ID or a list with one ID"
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
# if we're processing a message directly coming from the gateway, the destination model was
# set in the context.
model = False
if thread_id:
model = context.get('thread_model', self._name) if self._name == 'mail.thread' else self._name
if model != self._name:
del context['thread_model']
return self.pool.get(model).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
# 0: Parse email-from, try to find a better author_id based on document's followers for incoming emails
email_from = kwargs.get('email_from')
if email_from and thread_id and type == 'email' and kwargs.get('author_id'):
email_list = tools.email_split(email_from)
doc = self.browse(cr, uid, thread_id, context=context)
if email_list and doc:
author_ids = self.pool.get('res.partner').search(cr, uid, [
('email', 'ilike', email_list[0]),
('id', 'in', [f.id for f in doc.message_follower_ids])
], limit=1, context=context)
if author_ids:
kwargs['author_id'] = author_ids[0]
author_id = kwargs.get('author_id')
if author_id is None: # keep False values
author_id = self.pool.get('mail.message')._get_default_author(cr, uid, context=context)
        # 1: Handle content subtype: if plaintext, convert into HTML
if content_subtype == 'plaintext':
body = tools.plaintext2html(body)
# 2: Private message: add recipients (recipients and author of parent message) - current author
# + legacy-code management (! we manage only 4 and 6 commands)
partner_ids = set()
kwargs_partner_ids = kwargs.pop('partner_ids', [])
for partner_id in kwargs_partner_ids:
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
partner_ids.add(partner_id[1])
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
partner_ids |= set(partner_id[2])
elif isinstance(partner_id, (int, long)):
partner_ids.add(partner_id)
else:
pass # we do not manage anything else
if parent_id and not model:
parent_message = mail_message.browse(cr, uid, parent_id, context=context)
private_followers = set([partner.id for partner in parent_message.partner_ids])
if parent_message.author_id:
private_followers.add(parent_message.author_id.id)
private_followers -= set([author_id])
partner_ids |= private_followers
# 3. Attachments
# - HACK TDE FIXME: Chatter: attachments linked to the document (not done JS-side), load the message
attachment_ids = kwargs.pop('attachment_ids', []) or [] # because we could receive None (some old code sends None)
if attachment_ids:
filtered_attachment_ids = ir_attachment.search(cr, SUPERUSER_ID, [
('res_model', '=', 'mail.compose.message'),
('create_uid', '=', uid),
('id', 'in', attachment_ids)], context=context)
if filtered_attachment_ids:
ir_attachment.write(cr, SUPERUSER_ID, filtered_attachment_ids, {'res_model': model, 'res_id': thread_id}, context=context)
attachment_ids = [(4, id) for id in attachment_ids]
# Handle attachments parameter, that is a dictionary of attachments
for name, content in attachments:
if isinstance(content, unicode):
content = content.encode('utf-8')
data_attach = {
'name': name,
'datas': base64.b64encode(str(content)),
'datas_fname': name,
'description': name,
'res_model': model,
'res_id': thread_id,
}
attachment_ids.append((0, 0, data_attach))
# 4: mail.message.subtype
subtype_id = False
if subtype:
if '.' not in subtype:
subtype = 'mail.%s' % subtype
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, *subtype.split('.'))
subtype_id = ref and ref[1] or False
# automatically subscribe recipients if asked to
if context.get('mail_post_autofollow') and thread_id and partner_ids:
partner_to_subscribe = partner_ids
if context.get('mail_post_autofollow_partner_ids'):
partner_to_subscribe = filter(lambda item: item in context.get('mail_post_autofollow_partner_ids'), partner_ids)
self.message_subscribe(cr, uid, [thread_id], list(partner_to_subscribe), context=context)
# _mail_flat_thread: automatically set free messages to the first posted message
if self._mail_flat_thread and not parent_id and thread_id:
message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model)], context=context, order="id ASC", limit=1)
parent_id = message_ids and message_ids[0] or False
# we want to set a parent: force to set the parent_id to the oldest ancestor, to avoid having more than 1 level of thread
elif parent_id:
message_ids = mail_message.search(cr, SUPERUSER_ID, [('id', '=', parent_id), ('parent_id', '!=', False)], context=context)
# avoid loops when finding ancestors
processed_list = []
if message_ids:
message = mail_message.browse(cr, SUPERUSER_ID, message_ids[0], context=context)
while (message.parent_id and message.parent_id.id not in processed_list):
processed_list.append(message.parent_id.id)
message = message.parent_id
parent_id = message.id
values = kwargs
values.update({
'author_id': author_id,
'model': model,
'res_id': thread_id or False,
'body': body,
'subject': subject or False,
'type': type,
'parent_id': parent_id,
'attachment_ids': attachment_ids,
'subtype_id': subtype_id,
'partner_ids': [(4, pid) for pid in partner_ids],
})
# Avoid warnings about non-existing fields
for x in ('from', 'to', 'cc'):
values.pop(x, None)
# Create and auto subscribe the author
msg_id = mail_message.create(cr, uid, values, context=context)
message = mail_message.browse(cr, uid, msg_id, context=context)
if message.author_id and thread_id and type != 'notification' and not context.get('mail_create_nosubscribe'):
self.message_subscribe(cr, uid, [thread_id], [message.author_id.id], context=context)
return msg_id
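    # A hedged usage sketch; the model, record id, partner id and file name
    # below are hypothetical:
    #
    #     self.pool.get('project.task').message_post(
    #         cr, uid, 7,
    #         body='Work finished',
    #         subtype='mail.mt_comment',
    #         partner_ids=[(4, 12)],
    #         attachments=[('report.txt', 'raw file content, not base64')],
    #         context=context)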
#------------------------------------------------------
# Compatibility methods: do not use
# TDE TODO: remove me in 8.0
#------------------------------------------------------
def message_create_partners_from_emails(self, cr, uid, emails, context=None):
return {'partner_ids': [], 'new_partner_ids': []}
def message_post_user_api(self, cr, uid, thread_id, body='', parent_id=False,
attachment_ids=None, content_subtype='plaintext',
context=None, **kwargs):
return self.message_post(cr, uid, thread_id, body=body, parent_id=parent_id,
attachment_ids=attachment_ids, content_subtype=content_subtype,
context=context, **kwargs)
#------------------------------------------------------
# Followers API
#------------------------------------------------------
def message_get_subscription_data(self, cr, uid, ids, context=None):
""" Wrapper to get subtypes data. """
return self._get_subscription_data(cr, uid, ids, None, None, context=context)
def message_subscribe_users(self, cr, uid, ids, user_ids=None, subtype_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, subscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
return self.message_subscribe(cr, uid, ids, partner_ids, subtype_ids=subtype_ids, context=context)
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
""" Add partners to the records followers. """
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
if set(partner_ids) == set([user_pid]):
try:
self.check_access_rights(cr, uid, 'read')
except (osv.except_osv, orm.except_orm):
return
else:
self.check_access_rights(cr, uid, 'write')
self.write(cr, SUPERUSER_ID, ids, {'message_follower_ids': [(4, pid) for pid in partner_ids]}, context=context)
# if subtypes are not specified (and not set to a void list), fetch default ones
if subtype_ids is None:
subtype_obj = self.pool.get('mail.message.subtype')
subtype_ids = subtype_obj.search(cr, uid, [('default', '=', True), '|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
# update the subscriptions
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids), ('partner_id', 'in', partner_ids)], context=context)
fol_obj.write(cr, SUPERUSER_ID, fol_ids, {'subtype_ids': [(6, 0, subtype_ids)]}, context=context)
return True
def message_unsubscribe_users(self, cr, uid, ids, user_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, unsubscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
return self.message_unsubscribe(cr, uid, ids, partner_ids, context=context)
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
""" Remove partners from the records followers. """
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
if set(partner_ids) == set([user_pid]):
self.check_access_rights(cr, uid, 'read')
else:
self.check_access_rights(cr, uid, 'write')
return self.write(cr, SUPERUSER_ID, ids, {'message_follower_ids': [(3, pid) for pid in partner_ids]}, context=context)
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=['user_id'], context=None):
""" Returns the list of relational fields linking to res.users that should
trigger an auto subscribe. The default list checks for the fields
- called 'user_id'
- linking to res.users
- with track_visibility set
            In OpenERP V7, this is sufficient for all major addons such as opportunity,
project, issue, recruitment, sale.
Override this method if a custom behavior is needed about fields
that automatically subscribe users.
"""
user_field_lst = []
for name, column_info in self._all_columns.items():
if name in auto_follow_fields and name in updated_fields and getattr(column_info.column, 'track_visibility', False) and column_info.column._obj == 'res.users':
user_field_lst.append(name)
return user_field_lst
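    # A hypothetical override for a model whose 'reviewer_id' many2one to
    # res.users should also trigger auto subscription (model and field names
    # are illustrative only):
    #
    #     def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields,
    #                                            auto_follow_fields=None, context=None):
    #         return super(my_model, self)._message_get_auto_subscribe_fields(
    #             cr, uid, updated_fields,
    #             auto_follow_fields=['user_id', 'reviewer_id'], context=context)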
def message_auto_subscribe(self, cr, uid, ids, updated_fields, context=None):
"""
1. fetch project subtype related to task (parent_id.res_model = 'project.task')
2. for each project subtype: subscribe the follower to the task
"""
subtype_obj = self.pool.get('mail.message.subtype')
follower_obj = self.pool.get('mail.followers')
# fetch auto_follow_fields
user_field_lst = self._message_get_auto_subscribe_fields(cr, uid, updated_fields, context=context)
# fetch related record subtypes
related_subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', False), ('parent_id.res_model', '=', self._name)], context=context)
subtypes = subtype_obj.browse(cr, uid, related_subtype_ids, context=context)
default_subtypes = [subtype for subtype in subtypes if subtype.res_model == False]
related_subtypes = [subtype for subtype in subtypes if subtype.res_model != False]
relation_fields = set([subtype.relation_field for subtype in subtypes if subtype.relation_field != False])
if (not related_subtypes or not any(relation in updated_fields for relation in relation_fields)) and not user_field_lst:
return True
for record in self.browse(cr, uid, ids, context=context):
new_followers = dict()
parent_res_id = False
parent_model = False
for subtype in related_subtypes:
if not subtype.relation_field or not subtype.parent_id:
continue
if not subtype.relation_field in self._columns or not getattr(record, subtype.relation_field, False):
continue
parent_res_id = getattr(record, subtype.relation_field).id
parent_model = subtype.res_model
follower_ids = follower_obj.search(cr, SUPERUSER_ID, [
('res_model', '=', parent_model),
('res_id', '=', parent_res_id),
('subtype_ids', 'in', [subtype.id])
], context=context)
for follower in follower_obj.browse(cr, SUPERUSER_ID, follower_ids, context=context):
new_followers.setdefault(follower.partner_id.id, set()).add(subtype.parent_id.id)
if parent_res_id and parent_model:
for subtype in default_subtypes:
follower_ids = follower_obj.search(cr, SUPERUSER_ID, [
('res_model', '=', parent_model),
('res_id', '=', parent_res_id),
('subtype_ids', 'in', [subtype.id])
], context=context)
for follower in follower_obj.browse(cr, SUPERUSER_ID, follower_ids, context=context):
new_followers.setdefault(follower.partner_id.id, set()).add(subtype.id)
# add followers coming from res.users relational fields that are tracked
user_ids = [getattr(record, name).id for name in user_field_lst if getattr(record, name)]
user_id_partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, SUPERUSER_ID, user_ids, context=context)]
for partner_id in user_id_partner_ids:
new_followers.setdefault(partner_id, None)
for pid, subtypes in new_followers.items():
subtypes = list(subtypes) if subtypes is not None else None
self.message_subscribe(cr, uid, [record.id], [pid], subtypes, context=context)
# find first email message, set it as unread for auto_subscribe fields for them to have a notification
if user_id_partner_ids:
msg_ids = self.pool.get('mail.message').search(cr, uid, [
('model', '=', self._name),
('res_id', '=', record.id),
('type', '=', 'email')], limit=1, context=context)
if not msg_ids and record.message_ids:
msg_ids = [record.message_ids[-1].id]
if msg_ids:
self.pool.get('mail.notification')._notify(cr, uid, msg_ids[0], partners_to_notify=user_id_partner_ids, context=context)
return True
#------------------------------------------------------
# Thread state
#------------------------------------------------------
def message_mark_as_unread(self, cr, uid, ids, context=None):
""" Set as unread. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
read=false
WHERE
message_id IN (SELECT id from mail_message where res_id=any(%s) and model=%s limit 1) and
partner_id = %s
''', (ids, self._name, partner_id))
return True
def message_mark_as_read(self, cr, uid, ids, context=None):
""" Set as read. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
read=true
WHERE
message_id IN (SELECT id FROM mail_message WHERE res_id=ANY(%s) AND model=%s) AND
partner_id = %s
''', (ids, self._name, partner_id))
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,476,913,251,874,143,000 | 53.280702 | 238 | 0.56956 | false |
JoelBender/bacpypes | samples/Tutorial/ControllerAndIOCB.py | 1 | 2707 | #!/usr/bin/env python
"""
The IO Control Block (IOCB) is an object that holds the parameters
for some kind of operation or function and a place for the result.
The IOController processes the IOCBs it is given and returns the
IOCB back to the caller.
"""
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ArgumentParser
from bacpypes.iocb import IOCB, IOController, COMPLETED, ABORTED
# some debugging
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
class SomeController(IOController):
def process_io(self, iocb):
if _debug: SomeController._debug("process_io", iocb)
# try to complete the request
try:
response = iocb.args[0] + iocb.args[1] * iocb.kwargs['a']
self.complete_io(iocb, response)
except Exception as err:
self.abort_io(iocb, err)
@bacpypes_debugging
def call_me(iocb):
"""
When a controller completes the processing of a request,
the IOCB can contain one or more functions to be called.
"""
if _debug: call_me._debug("callback_function %r", iocb)
# it will be successful or have an error
print("call me, %r or %r" % (iocb.ioResponse, iocb.ioError))
def main():
# parse the command line arguments
args = ArgumentParser(description=__doc__).parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# create a controller
some_controller = SomeController()
if _debug: _log.debug(" - some_controller: %r", some_controller)
# test set
tests = [
( (1,2,), {'a':3} ),
( (4,5,), {} ),
( (6,), {'a':7} ),
]
for test_args, test_kwargs in tests:
print("test_args, test_kwargs: %r, %r" % (test_args, test_kwargs))
# create a request with some args and kwargs
iocb = IOCB(*test_args, **test_kwargs)
        # add a callback function, called when the request has been processed
iocb.add_callback(call_me)
# give the request to the controller
some_controller.request_io(iocb)
# wait for the request to be processed
iocb.ioComplete.wait()
if _debug: _log.debug(" - iocb: %r", iocb)
# dump the contents
print("iocb completion event set: %r" % (iocb.ioComplete.is_set(),))
print("")
print("iocb successful: %r" % (iocb.ioState == COMPLETED,))
print("iocb response: %r" % (iocb.ioResponse,))
print("")
print("iocb aborted: %r" % (iocb.ioState == ABORTED,))
print("iocb error: %r" % (iocb.ioError,))
print("")
if __name__ == '__main__':
main()
| mit | -3,490,389,466,756,564,000 | 27.797872 | 78 | 0.612117 | false |
Tefx/WINO | ec2.py | 1 | 1626 | #!/usr/bin/env python3
import scheduler as s
import worker as w
from cluster import Cluster
from gevent import sleep
from time import time
class EC2Task(s.Task):
def execute(self):
self.machine.worker.execute(task=w.Task(self.runtime))
class EC2Comm(s.Comm):
def execute(self):
self.rproc = self.from_task.machine.worker.async_call(
"send_to",
data=w.Data(self.data_size),
target_addr=self.to_task.machine.worker.private_ip)
self.rproc.join()
# print(self, self.rproc.value.statistic)
def wait_for_init(self):
while not hasattr(self, "rproc"):
sleep(0.01)
def suspend(self):
self.wait_for_init()
self.from_task.machine.worker.suspend(self.rproc)
def resume(self):
self.from_task.machine.worker.resume(self.rproc)
class EC2Scheduler(s.Scheduler):
task_cls = EC2Task
comm_cls = EC2Comm
ami = "ami-f796e594"
sgroup = "sg-c86bc4ae"
region = "ap-southeast-1"
pgroup = "wino"
def __init__(self, vm_type):
self.vm_type = vm_type
super().__init__()
def prepare_workers(self, **kwargs):
cluster = Cluster(self.ami, self.sgroup, self.region, self.pgroup, **kwargs)
workers = cluster.create_workers(len(self.machines), self.vm_type)
for worker, machine in zip(workers, self.machines):
machine.worker = worker
if __name__ == "__main__":
from sys import argv
for path in argv[1:]:
s = EC2Scheduler("c4.large")
# s = EC2Scheduler("t2.micro")
s.load(path)
s.run(log="p")
| gpl-3.0 | -4,019,200,081,466,505,700 | 25.655738 | 84 | 0.612546 | false |
google/turbinia | tools/turbinia_job_graph.py | 1 | 3144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph to visualise job/evidence relationships."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import graphviz
import sys
from turbinia.jobs import manager as jobs_manager
try:
unicode
except NameError:
unicode = str # pylint: disable=redefined-builtin
def create_graph():
"""Create graph of relationships between Turbinia jobs and evidence.
Returns:
Instance of graphviz.dot.Digraph
"""
dot = graphviz.Digraph(comment='Turbinia Evidence graph', format='png')
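  # one graph node per job; evidence types become box-shaped nodes linked by input/output edges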
for _, job in jobs_manager.JobsManager.GetJobs():
dot.node(job.NAME)
for evidence in job.evidence_input:
dot.node(evidence.__name__, shape='box')
dot.edge(evidence.__name__, job.NAME)
for evidence in job.evidence_output:
dot.node(evidence.__name__, shape='box')
dot.edge(job.NAME, evidence.__name__)
return dot
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Create Turbinia evidence graph.')
parser.add_argument(
'-f', '--format', default='png',
help='The format of the output file you wish to generate. Specify '
'"list" to list out the available output types. More info is here: '
'http://www.graphviz.org/doc/info/output.html')
parser.add_argument(
'-e', '--engine', default='dot',
help='The graphviz engine used to generate the graph layout. Specify '
'"list" to list out the available engines.')
parser.add_argument('filename', type=unicode, help='where to save the file')
args = parser.parse_args()
if args.format == 'list':
formats = ' '.join(graphviz.FORMATS)
print('Available format types: {0:s}'.format(formats))
sys.exit(0)
if args.format not in graphviz.FORMATS:
print('Format type {0:s} is not supported'.format(args.format))
sys.exit(1)
if args.engine == 'list':
engines = ' '.join(graphviz.ENGINES)
print('Available graph layout engines: {0:s}'.format(engines))
sys.exit(0)
if args.engine not in graphviz.ENGINES:
print('Layout engine type {0:s} is not supported'.format(args.engine))
sys.exit(1)
graph = create_graph()
graph.engine = args.engine
output_file = args.filename.replace('.png', '')
try:
rendered_graph = graph.render(
filename=output_file, format=args.format, cleanup=True)
print('Graph generated and saved to: {0}'.format(rendered_graph))
except graphviz.ExecutableNotFound:
print('Graphviz is not installed - Run: apt-get install graphviz')
| apache-2.0 | 2,878,288,893,988,940,300 | 32.446809 | 78 | 0.69243 | false |
mazz/kifu | lib/access.py | 1 | 8825 | """Handle auth and authz activities in ~~~PROJNAME~~~"""
import logging
from decorator import decorator
from pyramid.decorator import reify
from pyramid.httpexceptions import HTTPForbidden
from pyramid.httpexceptions import HTTPFound
from pyramid.request import Request
from pyramid.security import unauthenticated_userid
from ~~~PROJNAME~~~.models.auth import UserMgr
LOG = logging.getLogger(__name__)
class AuthHelper(object):
"""Manage the inner workings of authorizing things"""
@staticmethod
def check_api(submitted_key, users_key):
"""Verify the api key is valid"""
if users_key != submitted_key:
return False
else:
return True
@staticmethod
def check_login(request, username=None):
"""Check that the user is logged in correctly
:param username: a username to make sure the current user is in fact
"""
if request.user is None:
return False
# if we have a username we're told to check against, make sure the
# username matches
if username is not None and username != request.user.username:
return False
return True
@staticmethod
def not_valid(request, redirect=None):
"""Handle the Forbidden exception unless redirect is there
The idea is that if there's a redirect we shoot them to the login form
instead
"""
if redirect is None:
raise HTTPForbidden('Deactivated Account')
else:
raise HTTPFound(location=request.route_url(redirect))
class ReqOrApiAuthorize(object):
"""A context manager that works with either Api key or logged in user"""
def __init__(self, request, api_key, user_acct, username=None,
redirect=None):
self.request = request
self.api_key = api_key
self.user_acct = user_acct
self.username = username
if redirect:
self.redirect = redirect
def __enter__(self):
"""Handle the verification side
Logged in user checked first, then api matching
"""
# if the user account is not activated then no go
if not self.user_acct.activated:
raise HTTPForbidden('Deactivated Account')
if AuthHelper.check_login(self.request, username=self.username):
return True
if AuthHelper.check_api(self.api_key, self.user_acct.api_key):
return True
raise HTTPForbidden('Invalid Authorization')
def __exit__(self, exc_type, exc_value, traceback):
"""No cleanup to do here"""
pass
class ApiAuthorize(object):
"""Context manager to check if the user is authorized
use:
with ApiAuthorize(some_key):
# do work
Will return NotAuthorized if it fails
"""
def __init__(self, user, submitted_key, redirect=None):
"""Create the context manager"""
        self.user = user
        self.check_key = submitted_key
class RequestWithUserAttribute(Request):
@reify
def user(self):
# <your database connection, however you get it, the below line
# is just an example>
# dbconn = self.registry.settings['dbconn']
user_id = unauthenticated_userid(self)
if user_id is not None:
# this should return None if the user doesn't exist
# in the database
user = UserMgr.get(user_id=user_id)
return user
def __enter__(self):
"""Verify api key set in constructor"""
# if the user account is not activated then no go
if not self.user.activated:
raise HTTPForbidden('Deactivated Account')
if not AuthHelper.check_api(self.check_key, self.user.api_key):
raise HTTPForbidden('Invalid Authorization')
def __exit__(self, exc_type, exc_value, traceback):
"""No cleanup work to do after usage"""
pass
class ReqAuthorize(object):
"""Context manager to check if the user is logged in
use:
with ReqAuthorize(request):
# do work
Will return NotAuthorized if it fails
"""
def __init__(self, request, username=None, redirect=None):
"""Create the context manager"""
self.request = request
self.username = username
self.redirect = redirect
def __enter__(self):
"""Verify api key set in constructor"""
if not AuthHelper.check_login(self.request, self.username):
raise HTTPForbidden('Invalid Authorization')
def __exit__(self, exc_type, exc_value, traceback):
"""No cleanup work to do after usage"""
pass
class api_auth():
"""View decorator to set check the client is permitted
Since api calls can come from the api via a api_key or a logged in user via
the website, we need to check/authorize both
If this is an api call and the api key is valid, stick the user object
found onto the request.user so that the view can find it there in one
place.
"""
def __init__(self, api_field, user_fetcher, admin_only=False, anon=False):
"""
:param api_field: the name of the data in the request.params and the
User object we compare to make sure they match
:param user_fetcher: a callable that I can give a username to and
get back the user object
:sample: @ApiAuth('api_key', UserMgr.get)
"""
self.api_field = api_field
self.user_fetcher = user_fetcher
self.admin_only = admin_only
self.anon = anon
def __call__(self, action_):
""" Return :meth:`wrap_action` as the decorator for ``action_``. """
return decorator(self.wrap_action, action_)
def _check_admin_only(self, request):
"""If admin only, verify current api belongs to an admin user"""
api_key = request.params.get(self.api_field, None)
if request.user is None:
user = self.user_fetcher(api_key=api_key)
else:
user = request.user
if user is not None and user.is_admin:
request.user = user
return True
def wrap_action(self, action_, *args, **kwargs):
"""
Wrap the controller action ``action_``.
:param action_: The controller action to be wrapped.
``args`` and ``kwargs`` are the positional and named arguments which
will be passed to ``action_`` when called.
"""
# check request.user to see if this is a logged in user
# if so, then make sure it matches the matchdict user
# request should be the one and only arg to the view function
request = args[0]
username = request.matchdict.get('username', None)
api_key = None
# if this is admin only, you're either an admin or not
if self.admin_only:
if self._check_admin_only(request):
return action_(*args, **kwargs)
else:
request.response.status_int = 403
return {'error': "Not authorized for request."}
if request.user is not None:
if AuthHelper.check_login(request, username):
# then we're good, this is a valid user for this url
return action_(*args, **kwargs)
# get the user the api key belongs to
if self.api_field in request.params:
# we've got a request with url params
api_key = request.params.get(self.api_field, None)
username = request.params.get('username', username)
def is_json_auth_request(request):
if hasattr(request, 'json_body'):
if self.api_field in request.json_body:
return True
return False
if is_json_auth_request(request):
# we've got a ajax request with post data
api_key = request.json_body.get(self.api_field, None)
username = request.json_body.get('username', None)
if username is not None and api_key is not None:
# now get what this user should be based on the api_key
request.user = self.user_fetcher(api_key=api_key)
# if there's a username in the url (rdict) then make sure the user
# the api belongs to is the same as the url. You can't currently
# use the api to get info for other users.
if request.user and request.user.username == username:
return action_(*args, **kwargs)
# if this api call accepts anon requests then let it through
if self.anon:
return action_(*args, **kwargs)
# otherwise, we're done, you're not allowed
request.response.status_int = 403
return {'error': "Not authorized for request."}
| mit | 4,950,611,689,445,477,000 | 31.326007 | 79 | 0.608725 | false |
astrofra/amiga-experiments | game-shop-shop-galaxy/python-version/game_puck_player_racket.py | 1 | 1183 | import math
import game_puck_board as board
from utils import *
racket_speed = 50
velocity_x = 0.0
velocity_z = 0.0
initial_pox_x = 0.0
initial_pox_z = 0.0
pos_x = 0.0
pos_z = 0.0
target_pos_x = 0.0
target_pos_z = 0.0
prev_pos_x = 0.0
prev_pos_z = 0.0
width = 2.0
length = 0.5
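# The racket eases toward the mouse-driven target each frame; velocity_x/velocity_z hold the per-frame position delta.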
def setPosition(x,z):
global pos_x, pos_z
pos_x = x
pos_z = z
def reset():
global pos_x, pos_z, initial_pox_x, initial_pox_z, velocity_x, velocity_z
pos_x = initial_pox_x
pos_z = initial_pox_z
prev_pos_x = pos_x
prev_pos_z = pos_z
def setMouse(x,y):
global target_pos_x, target_pos_z
x = Clamp(x, 0, 1.0)
y = Clamp(y, 0, 0.5)
target_pos_x = RangeAdjust(x, 0.0, 1.0, board.board_width * -0.5 + (width * 0.5), board.board_width * 0.5 - (width * 0.5))
target_pos_z = RangeAdjust(y, 0.0, 0.5, board.board_length * 0.5 - (length * 0.5), board.board_length * 0.35 - (length * 0.5))
def update(dt):
global pos_x, pos_z, velocity_x, velocity_z, prev_pos_x, prev_pos_z
prev_pos_x = pos_x
prev_pos_z = pos_z
pos_x += (target_pos_x - pos_x) * dt * racket_speed
pos_z += (target_pos_z - pos_z) * dt * racket_speed
velocity_x = pos_x - prev_pos_x
velocity_z = pos_z - prev_pos_z
| mit | -3,492,901,453,538,054,700 | 19.050847 | 127 | 0.623838 | false |
ngageoint/gamification-server | gamification/core/templatetags/teams.py | 1 | 1710 | # -*- coding: utf-8 -*-
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, as long as
# any reuse or further development of the software attributes the
# National Geospatial-Intelligence Agency (NGA) authorship as follows:
# 'This software (gamification-server)
# is provided to the public as a courtesy of the National
# Geospatial-Intelligence Agency.
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from django import template
from django.utils.text import slugify
register = template.Library()
@register.filter('teamify')
def teamify(nodes):
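    # map each team's slugified name to the list of that team's member records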
team_members = {}
for team in nodes:
team_members[str(slugify(team.name))] = []
for member in team.memberpoints['members']:
team_members[str(slugify(team.name))].append(member);
return team_members
| mit | 369,573,262,884,101,250 | 40.707317 | 72 | 0.753801 | false |
kayhayen/Nuitka | tests/standalone/LxmlUsing.py | 1 | 1151 | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Lxml standalone basic test.
"""
from __future__ import print_function
import lxml.etree
# nuitka-skip-unless-imports: lxml.etree
tree = lxml.etree.fromstring("<root>value</root>")
assert tree.tag == "root"
assert tree.text == "value"
print("OK")
| apache-2.0 | -926,701,526,460,733,600 | 32.852941 | 78 | 0.710686 | false |
sushant-hiray/teamflowy | project/views.py | 1 | 35934 | # Create your views here.
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from accounts.models import Employee, PhoneNo, Skill, HasSkill
from django.core.urlresolvers import reverse
from django.contrib.auth.hashers import make_password, check_password
from accounts.utils import *
from project.models import *
from django.http import Http404
import json as simplejson
from notification.utils import *
from django.db.models import Q,F
@login_required
def createhandler(request):
if not(isManagerAuthenticated(request)):
return HttpResponseRedirect(reverse('accounts:index'))
else:
name = request.POST['name']
desc = request.POST['desc']
username = request.session.get('username')
emp = Employee.objects.get(username=username)
p = Project(name = name, description = desc, manager = emp)
p.save()
id = p.pk
tm=Teammember(project=p,employee=emp)
tm.save()
return HttpResponseRedirect(reverse('project:view' ,args=(id,)))
@login_required
def proj_analytics(request,project_id):
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
taskCount = Task.objects.filter(project = project).count()
completedTaskCount = Task.objects.filter(project = project , approved = True).count()
delayedTaskCount = Task.objects.filter(project = project , deadline__lt = F('enddate')).count()
taskIssueCount = 0
taskUnresolvedIssueCount = 0
subtaskCount = 0
completedSubtaskCount = 0
delayedSubtaskCount = 0
subtaskIssueCount = 0
subtaskUnresolvedIssueCount = 0
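    # walk every task in the project to aggregate subtask, completion and issue counts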
for taskitem in Task.objects.filter(project = project):
subtaskCount = subtaskCount + Subtask.objects.filter(task = taskitem).count()
completedSubtaskCount = completedSubtaskCount + Subtask.objects.filter(task = taskitem , approved = True).count()
delayedSubtaskCount = delayedSubtaskCount + Subtask.objects.filter(task= taskitem , deadline__lt = F('enddate')).count()
taskUnresolvedIssueCount = taskUnresolvedIssueCount + TaskIssue.objects.filter(Q(task = taskitem) & (Q(resolvedate = datetime.date.today()) |Q(resolvedate = None))).count()
taskIssueCount = taskIssueCount + TaskIssue.objects.filter(task= taskitem).count()
for subtaskitem in Subtask.objects.filter(task = taskitem):
subtaskUnresolvedIssueCount = subtaskUnresolvedIssueCount + SubtaskIssue.objects.filter(Q(subtask = subtaskitem) & (Q(resolvedate = datetime.date.today()) |Q(resolvedate = None))).count()
subtaskIssueCount = subtaskIssueCount + SubtaskIssue.objects.filter(subtask= subtaskitem).count()
print completedTaskCount , completedSubtaskCount
context['taskCount'] = taskCount
context['completedTaskCount'] = completedTaskCount
context['subtaskCount'] = subtaskCount
context['completedSubtaskCount'] = completedSubtaskCount
context['delayedTaskCount'] = delayedTaskCount
context['delayedSubtaskCount'] = delayedSubtaskCount
context['project'] = project
context['taskIssueCount'] = taskIssueCount
context['taskUnresolvedIssueCount'] = taskUnresolvedIssueCount
context['subtaskIssueCount'] = subtaskIssueCount
context['subtaskUnresolvedIssueCount'] = subtaskUnresolvedIssueCount
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/projectAnalytics.html', context)
@login_required
def view(request,project_id):
    #show the project page
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
try:
proj = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
projects = Teammember.objects.filter(employee=emp)
context['project'] = proj
manager = proj.manager;
#check if the project is edited
if request.GET.get('edit'):
context['edited'] = 1
else:
context['edited'] = 0
    #check if the person viewing is the manager who created the project
if(emp == manager):
context['edit'] = 1
else:
context['edit'] = 0
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/view.html', context)
@login_required
def members(request,project_id):
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
tms= Teammember.objects.filter(project = project)
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
context['project'] = project
context['members'] = tms
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/members.html', context)
@login_required
def tasks(request,project_id):
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
t = Task.objects.filter(project = project)
    #check if the person viewing is the manager who created the project
manager = project.manager;
if(emp == manager):
context['manager'] = 1
else:
context['manager'] = 0
context['project'] = project
context['tasks'] = t
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/task.html', context)
@login_required
def subtasks(request,project_id,task_id):
username = request.session.get('username')
emp = Employee.objects.get(username = username)
context = {}
context['user'] = emp
try:
project = Project.objects.get(pk = project_id)
task = Task.objects.get(project = project,taskid = task_id)
except Project.DoesNotExist:
raise Http404
st = Subtask.objects.filter(project = project,task = task)
t = Task.objects.filter(project = project)
#check if the person viewing is manager who created the project
manager = project.manager
taskmanager = task.manager
if(emp == manager or emp == taskmanager):
context['manager'] = 1
else:
context['manager'] = 0
context['task'] = task
context['subtasks'] = st
context['count'] = st.count()
context['tasks'] = t
context['today'] = datetime.date.today()
return render(request, 'project/subtask.html', context)
@login_required
def subtaskview(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
except Project.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['subtask']=subtask
st = Subtask.objects.filter(task = task, project = project)
context['today'] = datetime.date.today()
context['maxdate'] = datetime.date.max
context['subtasks'] = st
skills = SubtaskSkills.objects.filter(subtask=subtask)
context['skills'] = skills
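    # derive the display status and close/approve permissions from the end date, deadline and approval flag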
if subtask.enddate == None or subtask.enddate == datetime.date.max :
context['enddate'] = "Yet to be completed"
if emp == subtask.assignee:
context['close'] = 1
else:
context['close'] = 0
if datetime.date.today() > subtask.deadline:
context['status'] = "Deadline exceeded, Ongoing"
else:
context['status'] = "Ongoing"
else:
context['enddate'] = subtask.enddate
context['close'] = 0
if subtask.enddate > subtask.deadline:
if subtask.approved == 1:
context['status'] = "Approved, Deadline Exceeded, Complete"
else:
context['status'] = "Approval Pending, Deadline Exceeded, Complete"
else:
if subtask.approved == 1:
context['status'] = "Approved, Subtask Complete"
else:
context['status'] = "Approval Pending, Subtask Complete"
return render(request, 'project/viewsubtask.html', context)
@login_required
def closesubtask(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == subtask.assignee:
subtask.enddate = datetime.date.today()
subtask.save()
subtaskCompleteNotification(subtask)
return HttpResponseRedirect(reverse('project:subtaskview' ,args=(project_id,task_id,subtask_id,)))
else:
raise Http404
@login_required
def opensubtask(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == task.manager or emp == project.manager:
subtask.enddate = datetime.date.max
subtask.save()
return HttpResponseRedirect(reverse('project:subtaskview' ,args=(project_id,task_id,subtask_id,)))
else:
raise Http404
@login_required
def approvesubtask(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == task.manager or emp == project.manager:
review = request.POST['review']
rating = request.POST['rating']
subtask.approved = 1
subtask.review = review
subtask.rating = rating
subtask.save()
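        # credit each of the subtask's required skills to the assignee, keeping a running-average rating per skill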
skills = SubtaskSkills.objects.filter(subtask=subtask)
for skill in skills:
emsk=HasSkill.objects.filter(employee=subtask.assignee,skill=skill.skill)
if not emsk:
                emsk=HasSkill(employee=subtask.assignee,skill=skill.skill)
emsk.save()
else:
emsk=emsk[0]
no=int(emsk.number)
newrating=(no*float(emsk.rating) + float(rating)) /(no+1)
emsk.number = no + 1
emsk.rating = newrating
emsk.save()
return HttpResponseRedirect(reverse('project:subtaskfeedback' ,args=(project_id,task_id,subtask_id,)))
else:
raise Http404
@login_required
def subtaskfeedback(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
except Project.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == task.manager or emp == project.manager:
if subtask.enddate == None or subtask.enddate == datetime.date.max:
context['form'] = 0
context['complete'] = 0
context['approved'] = 0
else:
context['complete'] = 1
if subtask.approved == 1:
context['form'] = 0
context['approved'] = 1
else:
context['form'] = 1
context['approved'] = 0
else:
context['form'] = 0
if subtask.enddate == None or subtask.enddate == datetime.date.max:
context['complete'] = 0
context['approved'] = 0
else:
context['complete'] = 1
if subtask.approved == 1:
context['approved'] = 1
else:
context['approved'] = 0
context['user'] = emp
context['subtask']=subtask
st = Subtask.objects.filter(task = task, project = project)
context['subtasks'] = st
context['range'] = range(10)
return render(request, 'project/subtaskfeedback.html', context)
@login_required
def taskissueview(request,project_id,task_id,issue_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
issue = TaskIssue.objects.get(pk=issue_id,task = task)
except Project.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['issue']=issue
ti = TaskIssue.objects.filter(task = task, project = project)
t = Task.objects.filter(project = project)
context['issues'] = ti
context['today'] = datetime.date.today()
context['tasks'] = t
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
context['member'] = 1
else:
context['member'] = 0
return render(request, 'project/viewtaskissue.html', context)
@login_required
def subtaskissueview(request,project_id,task_id,subtask_id,issue_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task,project=project)
issue = SubtaskIssue.objects.get(pk=issue_id,subtask = subtask)
except Project.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['issue']=issue
sti = SubtaskIssue.objects.filter(subtask = subtask, task = task, project = project)
st = Subtask.objects.filter(project = project, task = task)
context['issues'] = sti
context['today'] = datetime.date.today()
context['subtasks'] = st
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
context['member'] = 1
else:
context['member'] = 0
return render(request, 'project/viewsubtaskissue.html', context)
@login_required
def closesubtaskissue(request,project_id,task_id,subtask_id,issue_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task,project=project)
issue = SubtaskIssue.objects.get(pk=issue_id,subtask = subtask)
except Project.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['issue']=issue
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
issue.resolvedate=datetime.date.today()
issue.save()
return HttpResponseRedirect(reverse('project:subtaskissueview' ,args=(project_id,task_id,subtask_id,issue_id,)))
else:
raise Http404
@login_required
def closetaskissue(request,project_id,task_id,issue_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
issue = TaskIssue.objects.get(pk=issue_id,task = task)
except Project.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['issue']=issue
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
issue.resolvedate=datetime.date.today()
issue.save()
return HttpResponseRedirect(reverse('project:taskissueview' ,args=(project_id,task_id,issue_id,)))
else:
raise Http404
@login_required
def taskview(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
except Project.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['project']=project
context['task']=task
t = Task.objects.filter(project = project)
context['tasks'] = t
context['skills'] = TaskSkills.objects.filter(task=task)
return render(request, 'project/viewtask.html', context)
@login_required
def taskissues(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
except Task.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager
taskmanager = task.manager
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
context['manager'] = 1
else:
context['manager'] = 0
context['user'] = emp
context['project']=project
context['task']=task
issues = TaskIssue.objects.filter(task = task)
context['issues'] = issues
context['count'] = issues.count()
context['today'] = datetime.date.today()
t = Task.objects.filter(project = project)
context['tasks'] = t
return render(request, 'project/taskissues.html', context)
@login_required
def subtaskissues(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
except Task.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
context['manager'] = 1
else:
context['manager'] = 0
context['user'] = emp
context['project']=project
context['subtask']=subtask
issues = SubtaskIssue.objects.filter(subtask = subtask)
context['issues'] = issues
context['count'] = issues.count()
context['today'] = datetime.date.today()
st = Subtask.objects.filter(project = project,task=task)
context['subtasks'] = st
return render(request, 'project/subtaskissues.html', context)
@login_required
def taskfeedback(request,project_id,task_id):
try:
project = Project.objects.get(pk = project_id)
task = Task.objects.get(taskid = task_id,project = project)
except Project.DoesNotExist:
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['project']=project
context['task']=task
if(emp==task.manager or emp==project.manager):
context['manager']=1
else:
context['manager']=0
if(emp==project.manager):
context['pm'] = 1
else:
context['pm'] = 0
st = Subtask.objects.filter(task= task,project = project)
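    # collect unapproved subtasks; the task counts as complete only when none are pending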
pending=[]
complete = 1
for subtask in st:
if subtask.approved == 1 :
complete = complete*1
else:
complete = 0
pending.append(subtask)
context['complete'] = complete
context['pending'] = pending
t = Task.objects.filter(project = project)
context['tasks'] = t
context['today'] = datetime.date.today()
context['range'] = range(10)
return render(request, 'project/taskfeedback.html', context)
@login_required
def closetask(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == task.manager:
st = Subtask.objects.filter(task= task,project = project)
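        # the task may only be closed once every one of its subtasks has been approved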
complete = 1
for subtask in st:
if subtask.approved == 1 :
complete = complete*1
else:
complete = 0
if complete == 0 :
return HttpResponseRedirect(reverse('project:taskfeedback' ,args=(project_id,task_id,)))
else:
task.enddate = datetime.date.today()
task.save()
taskCompleteNotification(task)
return HttpResponseRedirect(reverse('project:taskfeedback' ,args=(project_id,task_id,)))
else:
raise Http404
@login_required
def approvetask(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == project.manager:
review = request.POST['review']
rating = request.POST['rating']
task.approved = 1
task.review = review
task.rating = rating
if emp == task.manager:
task.enddate = datetime.date.today()
task.save()
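        # credit the task's required skills to the task manager using the same running-average rating update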
skills = TaskSkills.objects.filter(task=task)
for skill in skills:
emsk=HasSkill.objects.filter(employee=task.manager,skill=skill.skill)
if not emsk:
emsk=HasSkill(employee=task.manager,skill=skill.skill)
emsk.save()
else:
emsk=emsk[0]
no=int(emsk.number)
newrating=(no*float(emsk.rating) + float(rating)) /(no+1)
emsk.number = no + 1
emsk.rating = newrating
emsk.save()
return HttpResponseRedirect(reverse('project:taskfeedback' ,args=(project_id,task_id,)))
else:
raise Http404
@login_required
def addsubtask(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager;
if(emp != manager):
raise Http404
else:
context={}
context['user'] = emp
context['task'] = task
st = Subtask.objects.filter(task = task, project = project)
context['subtasks'] = st
users=Employee.objects.all()
names=[]
for user in users:
names.append(user.name)
context['users'] = simplejson.dumps(names)
context['today'] = datetime.date.today()
return render(request,'project/addsubtask.html',context)
@login_required
def addtaskissue(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager;
tms=Teammember.objects.filter(project=project)
c=0
for tm in tms:
if emp ==tm.employee:
c=1
if c!=1:
raise Http404
else:
context={}
context['user'] = emp
context['task'] = task
issues = TaskIssue.objects.filter(task = task)
context['issues'] = issues
return render(request,'project/addtaskissue.html',context)
@login_required
def addsubtaskissue(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager;
###change this manager to teammember
if(emp != manager):
raise Http404
else:
context={}
context['user'] = emp
context['subtask'] = subtask
issues = SubtaskIssue.objects.filter(subtask = subtask)
context['issues'] = issues
return render(request,'project/addsubtaskissue.html',context)
@login_required
def addtask(request,project_id):
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager;
if(emp != manager):
raise Http404
else:
context={}
context['user'] = emp
context['project']=project
t = Task.objects.filter(project = project)
context['tasks'] = t
users=Employee.objects.all()
names=[]
for user in users:
names.append(user.name)
context['users'] = simplejson.dumps(names)
context['today'] = datetime.date.today()
return render(request,'project/addtask.html',context)
@login_required
def addtmanager(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = request.POST['manager']
try:
manage=Employee.objects.get(name=manager)
except Employee.DoesNotExist:
raise Http404
task.manager=manage
task.save()
taskCreateNotification(task)
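    # make sure the newly assigned task manager is also registered as a project team member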
tm=Teammember.objects.filter(project=project,employee=manage)
if not tm:
tm=Teammember(employee=manage,project=project)
tm.save()
return HttpResponseRedirect(reverse('project:taskview' , args=(project_id,task_id,)))
@login_required
def addstmanager(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
assignee = request.POST['assignee']
try:
manage=Employee.objects.get(name=assignee)
except Employee.DoesNotExist:
raise Http404
subtask.assignee=manage
subtask.save()
subTaskCreateNotification(subtask)
tm=Teammember.objects.filter(project=project,employee=manage)
if not tm:
tm=Teammember(employee=manage,project=project)
tm.save()
return HttpResponseRedirect(reverse('project:subtaskview' , args=(project_id,task_id,subtask_id,)))
@login_required
def createtaskhandler(request,project_id):
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager;
if(emp != manager):
raise Http404
else:
name = request.POST['name']
desc = request.POST['desc']
start = request.POST['start']
deadline = request.POST['deadline']
priority = request.POST['priority']
skills = request.POST.getlist('skill[]')
t = Task.objects.all().filter(project=project).count()
tid=t+1
task=Task(manager=emp,taskid=tid,name=name,description=desc,priority=priority.strip(),startdate=start,deadline=deadline,project=project)
task.save()
for skill in skills:
sk=Skill.objects.filter(name=skill)
if not sk:
sk=Skill(name=skill)
sk.save()
else:
sk=sk[0]
tsk=TaskSkills(task=task,project=project,skill=sk)
tsk.save()
context={}
context['user'] = emp
context['project'] = project
context['task'] =task
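        # suggest candidate assignees: employees who already have at least one of the task's required skills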
skills=TaskSkills.objects.filter(task=task,project=project)
user=[]
users=Employee.objects.all()
for skill in skills:
hss=HasSkill.objects.filter(skill=skill.skill)
for hs in hss:
user.append(hs.employee.name)
context['users'] = simplejson.dumps(user)
return render(request,'project/addtm.html',context)
@login_required
def createsubtaskhandler(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager
taskmanager = task.manager
if(emp != manager and emp!= taskmanager):
raise Http404
else:
name = request.POST['name']
desc = request.POST['desc']
start = request.POST['start']
deadline = request.POST['deadline']
priority = request.POST['priority']
skills = request.POST.getlist('skill[]')
t = Subtask.objects.all().filter(project=project,task=task).count()
tid=t+1
subtask=Subtask(subtaskid=tid,name=name,description=desc,priority=priority.strip(),enddate=datetime.date.max,startdate=start,deadline=deadline,project=project,task=task,assignee=emp)
subtask.save()
for skill in skills:
sk=Skill.objects.filter(name=skill)
if not sk:
sk=Skill(name=skill)
sk.save()
else:
sk=sk[0]
tsk=SubtaskSkills(subtask=subtask,task=task,project=project,skill=sk)
tsk.save()
context={}
context['user'] = emp
context['project'] = project
context['task'] =task
context['subtask'] = subtask
skills=SubtaskSkills.objects.filter(task=task,project=project,subtask=subtask)
user=[]
for skill in skills:
hss=HasSkill.objects.filter(skill=skill.skill)
for hs in hss:
user.append(hs.employee.name)
context['users'] = simplejson.dumps(user)
return render(request,'project/addstm.html',context)
@login_required
def createtaskissuehandler(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager
taskmanager = task.manager
if(emp != manager and emp!= taskmanager):
raise Http404
else:
desc = request.POST['desc']
createdate = request.POST['start']
priority = request.POST['priority']
resolvedate = datetime.date.max
ti = TaskIssue(description=desc,priority=priority.strip(),createdate=createdate,resolvedate=resolvedate,task=task,project=project)
ti.save()
return HttpResponseRedirect(reverse('project:taskissueview' , args=(project_id,task_id,ti.pk,)))
@login_required
def createsubtaskissuehandler(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager
taskmanager = task.manager
if(emp != manager and emp!= taskmanager):
raise Http404
else:
desc = request.POST['desc']
createdate = request.POST['start']
priority = request.POST['priority']
resolvedate = datetime.date.max
sti = SubtaskIssue(description=desc,priority=priority.strip(),createdate=createdate,resolvedate=resolvedate,task=task,project=project,subtask=subtask)
sti.save()
return HttpResponseRedirect(reverse('project:subtaskissueview' , args=(project_id,task_id,subtask_id,sti.pk,)))
@login_required
def editproject(request,project_id):
    #show the project edit page
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
context['project'] = project
manager = project.manager;
if(emp != manager):
raise Http404
else:
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/editproject.html', context)
@login_required
def create(request):
if not(isManagerAuthenticated(request)):
return HttpResponseRedirect(reverse('accounts:index'))
else:
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/create.html',context)
@login_required
def edithandler(request):
if not(isManagerAuthenticated(request)):
return HttpResponseRedirect(reverse('accounts:index'))
else:
name = request.POST['name']
desc = request.POST['desc']
pid = request.POST['id']
try:
project = Project.objects.get(pk=pid)
except Project.DoesNotExist:
raise Http404
project.name = name
project.description = desc
project.save()
return HttpResponseRedirect("%s?edit=1" %reverse('project:view' ,args=(pid,)))
@login_required
def addteammember(request):
projects = Project.objects.all()
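    # backfill Teammember rows for every project manager, task manager and subtask assignee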
for project in projects:
tms = Teammember.objects.filter(project = project)
e = project.manager
tm = Teammember(project = project, employee = e)
if tms.filter(employee = e).count() == 0:
tm.save()
tasks = Task.objects.filter(project=project)
for task in tasks:
e=task.manager
tm = Teammember(project = project, employee = e)
if tms.filter(employee = e).count() == 0:
tm.save()
subtasks = Subtask.objects.filter(project=project,task=task)
for subtask in subtasks:
e=subtask.assignee
tm = Teammember(project = project, employee = e)
if tms.filter(employee = e).count() == 0:
tm.save()
return HttpResponseRedirect(reverse('accounts:myprofile'))
| mit | 5,414,454,718,698,807,000 | 35.817623 | 200 | 0.643346 | false |
errantlinguist/tangrams-analysis | write_word_ra_latex_table.py | 1 | 1522 | #!/usr/bin/env python3
"""
Reads in a vocabulary file from Gabriel containing referring ability (RA) scores and counts for each word and then writes it as a LaTeX tabular environment.
The first column is the word, second is the count and third is the RA.
"""
__author__ = "Todd Shore <[email protected]>"
__copyright__ = "Copyright 2018 Todd Shore"
__license__ = "Apache License, Version 2.0"
import argparse
import csv
import sys
COUNT_COL_IDX = 1
RA_COL_IDX = 2
def __create_argparser() -> argparse.ArgumentParser:
result = argparse.ArgumentParser(
description="Writes a vocabulary RA table in LaTeX format.")
result.add_argument("infile", metavar="FILE",
help="The vocabulary file to read.")
return result
def __main(args):
infile = args.infile
print("Reading \"{}\".".format(infile), file=sys.stderr)
with open(infile, 'r') as inf:
reader = csv.reader(inf, dialect=csv.excel_tab)
rows = tuple(sorted(reader, key=lambda r: float(r[RA_COL_IDX]), reverse=True))
print("\\begin{tabular}{| l r r |}")
print("\t\\hline")
print("\tWord & RA & Count \\\\")
print("\t\\hline")
for row in rows:
word_repr = "\\lingform{%s}" % row[0]
ra_repr = "${0:.5f}$".format(float(row[RA_COL_IDX]))
count_repr = "${}$".format(row[COUNT_COL_IDX])
latex_row = (word_repr, ra_repr, count_repr)
latex_line = "\t" + "\t&\t".join(latex_row) + " \\\\"
print(latex_line)
print("\t\\hline")
print("\\end{tabular}")
if __name__ == "__main__":
__main(__create_argparser().parse_args())
| apache-2.0 | 5,994,214,878,504,431,000 | 27.185185 | 156 | 0.659658 | false |
srgblnch/python-pylon | setup.py | 1 | 4096 | #!/usr/bin/env python
#---- licence header
###############################################################################
## file : setup.py
##
## description : This file has been made to provide a python access to
## the Pylon SDK from python.
##
## project : python-pylon
##
## author(s) : S.Blanch-Torn\'e
##
## Copyright (C) : 2015
## CELLS / ALBA Synchrotron,
## 08290 Bellaterra,
## Spain
##
## This file is part of python-pylon.
##
## python-pylon is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## python-pylon is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with python-pylon. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
#import pyximport; pyximport.install()
from pylon.version import version_python_pylon_string
from Cython.Distutils import build_ext
from distutils.core import setup
from distutils.extension import Extension
pylonExtension = Extension('pylon',['pylon/__init__.pyx',
'pylon/Logger.cpp',
'pylon/Factory.cpp',
'pylon/DevInfo.cpp',
'pylon/Camera.cpp',
'pylon/TransportLayer.cpp',
'pylon/GenApiWrap/INode.cpp',
'pylon/GenApiWrap/ICategory.cpp',
'pylon/GenApiWrap/IEnumeration.cpp',
'pylon/PyCallback.cpp'],
language="c++",
extra_compile_args=[#"-static",
#"-fPIC",
#"-std=c++11",
]
)
#FIXME: check how can be know if c++11 is available to be used
setup(name = 'pylon',
license = "LGPLv3+",
description = "Cython module to provide access to Pylon's SDK.",
version = version_python_pylon_string(),
author = "Sergi Blanch-Torn\'e",
author_email = "[email protected]",
classifiers = ['Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: '\
'GNU Lesser General Public License v3 or later (LGPLv3+)',
'Operating System :: POSIX',
'Programming Language :: Cython',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: '\
'Interface Engine/Protocol Translator',
'Topic :: Software Development :: Embedded Systems',
'Topic :: Software Development :: Libraries :: '\
'Python Modules',
'Topic :: Multimedia :: Graphics :: Capture',
'Topic :: Multimedia :: Video :: Capture',
''],
url="https://github.com/srgblnch/python-pylon",
cmdclass = {'build_ext': build_ext},
ext_modules=[pylonExtension],
#install_requires=['cython>=0.20.1'],
)
#for the classifiers review see:
#https://pypi.python.org/pypi?%3Aaction=list_classifiers
#
#Development Status :: 1 - Planning
#Development Status :: 2 - Pre-Alpha
#Development Status :: 3 - Alpha
#Development Status :: 4 - Beta
#Development Status :: 5 - Production/Stable
| gpl-3.0 | -1,005,211,932,998,684,400 | 41.226804 | 79 | 0.508301 | false |
Art-SoftWare/ircbot | events/02-msgTrigger.py | 1 | 3235 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Regular expression triggers on user messages."""
import re
from random import randint
import util.cfg
bot = None
triggers = {}
def init(botInstance):
"""Inits the msgTrigger module"""
global bot, triggers
bot = botInstance
util.cfg.default = triggers
triggers = util.cfg.load("cfg/triggers.json")
bot.modules.modules["01-simpleCommand"].registerCommand(cmdDef, "def", [":adriens33!~adriens33@(home|homer)\.art-software\.fr"])
bot.modules.modules["01-simpleCommand"].registerCommand(cmdTrg, "trg")
bot.irc.hooks["PRIVMSG"].append(msgHook)
def cmdDef(data, opts=[]):
"""Defines a new trigger and it's associated message (or add a message to an existing trigger if it already exists)
def triggerName expr expression
def triggerName msg message
variables: %user=user name"""
global triggers
if len(opts)<3:
bot.irc.msg(bot._("Sorry! Not enought parameters. See help."), data["tgt"])
return
if opts[1] == "expr":
# Sets the regular expression
if not triggers.__contains__(opts[0]):
triggers[opts[0]] = {"expr": " ".join(opts[2:]), "msg":[]}
else:
triggers[opts[0]]["expr"] = " ".join(opts[2:])
bot.irc.msg(bot._("%s> Expression set for Trigger '%s'") % (data["user"], opts[0]), data["tgt"])
elif opts[1] == "msg":
# Adds a message
if not triggers.__contains__(opts[0]):
triggers[opts[0]] = {"expr": " ", "msg":[]}
triggers[opts[0]]["msg"].append(" ".join(opts[2:]))
bot.irc.msg(bot._("%s> Message added for Trigger '%s'") % (data["user"], opts[0]), data["tgt"])
else:
bot.irc.msg(bot._("Sorry! Subcommand %s unknown.") % opts[1], data["tgt"])
util.cfg.save(triggers, "cfg/triggers.json")
def cmdTrg(data, opts=[]):
"""List active triggers:
    trg: list all trigger names
trg expr name: list expression for trigger name
trg msg name: list messages for trigger name"""
from time import sleep
if len(opts) == 0:
bot.irc.msg(bot._("Loaded triggers: ") + ",".join(list(triggers.keys())), data["tgt"])
if len(opts) == 2:
if opts[0] == "expr" and triggers.__contains__(opts[1]):
bot.irc.msg(bot._("Expression for %s : %s") % (opts[1], triggers[opts[1]]["expr"]), data["tgt"])
elif opts[0] == "msg" and triggers.__contains__(opts[1]):
bot.irc.msg(bot._("Message(s) for %s :") % opts[1], data["tgt"])
nb = 0
for message in triggers[opts[1]]["msg"]:
bot.irc.msg("- %s" % message, data["tgt"])
nb += 1
if nb % 8 == 0:
sleep(1)
def msgHook(evt):
"""Hook for the event PRIVMSG"""
user = evt[0][1:].split("!")[0]
tgt = evt[2]
txt = " ".join(evt[3:])[1:]
if tgt==bot.cfg["nick"]:
tgt = user
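    # every trigger whose expression matches gets one randomly chosen answer, with %user substituted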
for triggerName in triggers.keys():
if re.search(triggers[triggerName]["expr"], txt) != None and len(triggers[triggerName]["msg"])>0:
answer = triggers[triggerName]["msg"][randint(0, len(triggers[triggerName]["msg"])-1)]
bot.irc.msg(answer.replace("%user", user), tgt)
| gpl-3.0 | -176,986,693,126,925,700 | 34.163043 | 132 | 0.573725 | false |
helixyte/TheLMA | thelma/repositories/rdb/schema/tables/rack.py | 1 | 2753 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Rack table.
"""
from datetime import datetime
from sqlalchemy import CHAR
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.schema import DDL
from thelma.entities.rack import RACK_TYPES
from thelma.repositories.rdb.utils import BarcodeSequence
__docformat__ = 'reStructuredText en'
__all__ = ['create_table']
def _setup_postgres_ddl(table):
"""
Barcode default for PostgreSQL and a sequence to support the legacy DB
"""
DDL("""
ALTER TABLE %(table)s ALTER COLUMN barcode SET DATA TYPE cenix_barcode
""",
on='postgres'
).execute_at('after-create', table)
def _setup_sqlite_ddl(table):
"""
Barcode default for SQLite using a trigger and the ROWID as the sequence
It does not conform to how the legacy DB is setup but testing on sqlite
should not fail. Since we do not plan to use SQLite as the production
database the DDL below serves only to support development/testing.
"""
DDL("""
CREATE TRIGGER set_rack_barcode AFTER INSERT ON rack
BEGIN
UPDATE rack
SET barcode =
SUBSTR("00000000", length(new.rowid), 8-length(new.rowid)) ||
new.rowid
WHERE rowid = new.rowid;
END;
""",
on='sqlite'
).execute_at('after-create', table)
def create_table(metadata, item_status_tbl, rack_specs_tbl):
"Table factory."
tbl = Table('rack', metadata,
Column('rack_id', Integer, primary_key=True),
Column('barcode', CHAR, BarcodeSequence('barcode_seq',
start=2400000),
nullable=False, unique=True, index=True),
Column('creation_date', DateTime(timezone=True), nullable=False,
default=datetime.now),
Column('label', String, nullable=False, default=''),
Column('comment', String, nullable=False, default=''),
Column('item_status', String,
ForeignKey(item_status_tbl.c.item_status_id,
onupdate='CASCADE', ondelete='RESTRICT'),
nullable=False),
Column('rack_specs_id', Integer,
ForeignKey(rack_specs_tbl.c.rack_specs_id,
onupdate='CASCADE', ondelete='RESTRICT'),
nullable=False),
Column('rack_type', String(9), nullable=False,
default=RACK_TYPES.RACK),
)
_setup_postgres_ddl(tbl)
_setup_sqlite_ddl(tbl)
return tbl
| mit | 4,945,031,176,538,619,000 | 32.168675 | 80 | 0.639666 | false |
minlexx/pyevemon | esi_client/models/get_characters_character_id_wallets_200_ok.py | 1 | 3859 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetCharactersCharacterIdWallets200Ok(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, balance=None, wallet_id=None):
"""
GetCharactersCharacterIdWallets200Ok - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'balance': 'int',
'wallet_id': 'int'
}
self.attribute_map = {
'balance': 'balance',
'wallet_id': 'wallet_id'
}
self._balance = balance
self._wallet_id = wallet_id
@property
def balance(self):
"""
Gets the balance of this GetCharactersCharacterIdWallets200Ok.
Wallet's balance in ISK hundredths.
:return: The balance of this GetCharactersCharacterIdWallets200Ok.
:rtype: int
"""
return self._balance
@balance.setter
def balance(self, balance):
"""
Sets the balance of this GetCharactersCharacterIdWallets200Ok.
Wallet's balance in ISK hundredths.
:param balance: The balance of this GetCharactersCharacterIdWallets200Ok.
:type: int
"""
self._balance = balance
@property
def wallet_id(self):
"""
Gets the wallet_id of this GetCharactersCharacterIdWallets200Ok.
wallet_id integer
:return: The wallet_id of this GetCharactersCharacterIdWallets200Ok.
:rtype: int
"""
return self._wallet_id
@wallet_id.setter
def wallet_id(self, wallet_id):
"""
Sets the wallet_id of this GetCharactersCharacterIdWallets200Ok.
wallet_id integer
:param wallet_id: The wallet_id of this GetCharactersCharacterIdWallets200Ok.
:type: int
"""
self._wallet_id = wallet_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetCharactersCharacterIdWallets200Ok):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| gpl-3.0 | -132,285,698,399,243,070 | 25.986014 | 85 | 0.550661 | false |